kreuzberg 4.0.0.pre.rc.13 → 4.0.0.pre.rc.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +14 -14
- data/.rspec +3 -3
- data/.rubocop.yaml +1 -1
- data/.rubocop.yml +538 -538
- data/Gemfile +8 -8
- data/Gemfile.lock +105 -2
- data/README.md +454 -454
- data/Rakefile +33 -25
- data/Steepfile +47 -47
- data/examples/async_patterns.rb +341 -341
- data/ext/kreuzberg_rb/extconf.rb +45 -45
- data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
- data/ext/kreuzberg_rb/native/Cargo.lock +6940 -6941
- data/ext/kreuzberg_rb/native/Cargo.toml +54 -54
- data/ext/kreuzberg_rb/native/README.md +425 -425
- data/ext/kreuzberg_rb/native/build.rs +15 -15
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
- data/ext/kreuzberg_rb/native/include/strings.h +20 -20
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
- data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3158
- data/extconf.rb +28 -28
- data/kreuzberg.gemspec +214 -214
- data/lib/kreuzberg/api_proxy.rb +142 -142
- data/lib/kreuzberg/cache_api.rb +81 -81
- data/lib/kreuzberg/cli.rb +55 -55
- data/lib/kreuzberg/cli_proxy.rb +127 -127
- data/lib/kreuzberg/config.rb +724 -724
- data/lib/kreuzberg/error_context.rb +80 -80
- data/lib/kreuzberg/errors.rb +118 -118
- data/lib/kreuzberg/extraction_api.rb +340 -340
- data/lib/kreuzberg/mcp_proxy.rb +186 -186
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
- data/lib/kreuzberg/post_processor_protocol.rb +86 -86
- data/lib/kreuzberg/result.rb +279 -279
- data/lib/kreuzberg/setup_lib_path.rb +80 -80
- data/lib/kreuzberg/validator_protocol.rb +89 -89
- data/lib/kreuzberg/version.rb +5 -5
- data/lib/kreuzberg.rb +109 -109
- data/lib/{pdfium.dll → libpdfium.dylib} +0 -0
- data/sig/kreuzberg/internal.rbs +184 -184
- data/sig/kreuzberg.rbs +546 -546
- data/spec/binding/cache_spec.rb +227 -227
- data/spec/binding/cli_proxy_spec.rb +85 -85
- data/spec/binding/cli_spec.rb +55 -55
- data/spec/binding/config_spec.rb +345 -345
- data/spec/binding/config_validation_spec.rb +283 -283
- data/spec/binding/error_handling_spec.rb +213 -213
- data/spec/binding/errors_spec.rb +66 -66
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
- data/spec/binding/plugins/postprocessor_spec.rb +269 -269
- data/spec/binding/plugins/validator_spec.rb +274 -274
- data/spec/fixtures/config.toml +39 -39
- data/spec/fixtures/config.yaml +41 -41
- data/spec/fixtures/invalid_config.toml +4 -4
- data/spec/smoke/package_spec.rb +178 -178
- data/spec/spec_helper.rb +42 -42
- data/vendor/Cargo.toml +1 -1
- data/vendor/kreuzberg/Cargo.toml +5 -5
- data/vendor/kreuzberg/README.md +230 -230
- data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
- data/vendor/kreuzberg/build.rs +843 -843
- data/vendor/kreuzberg/src/api/error.rs +81 -81
- data/vendor/kreuzberg/src/api/handlers.rs +199 -199
- data/vendor/kreuzberg/src/api/mod.rs +79 -79
- data/vendor/kreuzberg/src/api/server.rs +353 -353
- data/vendor/kreuzberg/src/api/types.rs +170 -170
- data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
- data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
- data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
- data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
- data/vendor/kreuzberg/src/core/config.rs +1080 -1080
- data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
- data/vendor/kreuzberg/src/core/io.rs +329 -329
- data/vendor/kreuzberg/src/core/mime.rs +605 -605
- data/vendor/kreuzberg/src/core/mod.rs +47 -47
- data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1184
- data/vendor/kreuzberg/src/embeddings.rs +500 -500
- data/vendor/kreuzberg/src/error.rs +431 -431
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
- data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
- data/vendor/kreuzberg/src/extraction/email.rs +854 -854
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
- data/vendor/kreuzberg/src/extraction/html.rs +601 -601
- data/vendor/kreuzberg/src/extraction/image.rs +491 -491
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -574
- data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
- data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
- data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
- data/vendor/kreuzberg/src/extraction/table.rs +328 -328
- data/vendor/kreuzberg/src/extraction/text.rs +269 -269
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
- data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
- data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
- data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
- data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
- data/vendor/kreuzberg/src/extractors/email.rs +157 -157
- data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
- data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
- data/vendor/kreuzberg/src/extractors/html.rs +407 -407
- data/vendor/kreuzberg/src/extractors/image.rs +219 -219
- data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
- data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
- data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
- data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
- data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
- data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
- data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
- data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
- data/vendor/kreuzberg/src/extractors/pdf.rs +749 -749
- data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
- data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
- data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
- data/vendor/kreuzberg/src/extractors/security.rs +484 -484
- data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
- data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
- data/vendor/kreuzberg/src/extractors/text.rs +265 -265
- data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
- data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
- data/vendor/kreuzberg/src/image/dpi.rs +164 -164
- data/vendor/kreuzberg/src/image/mod.rs +6 -6
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
- data/vendor/kreuzberg/src/image/resize.rs +89 -89
- data/vendor/kreuzberg/src/keywords/config.rs +154 -154
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
- data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
- data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
- data/vendor/kreuzberg/src/keywords/types.rs +68 -68
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
- data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
- data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
- data/vendor/kreuzberg/src/lib.rs +113 -113
- data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
- data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
- data/vendor/kreuzberg/src/ocr/error.rs +37 -37
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
- data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
- data/vendor/kreuzberg/src/ocr/types.rs +393 -393
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
- data/vendor/kreuzberg/src/panic_context.rs +154 -154
- data/vendor/kreuzberg/src/pdf/bindings.rs +44 -44
- data/vendor/kreuzberg/src/pdf/bundled.rs +346 -346
- data/vendor/kreuzberg/src/pdf/error.rs +130 -130
- data/vendor/kreuzberg/src/pdf/images.rs +139 -139
- data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
- data/vendor/kreuzberg/src/pdf/mod.rs +68 -68
- data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
- data/vendor/kreuzberg/src/pdf/table.rs +420 -420
- data/vendor/kreuzberg/src/pdf/text.rs +240 -240
- data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
- data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
- data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
- data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
- data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
- data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
- data/vendor/kreuzberg/src/text/mod.rs +25 -25
- data/vendor/kreuzberg/src/text/quality.rs +697 -697
- data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
- data/vendor/kreuzberg/src/types.rs +1055 -1055
- data/vendor/kreuzberg/src/utils/mod.rs +17 -17
- data/vendor/kreuzberg/src/utils/quality.rs +959 -959
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
- data/vendor/kreuzberg/tests/api_tests.rs +966 -966
- data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
- data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
- data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
- data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
- data/vendor/kreuzberg/tests/config_features.rs +612 -612
- data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
- data/vendor/kreuzberg/tests/core_integration.rs +510 -510
- data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
- data/vendor/kreuzberg/tests/email_integration.rs +327 -327
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
- data/vendor/kreuzberg/tests/error_handling.rs +402 -402
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
- data/vendor/kreuzberg/tests/format_integration.rs +164 -164
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
- data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
- data/vendor/kreuzberg/tests/image_integration.rs +255 -255
- data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
- data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
- data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
- data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
- data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
- data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
- data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
- data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
- data/vendor/kreuzberg/tests/security_validation.rs +416 -416
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
- data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
- data/vendor/kreuzberg-ffi/Cargo.toml +1 -1
- data/vendor/kreuzberg-ffi/README.md +851 -851
- data/vendor/kreuzberg-ffi/build.rs +176 -176
- data/vendor/kreuzberg-ffi/cbindgen.toml +27 -27
- data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -12
- data/vendor/kreuzberg-ffi/kreuzberg.h +1087 -1087
- data/vendor/kreuzberg-ffi/src/lib.rs +3616 -3616
- data/vendor/kreuzberg-ffi/src/panic_shield.rs +247 -247
- data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -48
- data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -299
- data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -346
- data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -232
- data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -470
- data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -13
- data/vendor/kreuzberg-tesseract/.crate-ignore +2 -2
- data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -2933
- data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
- data/vendor/kreuzberg-tesseract/LICENSE +22 -22
- data/vendor/kreuzberg-tesseract/README.md +399 -399
- data/vendor/kreuzberg-tesseract/build.rs +1354 -1354
- data/vendor/kreuzberg-tesseract/patches/README.md +71 -71
- data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -199
- data/vendor/kreuzberg-tesseract/src/api.rs +1371 -1371
- data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -77
- data/vendor/kreuzberg-tesseract/src/enums.rs +297 -297
- data/vendor/kreuzberg-tesseract/src/error.rs +81 -81
- data/vendor/kreuzberg-tesseract/src/lib.rs +145 -145
- data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -57
- data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -197
- data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -253
- data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -286
- data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -183
- data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -211
- data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
- data/vendor/rb-sys/Cargo.lock +393 -393
- data/vendor/rb-sys/Cargo.toml +70 -70
- data/vendor/rb-sys/Cargo.toml.orig +57 -57
- data/vendor/rb-sys/LICENSE-APACHE +190 -190
- data/vendor/rb-sys/LICENSE-MIT +21 -21
- data/vendor/rb-sys/build/features.rs +111 -111
- data/vendor/rb-sys/build/main.rs +286 -286
- data/vendor/rb-sys/build/stable_api_config.rs +155 -155
- data/vendor/rb-sys/build/version.rs +50 -50
- data/vendor/rb-sys/readme.md +36 -36
- data/vendor/rb-sys/src/bindings.rs +21 -21
- data/vendor/rb-sys/src/hidden.rs +11 -11
- data/vendor/rb-sys/src/lib.rs +35 -35
- data/vendor/rb-sys/src/macros.rs +371 -371
- data/vendor/rb-sys/src/memory.rs +53 -53
- data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
- data/vendor/rb-sys/src/special_consts.rs +31 -31
- data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
- data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
- data/vendor/rb-sys/src/stable_api.rs +260 -260
- data/vendor/rb-sys/src/symbol.rs +31 -31
- data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
- data/vendor/rb-sys/src/utils.rs +89 -89
- data/vendor/rb-sys/src/value_type.rs +7 -7
- metadata +73 -4
- data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +0 -12
|
@@ -1,619 +1,619 @@
|
|
|
1
|
-
use ahash::AHashMap;
|
|
2
|
-
use std::cmp::Ordering;
|
|
3
|
-
|
|
4
|
-
#[derive(Debug, Clone)]
|
|
5
|
-
struct ScoredToken {
|
|
6
|
-
token: String,
|
|
7
|
-
position: usize,
|
|
8
|
-
importance_score: f32,
|
|
9
|
-
#[allow(dead_code)]
|
|
10
|
-
context_boost: f32,
|
|
11
|
-
#[allow(dead_code)]
|
|
12
|
-
frequency_score: f32,
|
|
13
|
-
}
|
|
14
|
-
|
|
15
|
-
impl PartialEq for ScoredToken {
|
|
16
|
-
fn eq(&self, other: &Self) -> bool {
|
|
17
|
-
self.importance_score == other.importance_score
|
|
18
|
-
}
|
|
19
|
-
}
|
|
20
|
-
|
|
21
|
-
impl Eq for ScoredToken {}
|
|
22
|
-
|
|
23
|
-
impl PartialOrd for ScoredToken {
|
|
24
|
-
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
|
25
|
-
Some(self.cmp(other))
|
|
26
|
-
}
|
|
27
|
-
}
|
|
28
|
-
|
|
29
|
-
impl Ord for ScoredToken {
|
|
30
|
-
fn cmp(&self, other: &Self) -> Ordering {
|
|
31
|
-
self.importance_score
|
|
32
|
-
.partial_cmp(&other.importance_score)
|
|
33
|
-
.unwrap_or(Ordering::Equal)
|
|
34
|
-
}
|
|
35
|
-
}
|
|
36
|
-
|
|
37
|
-
pub struct SemanticAnalyzer {
|
|
38
|
-
importance_weights: AHashMap<String, f32>,
|
|
39
|
-
hypernyms: AHashMap<String, String>,
|
|
40
|
-
semantic_clusters: AHashMap<String, Vec<String>>,
|
|
41
|
-
}
|
|
42
|
-
|
|
43
|
-
impl SemanticAnalyzer {
|
|
44
|
-
pub fn new(_language: &str) -> Self {
|
|
45
|
-
let mut analyzer = Self {
|
|
46
|
-
importance_weights: AHashMap::new(),
|
|
47
|
-
hypernyms: AHashMap::new(),
|
|
48
|
-
semantic_clusters: AHashMap::new(),
|
|
49
|
-
};
|
|
50
|
-
|
|
51
|
-
analyzer.initialize_importance_weights();
|
|
52
|
-
analyzer.initialize_hypernyms();
|
|
53
|
-
analyzer.initialize_semantic_clusters();
|
|
54
|
-
|
|
55
|
-
analyzer
|
|
56
|
-
}
|
|
57
|
-
|
|
58
|
-
pub fn apply_semantic_filtering(&self, text: &str, threshold: f32) -> String {
|
|
59
|
-
let tokens = self.tokenize_and_score(text);
|
|
60
|
-
let filtered_tokens = self.filter_by_importance(tokens, threshold);
|
|
61
|
-
self.reconstruct_text(filtered_tokens)
|
|
62
|
-
}
|
|
63
|
-
|
|
64
|
-
pub fn apply_hypernym_compression(&self, text: &str, target_reduction: Option<f32>) -> String {
|
|
65
|
-
let tokens = self.tokenize_and_score(text);
|
|
66
|
-
let compressed_tokens = self.compress_with_hypernyms(tokens, target_reduction);
|
|
67
|
-
self.reconstruct_text(compressed_tokens)
|
|
68
|
-
}
|
|
69
|
-
|
|
70
|
-
fn tokenize_and_score(&self, text: &str) -> Vec<ScoredToken> {
|
|
71
|
-
let words: Vec<&str> = text.split_whitespace().collect();
|
|
72
|
-
let mut scored_tokens = Vec::with_capacity(words.len());
|
|
73
|
-
|
|
74
|
-
let mut word_freq = AHashMap::new();
|
|
75
|
-
for word in &words {
|
|
76
|
-
let clean_word = self.clean_word(word);
|
|
77
|
-
*word_freq.entry(clean_word).or_insert(0) += 1;
|
|
78
|
-
}
|
|
79
|
-
|
|
80
|
-
for (position, word) in words.iter().enumerate() {
|
|
81
|
-
let clean_word = self.clean_word(word);
|
|
82
|
-
let base_importance = self.calculate_base_importance(&clean_word);
|
|
83
|
-
let context_boost = self.calculate_context_boost(&clean_word, position, &words);
|
|
84
|
-
let frequency_score = self.calculate_frequency_score(&clean_word, &word_freq, words.len());
|
|
85
|
-
|
|
86
|
-
let total_score = base_importance + context_boost + frequency_score;
|
|
87
|
-
|
|
88
|
-
scored_tokens.push(ScoredToken {
|
|
89
|
-
token: word.to_string(),
|
|
90
|
-
position,
|
|
91
|
-
importance_score: total_score,
|
|
92
|
-
context_boost,
|
|
93
|
-
frequency_score,
|
|
94
|
-
});
|
|
95
|
-
}
|
|
96
|
-
|
|
97
|
-
scored_tokens
|
|
98
|
-
}
|
|
99
|
-
|
|
100
|
-
fn filter_by_importance(&self, tokens: Vec<ScoredToken>, threshold: f32) -> Vec<ScoredToken> {
|
|
101
|
-
tokens
|
|
102
|
-
.into_iter()
|
|
103
|
-
.filter(|token| token.importance_score >= threshold)
|
|
104
|
-
.collect()
|
|
105
|
-
}
|
|
106
|
-
|
|
107
|
-
fn compress_with_hypernyms(&self, tokens: Vec<ScoredToken>, target_reduction: Option<f32>) -> Vec<ScoredToken> {
|
|
108
|
-
let mut result = tokens;
|
|
109
|
-
|
|
110
|
-
if let Some(target) = target_reduction {
|
|
111
|
-
let target_count = ((1.0 - target) * result.len() as f32) as usize;
|
|
112
|
-
|
|
113
|
-
// Handle NaN values in importance scores by treating them as equal ~keep
|
|
114
|
-
result.sort_by(|a, b| {
|
|
115
|
-
b.importance_score
|
|
116
|
-
.partial_cmp(&a.importance_score)
|
|
117
|
-
.unwrap_or(std::cmp::Ordering::Equal)
|
|
118
|
-
});
|
|
119
|
-
|
|
120
|
-
for token in result.iter_mut().skip(target_count) {
|
|
121
|
-
if let Some(hypernym) = self.get_hypernym(&token.token) {
|
|
122
|
-
token.token = hypernym;
|
|
123
|
-
token.importance_score *= 0.8;
|
|
124
|
-
}
|
|
125
|
-
}
|
|
126
|
-
|
|
127
|
-
result.truncate(target_count.max(1));
|
|
128
|
-
} else {
|
|
129
|
-
for token in &mut result {
|
|
130
|
-
if token.importance_score < 0.5
|
|
131
|
-
&& let Some(hypernym) = self.get_hypernym(&token.token)
|
|
132
|
-
{
|
|
133
|
-
token.token = hypernym;
|
|
134
|
-
}
|
|
135
|
-
}
|
|
136
|
-
}
|
|
137
|
-
|
|
138
|
-
result.sort_by_key(|token| token.position);
|
|
139
|
-
result
|
|
140
|
-
}
|
|
141
|
-
|
|
142
|
-
fn reconstruct_text(&self, tokens: Vec<ScoredToken>) -> String {
|
|
143
|
-
tokens
|
|
144
|
-
.into_iter()
|
|
145
|
-
.map(|token| token.token)
|
|
146
|
-
.collect::<Vec<_>>()
|
|
147
|
-
.join(" ")
|
|
148
|
-
}
|
|
149
|
-
|
|
150
|
-
fn calculate_base_importance(&self, word: &str) -> f32 {
|
|
151
|
-
if let Some(&weight) = self.importance_weights.get(word) {
|
|
152
|
-
return weight;
|
|
153
|
-
}
|
|
154
|
-
|
|
155
|
-
let mut score = 0.3;
|
|
156
|
-
|
|
157
|
-
score += (word.len() as f32 * 0.02).min(0.2);
|
|
158
|
-
|
|
159
|
-
if word.chars().next().map(|c| c.is_uppercase()).unwrap_or(false) {
|
|
160
|
-
score += 0.2;
|
|
161
|
-
}
|
|
162
|
-
|
|
163
|
-
if word.chars().any(|c| c.is_numeric()) {
|
|
164
|
-
score += 0.15;
|
|
165
|
-
}
|
|
166
|
-
|
|
167
|
-
if self.is_technical_term(word) {
|
|
168
|
-
score += 0.25;
|
|
169
|
-
}
|
|
170
|
-
|
|
171
|
-
score.min(1.0)
|
|
172
|
-
}
|
|
173
|
-
|
|
174
|
-
fn calculate_context_boost(&self, word: &str, position: usize, words: &[&str]) -> f32 {
|
|
175
|
-
let mut boost = 0.0;
|
|
176
|
-
|
|
177
|
-
if position == 0 || position == words.len() - 1 {
|
|
178
|
-
boost += 0.1;
|
|
179
|
-
}
|
|
180
|
-
|
|
181
|
-
let window = 2;
|
|
182
|
-
let start = position.saturating_sub(window);
|
|
183
|
-
let end = (position + window + 1).min(words.len());
|
|
184
|
-
|
|
185
|
-
for &context_word in &words[start..end] {
|
|
186
|
-
if context_word != word {
|
|
187
|
-
boost += self.calculate_contextual_weight(word, context_word);
|
|
188
|
-
}
|
|
189
|
-
}
|
|
190
|
-
|
|
191
|
-
boost.min(0.3)
|
|
192
|
-
}
|
|
193
|
-
|
|
194
|
-
fn calculate_frequency_score(&self, word: &str, word_freq: &AHashMap<String, i32>, total_words: usize) -> f32 {
|
|
195
|
-
if let Some(&freq) = word_freq.get(word) {
|
|
196
|
-
let tf = freq as f32 / total_words as f32;
|
|
197
|
-
|
|
198
|
-
(tf.ln() + 1.0) * 0.1
|
|
199
|
-
} else {
|
|
200
|
-
0.0
|
|
201
|
-
}
|
|
202
|
-
}
|
|
203
|
-
|
|
204
|
-
fn calculate_contextual_weight(&self, word: &str, context_word: &str) -> f32 {
|
|
205
|
-
if self.is_technical_term(word) && self.is_technical_term(context_word) {
|
|
206
|
-
0.05
|
|
207
|
-
} else if context_word.chars().next().map(|c| c.is_uppercase()).unwrap_or(false) {
|
|
208
|
-
0.02
|
|
209
|
-
} else {
|
|
210
|
-
0.0
|
|
211
|
-
}
|
|
212
|
-
}
|
|
213
|
-
|
|
214
|
-
fn is_technical_term(&self, word: &str) -> bool {
|
|
215
|
-
word.len() > 6
|
|
216
|
-
&& (word.contains("_")
|
|
217
|
-
|| word.chars().filter(|&c| c.is_uppercase()).count() > 1
|
|
218
|
-
|| word.ends_with("tion")
|
|
219
|
-
|| word.ends_with("ment")
|
|
220
|
-
|| word.ends_with("ing"))
|
|
221
|
-
}
|
|
222
|
-
|
|
223
|
-
fn get_hypernym(&self, word: &str) -> Option<String> {
|
|
224
|
-
let clean_word = self.clean_word(word).to_lowercase();
|
|
225
|
-
self.hypernyms.get(&clean_word).cloned()
|
|
226
|
-
}
|
|
227
|
-
|
|
228
|
-
fn clean_word(&self, word: &str) -> String {
|
|
229
|
-
word.chars()
|
|
230
|
-
.filter(|c| c.is_alphanumeric())
|
|
231
|
-
.collect::<String>()
|
|
232
|
-
.to_lowercase()
|
|
233
|
-
}
|
|
234
|
-
|
|
235
|
-
fn initialize_importance_weights(&mut self) {
|
|
236
|
-
let high_importance = [
|
|
237
|
-
("result", 0.8),
|
|
238
|
-
("conclusion", 0.8),
|
|
239
|
-
("important", 0.7),
|
|
240
|
-
("significant", 0.7),
|
|
241
|
-
("analysis", 0.7),
|
|
242
|
-
("method", 0.6),
|
|
243
|
-
("data", 0.6),
|
|
244
|
-
("system", 0.6),
|
|
245
|
-
("performance", 0.6),
|
|
246
|
-
("improvement", 0.6),
|
|
247
|
-
];
|
|
248
|
-
|
|
249
|
-
for (word, score) in &high_importance {
|
|
250
|
-
self.importance_weights.insert(word.to_string(), *score);
|
|
251
|
-
}
|
|
252
|
-
|
|
253
|
-
let medium_importance = [
|
|
254
|
-
("process", 0.5),
|
|
255
|
-
("algorithm", 0.5),
|
|
256
|
-
("function", 0.5),
|
|
257
|
-
("model", 0.5),
|
|
258
|
-
("implementation", 0.5),
|
|
259
|
-
];
|
|
260
|
-
|
|
261
|
-
for (word, score) in &medium_importance {
|
|
262
|
-
self.importance_weights.insert(word.to_string(), *score);
|
|
263
|
-
}
|
|
264
|
-
}
|
|
265
|
-
|
|
266
|
-
fn initialize_hypernyms(&mut self) {
|
|
267
|
-
let hypernym_pairs = [
|
|
268
|
-
("car", "vehicle"),
|
|
269
|
-
("dog", "animal"),
|
|
270
|
-
("apple", "fruit"),
|
|
271
|
-
("chair", "furniture"),
|
|
272
|
-
("book", "publication"),
|
|
273
|
-
("computer", "device"),
|
|
274
|
-
("algorithm", "method"),
|
|
275
|
-
("implementation", "approach"),
|
|
276
|
-
("optimization", "improvement"),
|
|
277
|
-
("analysis", "study"),
|
|
278
|
-
];
|
|
279
|
-
|
|
280
|
-
for (word, hypernym) in &hypernym_pairs {
|
|
281
|
-
self.hypernyms.insert(word.to_string(), hypernym.to_string());
|
|
282
|
-
}
|
|
283
|
-
}
|
|
284
|
-
|
|
285
|
-
fn initialize_semantic_clusters(&mut self) {
|
|
286
|
-
self.semantic_clusters.insert(
|
|
287
|
-
"computing".to_string(),
|
|
288
|
-
vec![
|
|
289
|
-
"computer".to_string(),
|
|
290
|
-
"algorithm".to_string(),
|
|
291
|
-
"software".to_string(),
|
|
292
|
-
"programming".to_string(),
|
|
293
|
-
"code".to_string(),
|
|
294
|
-
],
|
|
295
|
-
);
|
|
296
|
-
|
|
297
|
-
self.semantic_clusters.insert(
|
|
298
|
-
"analysis".to_string(),
|
|
299
|
-
vec![
|
|
300
|
-
"analysis".to_string(),
|
|
301
|
-
"study".to_string(),
|
|
302
|
-
"research".to_string(),
|
|
303
|
-
"investigation".to_string(),
|
|
304
|
-
"examination".to_string(),
|
|
305
|
-
],
|
|
306
|
-
);
|
|
307
|
-
|
|
308
|
-
self.semantic_clusters.insert(
|
|
309
|
-
"performance".to_string(),
|
|
310
|
-
vec![
|
|
311
|
-
"performance".to_string(),
|
|
312
|
-
"speed".to_string(),
|
|
313
|
-
"efficiency".to_string(),
|
|
314
|
-
"optimization".to_string(),
|
|
315
|
-
"improvement".to_string(),
|
|
316
|
-
],
|
|
317
|
-
);
|
|
318
|
-
}
|
|
319
|
-
}
|
|
320
|
-
|
|
321
|
-
#[cfg(test)]
|
|
322
|
-
mod tests {
|
|
323
|
-
use super::*;
|
|
324
|
-
|
|
325
|
-
#[test]
|
|
326
|
-
fn test_semantic_filtering() {
|
|
327
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
328
|
-
let input = "The quick brown fox jumps over the lazy dog with great performance";
|
|
329
|
-
let result = analyzer.apply_semantic_filtering(input, 0.4);
|
|
330
|
-
|
|
331
|
-
assert!(result.contains("performance") || result.contains("fox") || result.contains("dog"));
|
|
332
|
-
assert!(result.len() < input.len());
|
|
333
|
-
}
|
|
334
|
-
|
|
335
|
-
#[test]
|
|
336
|
-
fn test_hypernym_compression() {
|
|
337
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
338
|
-
let input = "The car drove past the dog near the apple tree";
|
|
339
|
-
let result = analyzer.apply_hypernym_compression(input, Some(0.5));
|
|
340
|
-
|
|
341
|
-
let original_words = input.split_whitespace().count();
|
|
342
|
-
let result_words = result.split_whitespace().count();
|
|
343
|
-
assert!(result_words <= (original_words as f32 * 0.5) as usize + 1);
|
|
344
|
-
}
|
|
345
|
-
|
|
346
|
-
#[test]
|
|
347
|
-
fn test_importance_scoring() {
|
|
348
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
349
|
-
let tokens = analyzer.tokenize_and_score("The important analysis shows significant results");
|
|
350
|
-
|
|
351
|
-
let important_token = tokens.iter().find(|t| t.token == "important").unwrap();
|
|
352
|
-
let analysis_token = tokens.iter().find(|t| t.token == "analysis").unwrap();
|
|
353
|
-
let the_token = tokens.iter().find(|t| t.token == "The").unwrap();
|
|
354
|
-
|
|
355
|
-
assert!(important_token.importance_score > the_token.importance_score);
|
|
356
|
-
assert!(analysis_token.importance_score > the_token.importance_score);
|
|
357
|
-
}
|
|
358
|
-
|
|
359
|
-
#[test]
|
|
360
|
-
fn test_semantic_filtering_empty_text() {
|
|
361
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
362
|
-
let result = analyzer.apply_semantic_filtering("", 0.5);
|
|
363
|
-
assert_eq!(result, "");
|
|
364
|
-
}
|
|
365
|
-
|
|
366
|
-
#[test]
|
|
367
|
-
fn test_semantic_filtering_high_threshold() {
|
|
368
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
369
|
-
let input = "The quick brown fox";
|
|
370
|
-
let result = analyzer.apply_semantic_filtering(input, 0.9);
|
|
371
|
-
assert!(result.len() <= input.len());
|
|
372
|
-
}
|
|
373
|
-
|
|
374
|
-
#[test]
|
|
375
|
-
fn test_hypernym_compression_without_target() {
|
|
376
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
377
|
-
let input = "The car drove past the dog";
|
|
378
|
-
let result = analyzer.apply_hypernym_compression(input, None);
|
|
379
|
-
assert!(!result.is_empty());
|
|
380
|
-
}
|
|
381
|
-
|
|
382
|
-
#[test]
|
|
383
|
-
fn test_technical_term_detection() {
|
|
384
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
385
|
-
|
|
386
|
-
assert!(analyzer.is_technical_term("implementation"));
|
|
387
|
-
assert!(analyzer.is_technical_term("optimization"));
|
|
388
|
-
assert!(analyzer.is_technical_term("processing"));
|
|
389
|
-
assert!(analyzer.is_technical_term("HTTP_SERVER"));
|
|
390
|
-
assert!(!analyzer.is_technical_term("cat"));
|
|
391
|
-
assert!(!analyzer.is_technical_term("dog"));
|
|
392
|
-
}
|
|
393
|
-
|
|
394
|
-
#[test]
|
|
395
|
-
fn test_clean_word() {
|
|
396
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
397
|
-
|
|
398
|
-
assert_eq!(analyzer.clean_word("Hello!"), "hello");
|
|
399
|
-
assert_eq!(analyzer.clean_word("test123"), "test123");
|
|
400
|
-
assert_eq!(analyzer.clean_word("word,"), "word");
|
|
401
|
-
assert_eq!(analyzer.clean_word("(test)"), "test");
|
|
402
|
-
}
|
|
403
|
-
|
|
404
|
-
#[test]
|
|
405
|
-
fn test_calculate_base_importance() {
|
|
406
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
407
|
-
|
|
408
|
-
let result_score = analyzer.calculate_base_importance("result");
|
|
409
|
-
let conclusion_score = analyzer.calculate_base_importance("conclusion");
|
|
410
|
-
|
|
411
|
-
assert!(result_score > 0.5);
|
|
412
|
-
assert!(conclusion_score > 0.5);
|
|
413
|
-
|
|
414
|
-
let process_score = analyzer.calculate_base_importance("process");
|
|
415
|
-
assert!(process_score >= 0.4);
|
|
416
|
-
|
|
417
|
-
let regular_score = analyzer.calculate_base_importance("cat");
|
|
418
|
-
assert!(regular_score < result_score);
|
|
419
|
-
}
|
|
420
|
-
|
|
421
|
-
#[test]
|
|
422
|
-
fn test_calculate_base_importance_uppercase() {
|
|
423
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
424
|
-
|
|
425
|
-
let uppercase_score = analyzer.calculate_base_importance("Test");
|
|
426
|
-
let lowercase_score = analyzer.calculate_base_importance("test");
|
|
427
|
-
|
|
428
|
-
assert!(uppercase_score > lowercase_score);
|
|
429
|
-
}
|
|
430
|
-
|
|
431
|
-
#[test]
|
|
432
|
-
fn test_calculate_base_importance_with_numbers() {
|
|
433
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
434
|
-
|
|
435
|
-
let with_number = analyzer.calculate_base_importance("test123");
|
|
436
|
-
let without_number = analyzer.calculate_base_importance("test");
|
|
437
|
-
|
|
438
|
-
assert!(with_number > without_number);
|
|
439
|
-
}
|
|
440
|
-
|
|
441
|
-
#[test]
|
|
442
|
-
fn test_calculate_base_importance_length_bonus() {
|
|
443
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
444
|
-
|
|
445
|
-
let long_word = analyzer.calculate_base_importance("verylongword");
|
|
446
|
-
let short_word = analyzer.calculate_base_importance("cat");
|
|
447
|
-
|
|
448
|
-
assert!(long_word > short_word);
|
|
449
|
-
}
|
|
450
|
-
|
|
451
|
-
#[test]
|
|
452
|
-
fn test_get_hypernym() {
|
|
453
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
454
|
-
|
|
455
|
-
assert_eq!(analyzer.get_hypernym("car"), Some("vehicle".to_string()));
|
|
456
|
-
assert_eq!(analyzer.get_hypernym("dog"), Some("animal".to_string()));
|
|
457
|
-
assert_eq!(analyzer.get_hypernym("apple"), Some("fruit".to_string()));
|
|
458
|
-
assert_eq!(analyzer.get_hypernym("unknown"), None);
|
|
459
|
-
}
|
|
460
|
-
|
|
461
|
-
#[test]
|
|
462
|
-
fn test_get_hypernym_case_insensitive() {
|
|
463
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
464
|
-
|
|
465
|
-
assert_eq!(analyzer.get_hypernym("CAR"), Some("vehicle".to_string()));
|
|
466
|
-
assert_eq!(analyzer.get_hypernym("Dog"), Some("animal".to_string()));
|
|
467
|
-
}
|
|
468
|
-
|
|
469
|
-
#[test]
|
|
470
|
-
fn test_tokenize_and_score_positions() {
|
|
471
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
472
|
-
let tokens = analyzer.tokenize_and_score("first middle last");
|
|
473
|
-
|
|
474
|
-
assert_eq!(tokens[0].position, 0);
|
|
475
|
-
assert_eq!(tokens[1].position, 1);
|
|
476
|
-
assert_eq!(tokens[2].position, 2);
|
|
477
|
-
}
|
|
478
|
-
|
|
479
|
-
#[test]
|
|
480
|
-
fn test_context_boost_for_edge_positions() {
|
|
481
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
482
|
-
let tokens = analyzer.tokenize_and_score("first middle last");
|
|
483
|
-
|
|
484
|
-
assert!(tokens[0].importance_score > 0.0);
|
|
485
|
-
assert!(tokens[2].importance_score > 0.0);
|
|
486
|
-
}
|
|
487
|
-
|
|
488
|
-
#[test]
|
|
489
|
-
fn test_frequency_score() {
|
|
490
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
491
|
-
let tokens = analyzer.tokenize_and_score("test test test other");
|
|
492
|
-
|
|
493
|
-
let test_token = tokens.iter().find(|t| t.token == "test").unwrap();
|
|
494
|
-
let other_token = tokens.iter().find(|t| t.token == "other").unwrap();
|
|
495
|
-
|
|
496
|
-
assert!(test_token.frequency_score > other_token.frequency_score);
|
|
497
|
-
}
|
|
498
|
-
|
|
499
|
-
#[test]
|
|
500
|
-
fn test_scored_token_ordering() {
|
|
501
|
-
let token1 = ScoredToken {
|
|
502
|
-
token: "a".to_string(),
|
|
503
|
-
position: 0,
|
|
504
|
-
importance_score: 0.5,
|
|
505
|
-
context_boost: 0.0,
|
|
506
|
-
frequency_score: 0.0,
|
|
507
|
-
};
|
|
508
|
-
|
|
509
|
-
let token2 = ScoredToken {
|
|
510
|
-
token: "b".to_string(),
|
|
511
|
-
position: 1,
|
|
512
|
-
importance_score: 0.7,
|
|
513
|
-
context_boost: 0.0,
|
|
514
|
-
frequency_score: 0.0,
|
|
515
|
-
};
|
|
516
|
-
|
|
517
|
-
assert!(token2 > token1);
|
|
518
|
-
assert_eq!(token1, token1.clone());
|
|
519
|
-
}
|
|
520
|
-
|
|
521
|
-
#[test]
|
|
522
|
-
fn test_reconstruct_text() {
|
|
523
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
524
|
-
let tokens = vec![
|
|
525
|
-
ScoredToken {
|
|
526
|
-
token: "Hello".to_string(),
|
|
527
|
-
position: 0,
|
|
528
|
-
importance_score: 0.5,
|
|
529
|
-
context_boost: 0.0,
|
|
530
|
-
frequency_score: 0.0,
|
|
531
|
-
},
|
|
532
|
-
ScoredToken {
|
|
533
|
-
token: "world".to_string(),
|
|
534
|
-
position: 1,
|
|
535
|
-
importance_score: 0.5,
|
|
536
|
-
context_boost: 0.0,
|
|
537
|
-
frequency_score: 0.0,
|
|
538
|
-
},
|
|
539
|
-
];
|
|
540
|
-
|
|
541
|
-
let result = analyzer.reconstruct_text(tokens);
|
|
542
|
-
assert_eq!(result, "Hello world");
|
|
543
|
-
}
|
|
544
|
-
|
|
545
|
-
#[test]
|
|
546
|
-
fn test_compress_with_hypernyms_respects_target() {
|
|
547
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
548
|
-
let tokens = vec![
|
|
549
|
-
ScoredToken {
|
|
550
|
-
token: "car".to_string(),
|
|
551
|
-
position: 0,
|
|
552
|
-
importance_score: 0.3,
|
|
553
|
-
context_boost: 0.0,
|
|
554
|
-
frequency_score: 0.0,
|
|
555
|
-
},
|
|
556
|
-
ScoredToken {
|
|
557
|
-
token: "dog".to_string(),
|
|
558
|
-
position: 1,
|
|
559
|
-
importance_score: 0.3,
|
|
560
|
-
context_boost: 0.0,
|
|
561
|
-
frequency_score: 0.0,
|
|
562
|
-
},
|
|
563
|
-
ScoredToken {
|
|
564
|
-
token: "test".to_string(),
|
|
565
|
-
position: 2,
|
|
566
|
-
importance_score: 0.8,
|
|
567
|
-
context_boost: 0.0,
|
|
568
|
-
frequency_score: 0.0,
|
|
569
|
-
},
|
|
570
|
-
];
|
|
571
|
-
|
|
572
|
-
let result = analyzer.compress_with_hypernyms(tokens, Some(0.5));
|
|
573
|
-
assert!(result.len() <= 2);
|
|
574
|
-
}
|
|
575
|
-
|
|
576
|
-
#[test]
|
|
577
|
-
fn test_initialize_importance_weights() {
|
|
578
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
579
|
-
|
|
580
|
-
assert!(analyzer.importance_weights.contains_key("result"));
|
|
581
|
-
assert!(analyzer.importance_weights.contains_key("conclusion"));
|
|
582
|
-
assert!(analyzer.importance_weights.contains_key("important"));
|
|
583
|
-
assert!(analyzer.importance_weights.contains_key("process"));
|
|
584
|
-
}
|
|
585
|
-
|
|
586
|
-
#[test]
|
|
587
|
-
fn test_initialize_hypernyms() {
|
|
588
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
589
|
-
|
|
590
|
-
assert!(analyzer.hypernyms.contains_key("car"));
|
|
591
|
-
assert!(analyzer.hypernyms.contains_key("dog"));
|
|
592
|
-
assert!(analyzer.hypernyms.contains_key("apple"));
|
|
593
|
-
}
|
|
594
|
-
|
|
595
|
-
#[test]
|
|
596
|
-
fn test_initialize_semantic_clusters() {
|
|
597
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
598
|
-
|
|
599
|
-
assert!(analyzer.semantic_clusters.contains_key("computing"));
|
|
600
|
-
assert!(analyzer.semantic_clusters.contains_key("analysis"));
|
|
601
|
-
assert!(analyzer.semantic_clusters.contains_key("performance"));
|
|
602
|
-
}
|
|
603
|
-
|
|
604
|
-
#[test]
|
|
605
|
-
fn test_contextual_weight_technical_terms() {
|
|
606
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
607
|
-
|
|
608
|
-
let weight = analyzer.calculate_contextual_weight("implementation", "optimization");
|
|
609
|
-
assert!(weight > 0.0);
|
|
610
|
-
}
|
|
611
|
-
|
|
612
|
-
#[test]
|
|
613
|
-
fn test_hypernym_compression_zero_target() {
|
|
614
|
-
let analyzer = SemanticAnalyzer::new("en");
|
|
615
|
-
let input = "The car drove fast";
|
|
616
|
-
let result = analyzer.apply_hypernym_compression(input, Some(0.0));
|
|
617
|
-
assert!(!result.is_empty());
|
|
618
|
-
}
|
|
619
|
-
}
|
|
1
|
+
use ahash::AHashMap;
|
|
2
|
+
use std::cmp::Ordering;
|
|
3
|
+
|
|
4
|
+
#[derive(Debug, Clone)]
|
|
5
|
+
struct ScoredToken {
|
|
6
|
+
token: String,
|
|
7
|
+
position: usize,
|
|
8
|
+
importance_score: f32,
|
|
9
|
+
#[allow(dead_code)]
|
|
10
|
+
context_boost: f32,
|
|
11
|
+
#[allow(dead_code)]
|
|
12
|
+
frequency_score: f32,
|
|
13
|
+
}
|
|
14
|
+
|
|
15
|
+
impl PartialEq for ScoredToken {
|
|
16
|
+
fn eq(&self, other: &Self) -> bool {
|
|
17
|
+
self.importance_score == other.importance_score
|
|
18
|
+
}
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
impl Eq for ScoredToken {}
|
|
22
|
+
|
|
23
|
+
impl PartialOrd for ScoredToken {
|
|
24
|
+
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
|
25
|
+
Some(self.cmp(other))
|
|
26
|
+
}
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
impl Ord for ScoredToken {
|
|
30
|
+
fn cmp(&self, other: &Self) -> Ordering {
|
|
31
|
+
self.importance_score
|
|
32
|
+
.partial_cmp(&other.importance_score)
|
|
33
|
+
.unwrap_or(Ordering::Equal)
|
|
34
|
+
}
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
pub struct SemanticAnalyzer {
|
|
38
|
+
importance_weights: AHashMap<String, f32>,
|
|
39
|
+
hypernyms: AHashMap<String, String>,
|
|
40
|
+
semantic_clusters: AHashMap<String, Vec<String>>,
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
impl SemanticAnalyzer {
|
|
44
|
+
pub fn new(_language: &str) -> Self {
|
|
45
|
+
let mut analyzer = Self {
|
|
46
|
+
importance_weights: AHashMap::new(),
|
|
47
|
+
hypernyms: AHashMap::new(),
|
|
48
|
+
semantic_clusters: AHashMap::new(),
|
|
49
|
+
};
|
|
50
|
+
|
|
51
|
+
analyzer.initialize_importance_weights();
|
|
52
|
+
analyzer.initialize_hypernyms();
|
|
53
|
+
analyzer.initialize_semantic_clusters();
|
|
54
|
+
|
|
55
|
+
analyzer
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
pub fn apply_semantic_filtering(&self, text: &str, threshold: f32) -> String {
|
|
59
|
+
let tokens = self.tokenize_and_score(text);
|
|
60
|
+
let filtered_tokens = self.filter_by_importance(tokens, threshold);
|
|
61
|
+
self.reconstruct_text(filtered_tokens)
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
pub fn apply_hypernym_compression(&self, text: &str, target_reduction: Option<f32>) -> String {
|
|
65
|
+
let tokens = self.tokenize_and_score(text);
|
|
66
|
+
let compressed_tokens = self.compress_with_hypernyms(tokens, target_reduction);
|
|
67
|
+
self.reconstruct_text(compressed_tokens)
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
fn tokenize_and_score(&self, text: &str) -> Vec<ScoredToken> {
|
|
71
|
+
let words: Vec<&str> = text.split_whitespace().collect();
|
|
72
|
+
let mut scored_tokens = Vec::with_capacity(words.len());
|
|
73
|
+
|
|
74
|
+
let mut word_freq = AHashMap::new();
|
|
75
|
+
for word in &words {
|
|
76
|
+
let clean_word = self.clean_word(word);
|
|
77
|
+
*word_freq.entry(clean_word).or_insert(0) += 1;
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
for (position, word) in words.iter().enumerate() {
|
|
81
|
+
let clean_word = self.clean_word(word);
|
|
82
|
+
let base_importance = self.calculate_base_importance(&clean_word);
|
|
83
|
+
let context_boost = self.calculate_context_boost(&clean_word, position, &words);
|
|
84
|
+
let frequency_score = self.calculate_frequency_score(&clean_word, &word_freq, words.len());
|
|
85
|
+
|
|
86
|
+
let total_score = base_importance + context_boost + frequency_score;
|
|
87
|
+
|
|
88
|
+
scored_tokens.push(ScoredToken {
|
|
89
|
+
token: word.to_string(),
|
|
90
|
+
position,
|
|
91
|
+
importance_score: total_score,
|
|
92
|
+
context_boost,
|
|
93
|
+
frequency_score,
|
|
94
|
+
});
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
scored_tokens
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
fn filter_by_importance(&self, tokens: Vec<ScoredToken>, threshold: f32) -> Vec<ScoredToken> {
|
|
101
|
+
tokens
|
|
102
|
+
.into_iter()
|
|
103
|
+
.filter(|token| token.importance_score >= threshold)
|
|
104
|
+
.collect()
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
fn compress_with_hypernyms(&self, tokens: Vec<ScoredToken>, target_reduction: Option<f32>) -> Vec<ScoredToken> {
|
|
108
|
+
let mut result = tokens;
|
|
109
|
+
|
|
110
|
+
if let Some(target) = target_reduction {
|
|
111
|
+
let target_count = ((1.0 - target) * result.len() as f32) as usize;
|
|
112
|
+
|
|
113
|
+
// Handle NaN values in importance scores by treating them as equal ~keep
|
|
114
|
+
result.sort_by(|a, b| {
|
|
115
|
+
b.importance_score
|
|
116
|
+
.partial_cmp(&a.importance_score)
|
|
117
|
+
.unwrap_or(std::cmp::Ordering::Equal)
|
|
118
|
+
});
|
|
119
|
+
|
|
120
|
+
for token in result.iter_mut().skip(target_count) {
|
|
121
|
+
if let Some(hypernym) = self.get_hypernym(&token.token) {
|
|
122
|
+
token.token = hypernym;
|
|
123
|
+
token.importance_score *= 0.8;
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
result.truncate(target_count.max(1));
|
|
128
|
+
} else {
|
|
129
|
+
for token in &mut result {
|
|
130
|
+
if token.importance_score < 0.5
|
|
131
|
+
&& let Some(hypernym) = self.get_hypernym(&token.token)
|
|
132
|
+
{
|
|
133
|
+
token.token = hypernym;
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
result.sort_by_key(|token| token.position);
|
|
139
|
+
result
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
fn reconstruct_text(&self, tokens: Vec<ScoredToken>) -> String {
|
|
143
|
+
tokens
|
|
144
|
+
.into_iter()
|
|
145
|
+
.map(|token| token.token)
|
|
146
|
+
.collect::<Vec<_>>()
|
|
147
|
+
.join(" ")
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
fn calculate_base_importance(&self, word: &str) -> f32 {
|
|
151
|
+
if let Some(&weight) = self.importance_weights.get(word) {
|
|
152
|
+
return weight;
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
let mut score = 0.3;
|
|
156
|
+
|
|
157
|
+
score += (word.len() as f32 * 0.02).min(0.2);
|
|
158
|
+
|
|
159
|
+
if word.chars().next().map(|c| c.is_uppercase()).unwrap_or(false) {
|
|
160
|
+
score += 0.2;
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
if word.chars().any(|c| c.is_numeric()) {
|
|
164
|
+
score += 0.15;
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
if self.is_technical_term(word) {
|
|
168
|
+
score += 0.25;
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
score.min(1.0)
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
fn calculate_context_boost(&self, word: &str, position: usize, words: &[&str]) -> f32 {
|
|
175
|
+
let mut boost = 0.0;
|
|
176
|
+
|
|
177
|
+
if position == 0 || position == words.len() - 1 {
|
|
178
|
+
boost += 0.1;
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
let window = 2;
|
|
182
|
+
let start = position.saturating_sub(window);
|
|
183
|
+
let end = (position + window + 1).min(words.len());
|
|
184
|
+
|
|
185
|
+
for &context_word in &words[start..end] {
|
|
186
|
+
if context_word != word {
|
|
187
|
+
boost += self.calculate_contextual_weight(word, context_word);
|
|
188
|
+
}
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
boost.min(0.3)
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
fn calculate_frequency_score(&self, word: &str, word_freq: &AHashMap<String, i32>, total_words: usize) -> f32 {
|
|
195
|
+
if let Some(&freq) = word_freq.get(word) {
|
|
196
|
+
let tf = freq as f32 / total_words as f32;
|
|
197
|
+
|
|
198
|
+
(tf.ln() + 1.0) * 0.1
|
|
199
|
+
} else {
|
|
200
|
+
0.0
|
|
201
|
+
}
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
fn calculate_contextual_weight(&self, word: &str, context_word: &str) -> f32 {
|
|
205
|
+
if self.is_technical_term(word) && self.is_technical_term(context_word) {
|
|
206
|
+
0.05
|
|
207
|
+
} else if context_word.chars().next().map(|c| c.is_uppercase()).unwrap_or(false) {
|
|
208
|
+
0.02
|
|
209
|
+
} else {
|
|
210
|
+
0.0
|
|
211
|
+
}
|
|
212
|
+
}
|
|
213
|
+
|
|
214
|
+
fn is_technical_term(&self, word: &str) -> bool {
|
|
215
|
+
word.len() > 6
|
|
216
|
+
&& (word.contains("_")
|
|
217
|
+
|| word.chars().filter(|&c| c.is_uppercase()).count() > 1
|
|
218
|
+
|| word.ends_with("tion")
|
|
219
|
+
|| word.ends_with("ment")
|
|
220
|
+
|| word.ends_with("ing"))
|
|
221
|
+
}
|
|
222
|
+
|
|
223
|
+
fn get_hypernym(&self, word: &str) -> Option<String> {
|
|
224
|
+
let clean_word = self.clean_word(word).to_lowercase();
|
|
225
|
+
self.hypernyms.get(&clean_word).cloned()
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
fn clean_word(&self, word: &str) -> String {
|
|
229
|
+
word.chars()
|
|
230
|
+
.filter(|c| c.is_alphanumeric())
|
|
231
|
+
.collect::<String>()
|
|
232
|
+
.to_lowercase()
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
fn initialize_importance_weights(&mut self) {
|
|
236
|
+
let high_importance = [
|
|
237
|
+
("result", 0.8),
|
|
238
|
+
("conclusion", 0.8),
|
|
239
|
+
("important", 0.7),
|
|
240
|
+
("significant", 0.7),
|
|
241
|
+
("analysis", 0.7),
|
|
242
|
+
("method", 0.6),
|
|
243
|
+
("data", 0.6),
|
|
244
|
+
("system", 0.6),
|
|
245
|
+
("performance", 0.6),
|
|
246
|
+
("improvement", 0.6),
|
|
247
|
+
];
|
|
248
|
+
|
|
249
|
+
for (word, score) in &high_importance {
|
|
250
|
+
self.importance_weights.insert(word.to_string(), *score);
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
let medium_importance = [
|
|
254
|
+
("process", 0.5),
|
|
255
|
+
("algorithm", 0.5),
|
|
256
|
+
("function", 0.5),
|
|
257
|
+
("model", 0.5),
|
|
258
|
+
("implementation", 0.5),
|
|
259
|
+
];
|
|
260
|
+
|
|
261
|
+
for (word, score) in &medium_importance {
|
|
262
|
+
self.importance_weights.insert(word.to_string(), *score);
|
|
263
|
+
}
|
|
264
|
+
}
|
|
265
|
+
|
|
266
|
+
fn initialize_hypernyms(&mut self) {
|
|
267
|
+
let hypernym_pairs = [
|
|
268
|
+
("car", "vehicle"),
|
|
269
|
+
("dog", "animal"),
|
|
270
|
+
("apple", "fruit"),
|
|
271
|
+
("chair", "furniture"),
|
|
272
|
+
("book", "publication"),
|
|
273
|
+
("computer", "device"),
|
|
274
|
+
("algorithm", "method"),
|
|
275
|
+
("implementation", "approach"),
|
|
276
|
+
("optimization", "improvement"),
|
|
277
|
+
("analysis", "study"),
|
|
278
|
+
];
|
|
279
|
+
|
|
280
|
+
for (word, hypernym) in &hypernym_pairs {
|
|
281
|
+
self.hypernyms.insert(word.to_string(), hypernym.to_string());
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
fn initialize_semantic_clusters(&mut self) {
|
|
286
|
+
self.semantic_clusters.insert(
|
|
287
|
+
"computing".to_string(),
|
|
288
|
+
vec![
|
|
289
|
+
"computer".to_string(),
|
|
290
|
+
"algorithm".to_string(),
|
|
291
|
+
"software".to_string(),
|
|
292
|
+
"programming".to_string(),
|
|
293
|
+
"code".to_string(),
|
|
294
|
+
],
|
|
295
|
+
);
|
|
296
|
+
|
|
297
|
+
self.semantic_clusters.insert(
|
|
298
|
+
"analysis".to_string(),
|
|
299
|
+
vec![
|
|
300
|
+
"analysis".to_string(),
|
|
301
|
+
"study".to_string(),
|
|
302
|
+
"research".to_string(),
|
|
303
|
+
"investigation".to_string(),
|
|
304
|
+
"examination".to_string(),
|
|
305
|
+
],
|
|
306
|
+
);
|
|
307
|
+
|
|
308
|
+
self.semantic_clusters.insert(
|
|
309
|
+
"performance".to_string(),
|
|
310
|
+
vec![
|
|
311
|
+
"performance".to_string(),
|
|
312
|
+
"speed".to_string(),
|
|
313
|
+
"efficiency".to_string(),
|
|
314
|
+
"optimization".to_string(),
|
|
315
|
+
"improvement".to_string(),
|
|
316
|
+
],
|
|
317
|
+
);
|
|
318
|
+
}
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
#[cfg(test)]
|
|
322
|
+
mod tests {
|
|
323
|
+
use super::*;
|
|
324
|
+
|
|
325
|
+
#[test]
|
|
326
|
+
fn test_semantic_filtering() {
|
|
327
|
+
let analyzer = SemanticAnalyzer::new("en");
|
|
328
|
+
let input = "The quick brown fox jumps over the lazy dog with great performance";
|
|
329
|
+
let result = analyzer.apply_semantic_filtering(input, 0.4);
|
|
330
|
+
|
|
331
|
+
assert!(result.contains("performance") || result.contains("fox") || result.contains("dog"));
|
|
332
|
+
assert!(result.len() < input.len());
|
|
333
|
+
}
|
|
334
|
+
|
|
335
|
+
#[test]
|
|
336
|
+
fn test_hypernym_compression() {
|
|
337
|
+
let analyzer = SemanticAnalyzer::new("en");
|
|
338
|
+
let input = "The car drove past the dog near the apple tree";
|
|
339
|
+
let result = analyzer.apply_hypernym_compression(input, Some(0.5));
|
|
340
|
+
|
|
341
|
+
let original_words = input.split_whitespace().count();
|
|
342
|
+
let result_words = result.split_whitespace().count();
|
|
343
|
+
assert!(result_words <= (original_words as f32 * 0.5) as usize + 1);
|
|
344
|
+
}

    #[test]
    fn test_importance_scoring() {
        let analyzer = SemanticAnalyzer::new("en");
        let tokens = analyzer.tokenize_and_score("The important analysis shows significant results");

        let important_token = tokens.iter().find(|t| t.token == "important").unwrap();
        let analysis_token = tokens.iter().find(|t| t.token == "analysis").unwrap();
        let the_token = tokens.iter().find(|t| t.token == "The").unwrap();

        assert!(important_token.importance_score > the_token.importance_score);
        assert!(analysis_token.importance_score > the_token.importance_score);
    }

    #[test]
    fn test_semantic_filtering_empty_text() {
        let analyzer = SemanticAnalyzer::new("en");
        let result = analyzer.apply_semantic_filtering("", 0.5);
        assert_eq!(result, "");
    }

    #[test]
    fn test_semantic_filtering_high_threshold() {
        let analyzer = SemanticAnalyzer::new("en");
        let input = "The quick brown fox";
        let result = analyzer.apply_semantic_filtering(input, 0.9);
        assert!(result.len() <= input.len());
    }

    #[test]
    fn test_hypernym_compression_without_target() {
        let analyzer = SemanticAnalyzer::new("en");
        let input = "The car drove past the dog";
        let result = analyzer.apply_hypernym_compression(input, None);
        assert!(!result.is_empty());
    }

    #[test]
    fn test_technical_term_detection() {
        let analyzer = SemanticAnalyzer::new("en");

        assert!(analyzer.is_technical_term("implementation"));
        assert!(analyzer.is_technical_term("optimization"));
        assert!(analyzer.is_technical_term("processing"));
        assert!(analyzer.is_technical_term("HTTP_SERVER"));
        assert!(!analyzer.is_technical_term("cat"));
        assert!(!analyzer.is_technical_term("dog"));
    }

    #[test]
    fn test_clean_word() {
        let analyzer = SemanticAnalyzer::new("en");

        assert_eq!(analyzer.clean_word("Hello!"), "hello");
        assert_eq!(analyzer.clean_word("test123"), "test123");
        assert_eq!(analyzer.clean_word("word,"), "word");
        assert_eq!(analyzer.clean_word("(test)"), "test");
    }
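`test_clean_word` pins down the normalization contract: punctuation at either end is stripped, the result is lowercased, and interior digits survive. A hypothetical stand-in that satisfies the same four assertions (the crate's actual `clean_word` is defined earlier in this file and may differ):

```rust
// Illustrative only: trim non-alphanumeric characters from both ends,
// then lowercase what remains.
fn clean_word(word: &str) -> String {
    word.trim_matches(|c: char| !c.is_alphanumeric()).to_lowercase()
}

fn main() {
    assert_eq!(clean_word("Hello!"), "hello");
    assert_eq!(clean_word("test123"), "test123");
    assert_eq!(clean_word("word,"), "word");
    assert_eq!(clean_word("(test)"), "test");
}
```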

    #[test]
    fn test_calculate_base_importance() {
        let analyzer = SemanticAnalyzer::new("en");

        let result_score = analyzer.calculate_base_importance("result");
        let conclusion_score = analyzer.calculate_base_importance("conclusion");

        assert!(result_score > 0.5);
        assert!(conclusion_score > 0.5);

        let process_score = analyzer.calculate_base_importance("process");
        assert!(process_score >= 0.4);

        let regular_score = analyzer.calculate_base_importance("cat");
        assert!(regular_score < result_score);
    }

    #[test]
    fn test_calculate_base_importance_uppercase() {
        let analyzer = SemanticAnalyzer::new("en");

        let uppercase_score = analyzer.calculate_base_importance("Test");
        let lowercase_score = analyzer.calculate_base_importance("test");

        assert!(uppercase_score > lowercase_score);
    }

    #[test]
    fn test_calculate_base_importance_with_numbers() {
        let analyzer = SemanticAnalyzer::new("en");

        let with_number = analyzer.calculate_base_importance("test123");
        let without_number = analyzer.calculate_base_importance("test");

        assert!(with_number > without_number);
    }

    #[test]
    fn test_calculate_base_importance_length_bonus() {
        let analyzer = SemanticAnalyzer::new("en");

        let long_word = analyzer.calculate_base_importance("verylongword");
        let short_word = analyzer.calculate_base_importance("cat");

        assert!(long_word > short_word);
    }

    #[test]
    fn test_get_hypernym() {
        let analyzer = SemanticAnalyzer::new("en");

        assert_eq!(analyzer.get_hypernym("car"), Some("vehicle".to_string()));
        assert_eq!(analyzer.get_hypernym("dog"), Some("animal".to_string()));
        assert_eq!(analyzer.get_hypernym("apple"), Some("fruit".to_string()));
        assert_eq!(analyzer.get_hypernym("unknown"), None);
    }

    #[test]
    fn test_get_hypernym_case_insensitive() {
        let analyzer = SemanticAnalyzer::new("en");

        assert_eq!(analyzer.get_hypernym("CAR"), Some("vehicle".to_string()));
        assert_eq!(analyzer.get_hypernym("Dog"), Some("animal".to_string()));
    }
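The case-insensitive test implies that `get_hypernym` lowercases the query before probing the map. A minimal sketch of that behavior over a plain `HashMap` (the free function and map here are illustrative, not the crate's internals):

```rust
use std::collections::HashMap;

// Keys are stored lowercase; queries are lowercased before lookup, so
// "CAR" and "Dog" resolve the same way as "car" and "dog".
fn get_hypernym(map: &HashMap<String, String>, word: &str) -> Option<String> {
    map.get(&word.to_lowercase()).cloned()
}

fn main() {
    let mut hypernyms = HashMap::new();
    hypernyms.insert("car".to_string(), "vehicle".to_string());

    assert_eq!(get_hypernym(&hypernyms, "CAR"), Some("vehicle".to_string()));
    assert_eq!(get_hypernym(&hypernyms, "unknown"), None);
}
```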

    #[test]
    fn test_tokenize_and_score_positions() {
        let analyzer = SemanticAnalyzer::new("en");
        let tokens = analyzer.tokenize_and_score("first middle last");

        assert_eq!(tokens[0].position, 0);
        assert_eq!(tokens[1].position, 1);
        assert_eq!(tokens[2].position, 2);
    }

    #[test]
    fn test_context_boost_for_edge_positions() {
        let analyzer = SemanticAnalyzer::new("en");
        let tokens = analyzer.tokenize_and_score("first middle last");

        assert!(tokens[0].importance_score > 0.0);
        assert!(tokens[2].importance_score > 0.0);
    }

    #[test]
    fn test_frequency_score() {
        let analyzer = SemanticAnalyzer::new("en");
        let tokens = analyzer.tokenize_and_score("test test test other");

        let test_token = tokens.iter().find(|t| t.token == "test").unwrap();
        let other_token = tokens.iter().find(|t| t.token == "other").unwrap();

        assert!(test_token.frequency_score > other_token.frequency_score);
    }
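`test_frequency_score` only requires that repeated tokens outscore rare ones. One plausible scoring that satisfies it is relative frequency, count over total tokens; a self-contained sketch (not necessarily the crate's actual formula):

```rust
use std::collections::HashMap;

// Relative frequency: a token's count divided by the total token count.
fn frequency_scores(text: &str) -> HashMap<&str, f32> {
    let tokens: Vec<&str> = text.split_whitespace().collect();
    let mut counts: HashMap<&str, usize> = HashMap::new();
    for t in &tokens {
        *counts.entry(t).or_insert(0) += 1;
    }
    counts
        .into_iter()
        .map(|(t, c)| (t, c as f32 / tokens.len() as f32))
        .collect()
}

fn main() {
    let scores = frequency_scores("test test test other");
    assert!(scores["test"] > scores["other"]); // 0.75 vs 0.25
}
```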

    #[test]
    fn test_scored_token_ordering() {
        let token1 = ScoredToken {
            token: "a".to_string(),
            position: 0,
            importance_score: 0.5,
            context_boost: 0.0,
            frequency_score: 0.0,
        };

        let token2 = ScoredToken {
            token: "b".to_string(),
            position: 1,
            importance_score: 0.7,
            context_boost: 0.0,
            frequency_score: 0.0,
        };

        assert!(token2 > token1);
        assert_eq!(token1, token1.clone());
    }
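`test_scored_token_ordering` relies on `ScoredToken` supporting comparison driven by `importance_score` (0.7 sorts above 0.5) alongside `Clone` and `PartialEq`. A hypothetical shape consistent with those assertions (the real struct is defined earlier in this file; only the field names are taken from the tests):

```rust
#[derive(Clone, Debug, PartialEq)]
struct ScoredToken {
    token: String,
    position: usize,
    importance_score: f32,
    context_boost: f32,
    frequency_score: f32,
}

// Comparison keyed on importance_score alone is enough to make the
// test's `token2 > token1` assertion hold.
impl PartialOrd for ScoredToken {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.importance_score.partial_cmp(&other.importance_score)
    }
}

fn main() {
    let token1 = ScoredToken {
        token: "a".to_string(),
        position: 0,
        importance_score: 0.5,
        context_boost: 0.0,
        frequency_score: 0.0,
    };
    let token2 = ScoredToken { importance_score: 0.7, ..token1.clone() };

    assert!(token2 > token1);
    assert_eq!(token1, token1.clone());
}
```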

    #[test]
    fn test_reconstruct_text() {
        let analyzer = SemanticAnalyzer::new("en");
        let tokens = vec![
            ScoredToken {
                token: "Hello".to_string(),
                position: 0,
                importance_score: 0.5,
                context_boost: 0.0,
                frequency_score: 0.0,
            },
            ScoredToken {
                token: "world".to_string(),
                position: 1,
                importance_score: 0.5,
                context_boost: 0.0,
                frequency_score: 0.0,
            },
        ];

        let result = analyzer.reconstruct_text(tokens);
        assert_eq!(result, "Hello world");
    }

    #[test]
    fn test_compress_with_hypernyms_respects_target() {
        let analyzer = SemanticAnalyzer::new("en");
        let tokens = vec![
            ScoredToken {
                token: "car".to_string(),
                position: 0,
                importance_score: 0.3,
                context_boost: 0.0,
                frequency_score: 0.0,
            },
            ScoredToken {
                token: "dog".to_string(),
                position: 1,
                importance_score: 0.3,
                context_boost: 0.0,
                frequency_score: 0.0,
            },
            ScoredToken {
                token: "test".to_string(),
                position: 2,
                importance_score: 0.8,
                context_boost: 0.0,
                frequency_score: 0.0,
            },
        ];

        let result = analyzer.compress_with_hypernyms(tokens, Some(0.5));
        assert!(result.len() <= 2);
    }

    #[test]
    fn test_initialize_importance_weights() {
        let analyzer = SemanticAnalyzer::new("en");

        assert!(analyzer.importance_weights.contains_key("result"));
        assert!(analyzer.importance_weights.contains_key("conclusion"));
        assert!(analyzer.importance_weights.contains_key("important"));
        assert!(analyzer.importance_weights.contains_key("process"));
    }

    #[test]
    fn test_initialize_hypernyms() {
        let analyzer = SemanticAnalyzer::new("en");

        assert!(analyzer.hypernyms.contains_key("car"));
        assert!(analyzer.hypernyms.contains_key("dog"));
        assert!(analyzer.hypernyms.contains_key("apple"));
    }

    #[test]
    fn test_initialize_semantic_clusters() {
        let analyzer = SemanticAnalyzer::new("en");

        assert!(analyzer.semantic_clusters.contains_key("computing"));
        assert!(analyzer.semantic_clusters.contains_key("analysis"));
        assert!(analyzer.semantic_clusters.contains_key("performance"));
    }

    #[test]
    fn test_contextual_weight_technical_terms() {
        let analyzer = SemanticAnalyzer::new("en");

        let weight = analyzer.calculate_contextual_weight("implementation", "optimization");
        assert!(weight > 0.0);
    }

    #[test]
    fn test_hypernym_compression_zero_target() {
        let analyzer = SemanticAnalyzer::new("en");
        let input = "The car drove fast";
        let result = analyzer.apply_hypernym_compression(input, Some(0.0));
        assert!(!result.is_empty());
    }
}