kreuzberg 4.0.8 → 4.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (312)
  1. checksums.yaml +4 -4
  2. data/Gemfile.lock +2 -2
  3. data/README.md +1 -1
  4. data/ext/kreuzberg_rb/native/Cargo.lock +94 -98
  5. data/ext/kreuzberg_rb/native/Cargo.toml +4 -2
  6. data/ext/kreuzberg_rb/native/src/batch.rs +139 -0
  7. data/ext/kreuzberg_rb/native/src/config/mod.rs +10 -0
  8. data/ext/kreuzberg_rb/native/src/config/types.rs +1058 -0
  9. data/ext/kreuzberg_rb/native/src/error_handling.rs +125 -0
  10. data/ext/kreuzberg_rb/native/src/extraction.rs +79 -0
  11. data/ext/kreuzberg_rb/native/src/gc_guarded_value.rs +35 -0
  12. data/ext/kreuzberg_rb/native/src/helpers.rs +176 -0
  13. data/ext/kreuzberg_rb/native/src/lib.rs +342 -3622
  14. data/ext/kreuzberg_rb/native/src/metadata.rs +34 -0
  15. data/ext/kreuzberg_rb/native/src/plugins/mod.rs +92 -0
  16. data/ext/kreuzberg_rb/native/src/plugins/ocr_backend.rs +159 -0
  17. data/ext/kreuzberg_rb/native/src/plugins/post_processor.rs +126 -0
  18. data/ext/kreuzberg_rb/native/src/plugins/validator.rs +99 -0
  19. data/ext/kreuzberg_rb/native/src/result.rs +326 -0
  20. data/ext/kreuzberg_rb/native/src/validation.rs +4 -0
  21. data/lib/kreuzberg/config.rb +99 -2
  22. data/lib/kreuzberg/result.rb +107 -2
  23. data/lib/kreuzberg/types.rb +104 -0
  24. data/lib/kreuzberg/version.rb +1 -1
  25. data/lib/kreuzberg.rb +0 -4
  26. data/sig/kreuzberg.rbs +105 -1
  27. data/spec/fixtures/config.toml +1 -1
  28. data/spec/fixtures/config.yaml +1 -1
  29. data/vendor/Cargo.toml +3 -3
  30. data/vendor/kreuzberg/Cargo.toml +5 -4
  31. data/vendor/kreuzberg/README.md +1 -1
  32. data/vendor/kreuzberg/src/api/config.rs +69 -0
  33. data/vendor/kreuzberg/src/api/handlers.rs +99 -2
  34. data/vendor/kreuzberg/src/api/mod.rs +14 -7
  35. data/vendor/kreuzberg/src/api/router.rs +214 -0
  36. data/vendor/kreuzberg/src/api/startup.rs +243 -0
  37. data/vendor/kreuzberg/src/api/types.rs +78 -0
  38. data/vendor/kreuzberg/src/cache/cleanup.rs +277 -0
  39. data/vendor/kreuzberg/src/cache/core.rs +428 -0
  40. data/vendor/kreuzberg/src/cache/mod.rs +21 -843
  41. data/vendor/kreuzberg/src/cache/utilities.rs +156 -0
  42. data/vendor/kreuzberg/src/chunking/boundaries.rs +301 -0
  43. data/vendor/kreuzberg/src/chunking/builder.rs +294 -0
  44. data/vendor/kreuzberg/src/chunking/config.rs +52 -0
  45. data/vendor/kreuzberg/src/chunking/core.rs +1017 -0
  46. data/vendor/kreuzberg/src/chunking/mod.rs +14 -2211
  47. data/vendor/kreuzberg/src/chunking/processor.rs +10 -0
  48. data/vendor/kreuzberg/src/chunking/validation.rs +686 -0
  49. data/vendor/kreuzberg/src/core/config/extraction/core.rs +169 -0
  50. data/vendor/kreuzberg/src/core/config/extraction/env.rs +179 -0
  51. data/vendor/kreuzberg/src/core/config/extraction/loaders.rs +204 -0
  52. data/vendor/kreuzberg/src/core/config/extraction/mod.rs +42 -0
  53. data/vendor/kreuzberg/src/core/config/extraction/types.rs +93 -0
  54. data/vendor/kreuzberg/src/core/config/formats.rs +135 -0
  55. data/vendor/kreuzberg/src/core/config/mod.rs +20 -0
  56. data/vendor/kreuzberg/src/core/config/ocr.rs +73 -0
  57. data/vendor/kreuzberg/src/core/config/page.rs +57 -0
  58. data/vendor/kreuzberg/src/core/config/pdf.rs +111 -0
  59. data/vendor/kreuzberg/src/core/config/processing.rs +312 -0
  60. data/vendor/kreuzberg/src/core/config_validation/dependencies.rs +187 -0
  61. data/vendor/kreuzberg/src/core/config_validation/mod.rs +386 -0
  62. data/vendor/kreuzberg/src/core/config_validation/sections.rs +401 -0
  63. data/vendor/kreuzberg/src/core/extractor/batch.rs +246 -0
  64. data/vendor/kreuzberg/src/core/extractor/bytes.rs +116 -0
  65. data/vendor/kreuzberg/src/core/extractor/file.rs +240 -0
  66. data/vendor/kreuzberg/src/core/extractor/helpers.rs +71 -0
  67. data/vendor/kreuzberg/src/core/extractor/legacy.rs +62 -0
  68. data/vendor/kreuzberg/src/core/extractor/mod.rs +490 -0
  69. data/vendor/kreuzberg/src/core/extractor/sync.rs +208 -0
  70. data/vendor/kreuzberg/src/core/mime.rs +15 -0
  71. data/vendor/kreuzberg/src/core/mod.rs +4 -1
  72. data/vendor/kreuzberg/src/core/pipeline/cache.rs +60 -0
  73. data/vendor/kreuzberg/src/core/pipeline/execution.rs +89 -0
  74. data/vendor/kreuzberg/src/core/pipeline/features.rs +108 -0
  75. data/vendor/kreuzberg/src/core/pipeline/format.rs +392 -0
  76. data/vendor/kreuzberg/src/core/pipeline/initialization.rs +67 -0
  77. data/vendor/kreuzberg/src/core/pipeline/mod.rs +135 -0
  78. data/vendor/kreuzberg/src/core/pipeline/tests.rs +975 -0
  79. data/vendor/kreuzberg/src/core/server_config/env.rs +90 -0
  80. data/vendor/kreuzberg/src/core/server_config/loader.rs +202 -0
  81. data/vendor/kreuzberg/src/core/server_config/mod.rs +380 -0
  82. data/vendor/kreuzberg/src/core/server_config/tests/basic_tests.rs +124 -0
  83. data/vendor/kreuzberg/src/core/server_config/tests/env_tests.rs +216 -0
  84. data/vendor/kreuzberg/src/core/server_config/tests/file_loading_tests.rs +341 -0
  85. data/vendor/kreuzberg/src/core/server_config/tests/mod.rs +5 -0
  86. data/vendor/kreuzberg/src/core/server_config/validation.rs +17 -0
  87. data/vendor/kreuzberg/src/embeddings.rs +136 -13
  88. data/vendor/kreuzberg/src/extraction/{archive.rs → archive/mod.rs} +45 -239
  89. data/vendor/kreuzberg/src/extraction/archive/sevenz.rs +98 -0
  90. data/vendor/kreuzberg/src/extraction/archive/tar.rs +118 -0
  91. data/vendor/kreuzberg/src/extraction/archive/zip.rs +101 -0
  92. data/vendor/kreuzberg/src/extraction/html/converter.rs +592 -0
  93. data/vendor/kreuzberg/src/extraction/html/image_handling.rs +95 -0
  94. data/vendor/kreuzberg/src/extraction/html/mod.rs +53 -0
  95. data/vendor/kreuzberg/src/extraction/html/processor.rs +659 -0
  96. data/vendor/kreuzberg/src/extraction/html/stack_management.rs +103 -0
  97. data/vendor/kreuzberg/src/extraction/html/types.rs +28 -0
  98. data/vendor/kreuzberg/src/extraction/mod.rs +6 -2
  99. data/vendor/kreuzberg/src/extraction/pptx/container.rs +159 -0
  100. data/vendor/kreuzberg/src/extraction/pptx/content_builder.rs +168 -0
  101. data/vendor/kreuzberg/src/extraction/pptx/elements.rs +132 -0
  102. data/vendor/kreuzberg/src/extraction/pptx/image_handling.rs +57 -0
  103. data/vendor/kreuzberg/src/extraction/pptx/metadata.rs +160 -0
  104. data/vendor/kreuzberg/src/extraction/pptx/mod.rs +558 -0
  105. data/vendor/kreuzberg/src/extraction/pptx/parser.rs +388 -0
  106. data/vendor/kreuzberg/src/extraction/transform/content.rs +205 -0
  107. data/vendor/kreuzberg/src/extraction/transform/elements.rs +211 -0
  108. data/vendor/kreuzberg/src/extraction/transform/mod.rs +480 -0
  109. data/vendor/kreuzberg/src/extraction/transform/types.rs +27 -0
  110. data/vendor/kreuzberg/src/extractors/archive.rs +2 -0
  111. data/vendor/kreuzberg/src/extractors/bibtex.rs +2 -0
  112. data/vendor/kreuzberg/src/extractors/djot_format/attributes.rs +134 -0
  113. data/vendor/kreuzberg/src/extractors/djot_format/conversion.rs +223 -0
  114. data/vendor/kreuzberg/src/extractors/djot_format/extractor.rs +172 -0
  115. data/vendor/kreuzberg/src/extractors/djot_format/mod.rs +24 -0
  116. data/vendor/kreuzberg/src/extractors/djot_format/parsing/block_handlers.rs +271 -0
  117. data/vendor/kreuzberg/src/extractors/djot_format/parsing/content_extraction.rs +257 -0
  118. data/vendor/kreuzberg/src/extractors/djot_format/parsing/event_handlers.rs +101 -0
  119. data/vendor/kreuzberg/src/extractors/djot_format/parsing/inline_handlers.rs +201 -0
  120. data/vendor/kreuzberg/src/extractors/djot_format/parsing/mod.rs +16 -0
  121. data/vendor/kreuzberg/src/extractors/djot_format/parsing/state.rs +78 -0
  122. data/vendor/kreuzberg/src/extractors/djot_format/parsing/table_extraction.rs +68 -0
  123. data/vendor/kreuzberg/src/extractors/djot_format/parsing/text_extraction.rs +61 -0
  124. data/vendor/kreuzberg/src/extractors/djot_format/rendering.rs +452 -0
  125. data/vendor/kreuzberg/src/extractors/docbook.rs +2 -0
  126. data/vendor/kreuzberg/src/extractors/docx.rs +12 -1
  127. data/vendor/kreuzberg/src/extractors/email.rs +2 -0
  128. data/vendor/kreuzberg/src/extractors/epub/content.rs +333 -0
  129. data/vendor/kreuzberg/src/extractors/epub/metadata.rs +137 -0
  130. data/vendor/kreuzberg/src/extractors/epub/mod.rs +186 -0
  131. data/vendor/kreuzberg/src/extractors/epub/parsing.rs +86 -0
  132. data/vendor/kreuzberg/src/extractors/excel.rs +4 -0
  133. data/vendor/kreuzberg/src/extractors/fictionbook.rs +2 -0
  134. data/vendor/kreuzberg/src/extractors/frontmatter_utils.rs +466 -0
  135. data/vendor/kreuzberg/src/extractors/html.rs +80 -8
  136. data/vendor/kreuzberg/src/extractors/image.rs +8 -1
  137. data/vendor/kreuzberg/src/extractors/jats/elements.rs +350 -0
  138. data/vendor/kreuzberg/src/extractors/jats/metadata.rs +21 -0
  139. data/vendor/kreuzberg/src/extractors/{jats.rs → jats/mod.rs} +10 -412
  140. data/vendor/kreuzberg/src/extractors/jats/parser.rs +52 -0
  141. data/vendor/kreuzberg/src/extractors/jupyter.rs +2 -0
  142. data/vendor/kreuzberg/src/extractors/latex/commands.rs +93 -0
  143. data/vendor/kreuzberg/src/extractors/latex/environments.rs +157 -0
  144. data/vendor/kreuzberg/src/extractors/latex/metadata.rs +27 -0
  145. data/vendor/kreuzberg/src/extractors/latex/mod.rs +146 -0
  146. data/vendor/kreuzberg/src/extractors/latex/parser.rs +231 -0
  147. data/vendor/kreuzberg/src/extractors/latex/utilities.rs +126 -0
  148. data/vendor/kreuzberg/src/extractors/markdown.rs +39 -162
  149. data/vendor/kreuzberg/src/extractors/mod.rs +9 -1
  150. data/vendor/kreuzberg/src/extractors/odt.rs +2 -0
  151. data/vendor/kreuzberg/src/extractors/opml/core.rs +165 -0
  152. data/vendor/kreuzberg/src/extractors/opml/mod.rs +31 -0
  153. data/vendor/kreuzberg/src/extractors/opml/parser.rs +479 -0
  154. data/vendor/kreuzberg/src/extractors/orgmode.rs +2 -0
  155. data/vendor/kreuzberg/src/extractors/pdf/extraction.rs +106 -0
  156. data/vendor/kreuzberg/src/extractors/{pdf.rs → pdf/mod.rs} +25 -324
  157. data/vendor/kreuzberg/src/extractors/pdf/ocr.rs +214 -0
  158. data/vendor/kreuzberg/src/extractors/pdf/pages.rs +51 -0
  159. data/vendor/kreuzberg/src/extractors/pptx.rs +9 -2
  160. data/vendor/kreuzberg/src/extractors/rst.rs +2 -0
  161. data/vendor/kreuzberg/src/extractors/rtf/encoding.rs +116 -0
  162. data/vendor/kreuzberg/src/extractors/rtf/formatting.rs +24 -0
  163. data/vendor/kreuzberg/src/extractors/rtf/images.rs +72 -0
  164. data/vendor/kreuzberg/src/extractors/rtf/metadata.rs +216 -0
  165. data/vendor/kreuzberg/src/extractors/rtf/mod.rs +142 -0
  166. data/vendor/kreuzberg/src/extractors/rtf/parser.rs +259 -0
  167. data/vendor/kreuzberg/src/extractors/rtf/tables.rs +83 -0
  168. data/vendor/kreuzberg/src/extractors/structured.rs +2 -0
  169. data/vendor/kreuzberg/src/extractors/text.rs +4 -0
  170. data/vendor/kreuzberg/src/extractors/typst.rs +2 -0
  171. data/vendor/kreuzberg/src/extractors/xml.rs +2 -0
  172. data/vendor/kreuzberg/src/keywords/processor.rs +14 -0
  173. data/vendor/kreuzberg/src/language_detection/processor.rs +10 -0
  174. data/vendor/kreuzberg/src/lib.rs +2 -2
  175. data/vendor/kreuzberg/src/mcp/errors.rs +312 -0
  176. data/vendor/kreuzberg/src/mcp/format.rs +211 -0
  177. data/vendor/kreuzberg/src/mcp/mod.rs +9 -3
  178. data/vendor/kreuzberg/src/mcp/params.rs +196 -0
  179. data/vendor/kreuzberg/src/mcp/server.rs +39 -1438
  180. data/vendor/kreuzberg/src/mcp/tools/cache.rs +179 -0
  181. data/vendor/kreuzberg/src/mcp/tools/extraction.rs +403 -0
  182. data/vendor/kreuzberg/src/mcp/tools/mime.rs +150 -0
  183. data/vendor/kreuzberg/src/mcp/tools/mod.rs +11 -0
  184. data/vendor/kreuzberg/src/ocr/backends/easyocr.rs +96 -0
  185. data/vendor/kreuzberg/src/ocr/backends/mod.rs +7 -0
  186. data/vendor/kreuzberg/src/ocr/backends/paddleocr.rs +27 -0
  187. data/vendor/kreuzberg/src/ocr/backends/tesseract.rs +134 -0
  188. data/vendor/kreuzberg/src/ocr/hocr.rs +60 -16
  189. data/vendor/kreuzberg/src/ocr/language_registry.rs +11 -235
  190. data/vendor/kreuzberg/src/ocr/mod.rs +1 -0
  191. data/vendor/kreuzberg/src/ocr/processor/config.rs +203 -0
  192. data/vendor/kreuzberg/src/ocr/processor/execution.rs +494 -0
  193. data/vendor/kreuzberg/src/ocr/processor/mod.rs +265 -0
  194. data/vendor/kreuzberg/src/ocr/processor/validation.rs +145 -0
  195. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +41 -24
  196. data/vendor/kreuzberg/src/pdf/bindings.rs +21 -8
  197. data/vendor/kreuzberg/src/pdf/hierarchy/bounding_box.rs +289 -0
  198. data/vendor/kreuzberg/src/pdf/hierarchy/clustering.rs +199 -0
  199. data/vendor/kreuzberg/src/pdf/{hierarchy.rs → hierarchy/extraction.rs} +6 -346
  200. data/vendor/kreuzberg/src/pdf/hierarchy/mod.rs +18 -0
  201. data/vendor/kreuzberg/src/plugins/extractor/mod.rs +319 -0
  202. data/vendor/kreuzberg/src/plugins/extractor/registry.rs +434 -0
  203. data/vendor/kreuzberg/src/plugins/extractor/trait.rs +391 -0
  204. data/vendor/kreuzberg/src/plugins/mod.rs +13 -0
  205. data/vendor/kreuzberg/src/plugins/ocr.rs +11 -0
  206. data/vendor/kreuzberg/src/plugins/processor/mod.rs +365 -0
  207. data/vendor/kreuzberg/src/plugins/processor/registry.rs +37 -0
  208. data/vendor/kreuzberg/src/plugins/processor/trait.rs +284 -0
  209. data/vendor/kreuzberg/src/plugins/registry/extractor.rs +416 -0
  210. data/vendor/kreuzberg/src/plugins/registry/mod.rs +116 -0
  211. data/vendor/kreuzberg/src/plugins/registry/ocr.rs +293 -0
  212. data/vendor/kreuzberg/src/plugins/registry/processor.rs +304 -0
  213. data/vendor/kreuzberg/src/plugins/registry/validator.rs +238 -0
  214. data/vendor/kreuzberg/src/plugins/validator/mod.rs +424 -0
  215. data/vendor/kreuzberg/src/plugins/validator/registry.rs +355 -0
  216. data/vendor/kreuzberg/src/plugins/validator/trait.rs +276 -0
  217. data/vendor/kreuzberg/src/stopwords/languages/asian.rs +40 -0
  218. data/vendor/kreuzberg/src/stopwords/languages/germanic.rs +36 -0
  219. data/vendor/kreuzberg/src/stopwords/languages/mod.rs +10 -0
  220. data/vendor/kreuzberg/src/stopwords/languages/other.rs +44 -0
  221. data/vendor/kreuzberg/src/stopwords/languages/romance.rs +36 -0
  222. data/vendor/kreuzberg/src/stopwords/languages/slavic.rs +36 -0
  223. data/vendor/kreuzberg/src/stopwords/mod.rs +7 -33
  224. data/vendor/kreuzberg/src/text/quality.rs +1 -1
  225. data/vendor/kreuzberg/src/text/quality_processor.rs +10 -0
  226. data/vendor/kreuzberg/src/text/token_reduction/core/analysis.rs +238 -0
  227. data/vendor/kreuzberg/src/text/token_reduction/core/mod.rs +8 -0
  228. data/vendor/kreuzberg/src/text/token_reduction/core/punctuation.rs +54 -0
  229. data/vendor/kreuzberg/src/text/token_reduction/core/reducer.rs +384 -0
  230. data/vendor/kreuzberg/src/text/token_reduction/core/sentence_selection.rs +68 -0
  231. data/vendor/kreuzberg/src/text/token_reduction/core/word_filtering.rs +156 -0
  232. data/vendor/kreuzberg/src/text/token_reduction/filters/general.rs +377 -0
  233. data/vendor/kreuzberg/src/text/token_reduction/filters/html.rs +51 -0
  234. data/vendor/kreuzberg/src/text/token_reduction/filters/markdown.rs +285 -0
  235. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +131 -246
  236. data/vendor/kreuzberg/src/types/djot.rs +209 -0
  237. data/vendor/kreuzberg/src/types/extraction.rs +301 -0
  238. data/vendor/kreuzberg/src/types/formats.rs +443 -0
  239. data/vendor/kreuzberg/src/types/metadata.rs +560 -0
  240. data/vendor/kreuzberg/src/types/mod.rs +281 -0
  241. data/vendor/kreuzberg/src/types/page.rs +182 -0
  242. data/vendor/kreuzberg/src/types/serde_helpers.rs +132 -0
  243. data/vendor/kreuzberg/src/types/tables.rs +39 -0
  244. data/vendor/kreuzberg/src/utils/quality/heuristics.rs +58 -0
  245. data/vendor/kreuzberg/src/utils/{quality.rs → quality/mod.rs} +168 -489
  246. data/vendor/kreuzberg/src/utils/quality/patterns.rs +117 -0
  247. data/vendor/kreuzberg/src/utils/quality/scoring.rs +178 -0
  248. data/vendor/kreuzberg/src/utils/string_pool/buffer_pool.rs +325 -0
  249. data/vendor/kreuzberg/src/utils/string_pool/interned.rs +102 -0
  250. data/vendor/kreuzberg/src/utils/string_pool/language_pool.rs +119 -0
  251. data/vendor/kreuzberg/src/utils/string_pool/mime_pool.rs +235 -0
  252. data/vendor/kreuzberg/src/utils/string_pool/mod.rs +41 -0
  253. data/vendor/kreuzberg/tests/api_chunk.rs +313 -0
  254. data/vendor/kreuzberg/tests/api_embed.rs +6 -9
  255. data/vendor/kreuzberg/tests/batch_orchestration.rs +1 -0
  256. data/vendor/kreuzberg/tests/concurrency_stress.rs +7 -0
  257. data/vendor/kreuzberg/tests/core_integration.rs +1 -0
  258. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +130 -0
  259. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +5 -14
  260. data/vendor/kreuzberg/tests/format_integration.rs +2 -0
  261. data/vendor/kreuzberg/tests/helpers/mod.rs +1 -0
  262. data/vendor/kreuzberg/tests/html_table_test.rs +11 -11
  263. data/vendor/kreuzberg/tests/ocr_configuration.rs +16 -0
  264. data/vendor/kreuzberg/tests/ocr_errors.rs +18 -0
  265. data/vendor/kreuzberg/tests/ocr_quality.rs +9 -0
  266. data/vendor/kreuzberg/tests/ocr_stress.rs +1 -0
  267. data/vendor/kreuzberg/tests/pipeline_integration.rs +50 -0
  268. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +13 -0
  269. data/vendor/kreuzberg/tests/plugin_system.rs +12 -0
  270. data/vendor/kreuzberg/tests/pptx_regression_tests.rs +504 -0
  271. data/vendor/kreuzberg/tests/registry_integration_tests.rs +2 -0
  272. data/vendor/kreuzberg-ffi/Cargo.toml +2 -1
  273. data/vendor/kreuzberg-ffi/benches/result_view_benchmark.rs +2 -0
  274. data/vendor/kreuzberg-ffi/kreuzberg.h +347 -178
  275. data/vendor/kreuzberg-ffi/src/config/html.rs +318 -0
  276. data/vendor/kreuzberg-ffi/src/config/loader.rs +154 -0
  277. data/vendor/kreuzberg-ffi/src/config/merge.rs +104 -0
  278. data/vendor/kreuzberg-ffi/src/config/mod.rs +385 -0
  279. data/vendor/kreuzberg-ffi/src/config/parse.rs +91 -0
  280. data/vendor/kreuzberg-ffi/src/config/serialize.rs +118 -0
  281. data/vendor/kreuzberg-ffi/src/config_builder.rs +598 -0
  282. data/vendor/kreuzberg-ffi/src/error.rs +46 -14
  283. data/vendor/kreuzberg-ffi/src/helpers.rs +10 -0
  284. data/vendor/kreuzberg-ffi/src/html_options.rs +421 -0
  285. data/vendor/kreuzberg-ffi/src/lib.rs +16 -0
  286. data/vendor/kreuzberg-ffi/src/panic_shield.rs +11 -0
  287. data/vendor/kreuzberg-ffi/src/plugins/ocr_backend.rs +2 -0
  288. data/vendor/kreuzberg-ffi/src/result.rs +148 -122
  289. data/vendor/kreuzberg-ffi/src/result_view.rs +4 -0
  290. data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
  291. metadata +201 -28
  292. data/vendor/kreuzberg/src/api/server.rs +0 -518
  293. data/vendor/kreuzberg/src/core/config.rs +0 -1914
  294. data/vendor/kreuzberg/src/core/config_validation.rs +0 -949
  295. data/vendor/kreuzberg/src/core/extractor.rs +0 -1200
  296. data/vendor/kreuzberg/src/core/pipeline.rs +0 -1223
  297. data/vendor/kreuzberg/src/core/server_config.rs +0 -1220
  298. data/vendor/kreuzberg/src/extraction/html.rs +0 -1830
  299. data/vendor/kreuzberg/src/extraction/pptx.rs +0 -3102
  300. data/vendor/kreuzberg/src/extractors/epub.rs +0 -696
  301. data/vendor/kreuzberg/src/extractors/latex.rs +0 -653
  302. data/vendor/kreuzberg/src/extractors/opml.rs +0 -635
  303. data/vendor/kreuzberg/src/extractors/rtf.rs +0 -809
  304. data/vendor/kreuzberg/src/ocr/processor.rs +0 -858
  305. data/vendor/kreuzberg/src/plugins/extractor.rs +0 -1042
  306. data/vendor/kreuzberg/src/plugins/processor.rs +0 -650
  307. data/vendor/kreuzberg/src/plugins/registry.rs +0 -1339
  308. data/vendor/kreuzberg/src/plugins/validator.rs +0 -967
  309. data/vendor/kreuzberg/src/text/token_reduction/core.rs +0 -832
  310. data/vendor/kreuzberg/src/types.rs +0 -1713
  311. data/vendor/kreuzberg/src/utils/string_pool.rs +0 -762
  312. data/vendor/kreuzberg-ffi/src/config.rs +0 -1341
data/vendor/kreuzberg/src/text/token_reduction/core.rs
@@ -1,832 +0,0 @@
-use crate::error::Result;
-use crate::text::token_reduction::{
-    cjk_utils::CjkTokenizer,
-    config::{ReductionLevel, TokenReductionConfig},
-    filters::FilterPipeline,
-    semantic::SemanticAnalyzer,
-    simd_text::{SimdTextProcessor, chunk_text_for_parallel},
-};
-use ahash::AHashMap;
-use once_cell::sync::Lazy;
-use rayon::prelude::*;
-use regex::Regex;
-use std::sync::Arc;
-use unicode_normalization::UnicodeNormalization;
-
-static REPEATED_EXCLAMATION: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"[!]{2,}").expect("Repeated exclamation regex pattern is valid and should compile"));
-static REPEATED_QUESTION: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"[?]{2,}").expect("Repeated question regex pattern is valid and should compile"));
-static REPEATED_COMMA: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"[,]{2,}").expect("Repeated comma regex pattern is valid and should compile"));
-
-/// Bonus added for sentences at the beginning or end of the document
-const SENTENCE_EDGE_POSITION_BONUS: f32 = 0.3;
-
-/// Bonus added for sentences with ideal word count (neither too short nor too long)
-const IDEAL_WORD_COUNT_BONUS: f32 = 0.2;
-
-/// Minimum word count for ideal sentence length
-const MIN_IDEAL_WORD_COUNT: usize = 3;
-
-/// Maximum word count for ideal sentence length
-const MAX_IDEAL_WORD_COUNT: usize = 25;
-
-/// Weight multiplier for numeric content density in sentences
-const NUMERIC_CONTENT_WEIGHT: f32 = 0.3;
-
-/// Weight multiplier for capitalized/acronym word density in sentences
-const CAPS_ACRONYM_WEIGHT: f32 = 0.25;
-
-/// Weight multiplier for long word density in sentences
-const LONG_WORD_WEIGHT: f32 = 0.2;
-
-/// Minimum character length for a word to be considered "long"
-const LONG_WORD_THRESHOLD: usize = 8;
-
-/// Weight multiplier for punctuation density in sentences
-const PUNCTUATION_DENSITY_WEIGHT: f32 = 0.15;
-
-/// Weight multiplier for word diversity ratio (unique words / total words)
-const DIVERSITY_RATIO_WEIGHT: f32 = 0.15;
-
-/// Weight multiplier for character entropy (measure of text randomness/information)
-const CHAR_ENTROPY_WEIGHT: f32 = 0.1;
-
-pub struct TokenReducer {
-    config: Arc<TokenReductionConfig>,
-    text_processor: SimdTextProcessor,
-    filter_pipeline: FilterPipeline,
-    semantic_analyzer: Option<SemanticAnalyzer>,
-    cjk_tokenizer: CjkTokenizer,
-    language: String,
-}
-
-impl TokenReducer {
-    pub fn new(config: &TokenReductionConfig, language_hint: Option<&str>) -> Result<Self> {
-        let config = Arc::new(config.clone());
-        let language = language_hint
-            .or(config.language_hint.as_deref())
-            .unwrap_or("en")
-            .to_string();
-
-        let text_processor = SimdTextProcessor::new();
-        let filter_pipeline = FilterPipeline::new(&config, &language)?;
-
-        let semantic_analyzer = if matches!(config.level, ReductionLevel::Aggressive | ReductionLevel::Maximum) {
-            Some(SemanticAnalyzer::new(&language))
-        } else {
-            None
-        };
-
-        Ok(Self {
-            config,
-            text_processor,
-            filter_pipeline,
-            semantic_analyzer,
-            cjk_tokenizer: CjkTokenizer::new(),
-            language,
-        })
-    }
-
-    /// Get the language code being used for stopwords and semantic analysis.
-    pub fn language(&self) -> &str {
-        &self.language
-    }
-
-    pub fn reduce(&self, text: &str) -> String {
-        if text.is_empty() || matches!(self.config.level, ReductionLevel::Off) {
-            return text.to_string();
-        }
-
-        let nfc_string;
-        let working_text = if text.is_ascii() {
-            text
-        } else {
-            nfc_string = text.nfc().collect::<String>();
-            &nfc_string
-        };
-
-        match self.config.level {
-            ReductionLevel::Off => working_text.to_string(),
-            ReductionLevel::Light => self.apply_light_reduction_optimized(working_text),
-            ReductionLevel::Moderate => self.apply_moderate_reduction_optimized(working_text),
-            ReductionLevel::Aggressive => self.apply_aggressive_reduction_optimized(working_text),
-            ReductionLevel::Maximum => self.apply_maximum_reduction_optimized(working_text),
-        }
-    }
-
-    pub fn batch_reduce(&self, texts: &[&str]) -> Vec<String> {
-        if !self.config.enable_parallel || texts.len() < 2 {
-            return texts.iter().map(|text| self.reduce(text)).collect();
-        }
-
-        texts.par_iter().map(|text| self.reduce(text)).collect()
-    }
-
-    fn apply_light_reduction_optimized(&self, text: &str) -> String {
-        let mut result = if self.config.use_simd {
-            self.text_processor.clean_punctuation(text)
-        } else {
-            self.clean_punctuation_optimized(text)
-        };
-
-        result = self.filter_pipeline.apply_light_filters(&result);
-        result.trim().to_string()
-    }
-
-    fn apply_moderate_reduction_optimized(&self, text: &str) -> String {
-        let mut result = self.apply_light_reduction_optimized(text);
-
-        result = if self.config.enable_parallel && text.len() > 1000 {
-            self.apply_parallel_moderate_reduction(&result)
-        } else {
-            self.filter_pipeline.apply_moderate_filters(&result)
-        };
-
-        result
-    }
-
-    fn apply_aggressive_reduction_optimized(&self, text: &str) -> String {
-        let mut result = self.apply_moderate_reduction_optimized(text);
-
-        result = self.remove_additional_common_words(&result);
-        result = self.apply_sentence_selection(&result);
-
-        if let Some(ref analyzer) = self.semantic_analyzer {
-            result = analyzer.apply_semantic_filtering(&result, self.config.semantic_threshold);
-        }
-
-        result
-    }
-
-    fn apply_maximum_reduction_optimized(&self, text: &str) -> String {
-        let mut result = self.apply_aggressive_reduction_optimized(text);
-
-        if let Some(ref analyzer) = self.semantic_analyzer
-            && self.config.enable_semantic_clustering
-        {
-            result = analyzer.apply_hypernym_compression(&result, self.config.target_reduction);
-        }
-
-        result
-    }
-
-    fn apply_parallel_moderate_reduction(&self, text: &str) -> String {
-        let num_threads = rayon::current_num_threads();
-        let chunks = chunk_text_for_parallel(text, num_threads);
-
-        let processed_chunks: Vec<String> = chunks
-            .par_iter()
-            .map(|chunk| self.filter_pipeline.apply_moderate_filters(chunk))
-            .collect();
-
-        processed_chunks.join(" ")
-    }
-
-    fn clean_punctuation_optimized(&self, text: &str) -> String {
-        use std::borrow::Cow;
-
-        let mut result = Cow::Borrowed(text);
-
-        if REPEATED_EXCLAMATION.is_match(&result) {
-            result = Cow::Owned(REPEATED_EXCLAMATION.replace_all(&result, "!").into_owned());
-        }
-        if REPEATED_QUESTION.is_match(&result) {
-            result = Cow::Owned(REPEATED_QUESTION.replace_all(&result, "?").into_owned());
-        }
-        if REPEATED_COMMA.is_match(&result) {
-            result = Cow::Owned(REPEATED_COMMA.replace_all(&result, ",").into_owned());
-        }
-
-        result.into_owned()
-    }
-
-    fn remove_additional_common_words(&self, text: &str) -> String {
-        let words = self.universal_tokenize(text);
-
-        if words.len() < 4 {
-            return text.to_string();
-        }
-
-        let estimated_unique = (words.len() as f32 * 0.7).ceil() as usize;
-        let mut word_freq = AHashMap::with_capacity(estimated_unique);
-
-        let mut word_lengths = Vec::with_capacity(words.len());
-
-        for word in &words {
-            let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
-                word.to_lowercase()
-            } else {
-                word.chars()
-                    .filter(|c| c.is_alphabetic())
-                    .collect::<String>()
-                    .to_lowercase()
-            };
-
-            if !clean_word.is_empty() {
-                *word_freq.entry(clean_word.clone()).or_insert(0) += 1;
-                word_lengths.push(clean_word.chars().count());
-            }
-        }
-
-        let avg_length = if !word_lengths.is_empty() {
-            word_lengths.iter().sum::<usize>() as f32 / word_lengths.len() as f32
-        } else {
-            5.0
-        };
-
-        let original_count = words.len();
-        let has_cjk_content = text.chars().any(|c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
-
-        let mut filtered_words = Vec::with_capacity(words.len());
-        for word in &words {
-            let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
-                word.to_lowercase()
-            } else {
-                word.chars()
-                    .filter(|c| c.is_alphabetic())
-                    .collect::<String>()
-                    .to_lowercase()
-            };
-
-            if clean_word.is_empty() {
-                filtered_words.push(word.clone());
-            } else {
-                let freq = word_freq.get(&clean_word).unwrap_or(&0);
-                let word_len = clean_word.chars().count() as f32;
-
-                if self.has_important_characteristics(word)
-                    || (*freq <= 2 && word_len >= avg_length * 0.8)
-                    || (word_len >= avg_length * 1.5)
-                {
-                    filtered_words.push(word.clone());
-                }
-            }
-        }
-
-        let fallback_threshold = if has_cjk_content {
-            original_count / 5
-        } else {
-            original_count / 3
-        };
-
-        if filtered_words.len() < fallback_threshold {
-            let mut fallback_words = Vec::with_capacity(words.len());
-            for word in &words {
-                let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
-                    word.to_lowercase()
-                } else {
-                    word.chars().filter(|c| c.is_alphabetic()).collect::<String>()
-                };
-
-                if clean_word.is_empty() || clean_word.chars().count() >= 3 || self.has_important_characteristics(word)
-                {
-                    fallback_words.push(word.clone());
-                }
-            }
-            self.smart_join(&fallback_words, has_cjk_content)
-        } else {
-            self.smart_join(&filtered_words, has_cjk_content)
-        }
-    }
-
-    fn smart_join(&self, tokens: &[String], has_cjk_content: bool) -> String {
-        if has_cjk_content {
-            tokens.join("")
-        } else {
-            tokens.join(" ")
-        }
-    }
-
-    fn has_important_characteristics(&self, word: &str) -> bool {
-        if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
-            return true;
-        }
-
-        if word.chars().any(|c| c.is_numeric()) {
-            return true;
-        }
-
-        if word.len() > 10 {
-            return true;
-        }
-
-        let uppercase_count = word.chars().filter(|c| c.is_uppercase()).count();
-        if uppercase_count > 1 && uppercase_count < word.len() {
-            return true;
-        }
-
-        if self.has_cjk_importance(word) {
-            return true;
-        }
-
-        false
-    }
-
-    fn has_cjk_importance(&self, word: &str) -> bool {
-        let chars: Vec<char> = word.chars().collect();
-
-        let has_cjk = chars.iter().any(|&c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
-        if !has_cjk {
-            return false;
-        }
-
-        let important_radicals = [
-            '学', '智', '能', '技', '术', '法', '算', '理', '科', '研', '究', '发', '展', '系', '统', '模', '型', '方',
-            '式', '过', '程', '结', '构', '功', '效', '应', '分', '析', '计', '算', '数', '据', '信', '息', '处', '理',
-            '语', '言', '文', '生', '成', '产', '用', '作', '为', '成', '变', '化', '转', '换', '提', '高', '网', '络',
-            '神', '经', '机', '器', '人', '工', '智', '能', '自', '然', '复',
-        ];
-
-        for &char in &chars {
-            if important_radicals.contains(&char) {
-                return true;
-            }
-        }
-
-        if chars.len() == 2 && has_cjk {
-            let has_technical = chars.iter().any(|&c| {
-                let code = c as u32;
-                (0x4E00..=0x4FFF).contains(&code)
-                    || (0x5000..=0x51FF).contains(&code)
-                    || (0x6700..=0x68FF).contains(&code)
-                    || (0x7500..=0x76FF).contains(&code)
-            });
-
-            if has_technical {
-                return true;
-            }
-        }
-
-        false
-    }
-
-    fn apply_sentence_selection(&self, text: &str) -> String {
-        let sentences: Vec<&str> = text
-            .split(['.', '!', '?'])
-            .map(|s| s.trim())
-            .filter(|s| !s.is_empty())
-            .collect();
-
-        if sentences.len() <= 2 {
-            return text.to_string();
-        }
-
-        let mut scored_sentences: Vec<(usize, f32, &str)> = sentences
-            .iter()
-            .enumerate()
-            .map(|(i, sentence)| {
-                let score = self.score_sentence_importance(sentence, i, sentences.len());
-                (i, score, *sentence)
-            })
-            .collect();
-
-        scored_sentences.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
-
-        let keep_count = ((sentences.len() as f32 * 0.4).ceil() as usize).max(1);
-        let mut selected_indices: Vec<usize> = scored_sentences[..keep_count].iter().map(|(i, _, _)| *i).collect();
-
-        selected_indices.sort();
-
-        let selected_sentences: Vec<&str> = selected_indices
-            .iter()
-            .filter_map(|&i| sentences.get(i))
-            .copied()
-            .collect();
-
-        if selected_sentences.is_empty() {
-            text.to_string()
-        } else {
-            selected_sentences.join(". ")
-        }
-    }
-
-    fn score_sentence_importance(&self, sentence: &str, position: usize, total_sentences: usize) -> f32 {
-        let mut score = 0.0;
-
-        if position == 0 || position == total_sentences - 1 {
-            score += SENTENCE_EDGE_POSITION_BONUS;
-        }
-
-        let words: Vec<&str> = sentence.split_whitespace().collect();
-        if words.is_empty() {
-            return score;
-        }
-
-        let word_count = words.len();
-        if (MIN_IDEAL_WORD_COUNT..=MAX_IDEAL_WORD_COUNT).contains(&word_count) {
-            score += IDEAL_WORD_COUNT_BONUS;
-        }
-
-        let mut numeric_count = 0;
-        let mut caps_count = 0;
-        let mut long_word_count = 0;
-        let mut punct_density = 0;
-
-        for word in &words {
-            if word.chars().any(|c| c.is_numeric()) {
-                numeric_count += 1;
-            }
-
-            if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
-                caps_count += 1;
-            }
-
-            if word.len() > LONG_WORD_THRESHOLD {
-                long_word_count += 1;
-            }
-
-            punct_density += word.chars().filter(|c| c.is_ascii_punctuation()).count();
-        }
-
-        score += (numeric_count as f32 / words.len() as f32) * NUMERIC_CONTENT_WEIGHT;
-        score += (caps_count as f32 / words.len() as f32) * CAPS_ACRONYM_WEIGHT;
-        score += (long_word_count as f32 / words.len() as f32) * LONG_WORD_WEIGHT;
-        score += (punct_density as f32 / sentence.len() as f32) * PUNCTUATION_DENSITY_WEIGHT;
-
-        let estimated_unique = (words.len() as f32 * 0.6).ceil() as usize;
-        let mut unique_words: ahash::AHashSet<String> = ahash::AHashSet::with_capacity(estimated_unique.max(10));
-
-        for w in &words {
-            let clean = w
-                .chars()
-                .filter(|c| c.is_alphabetic())
-                .collect::<String>()
-                .to_lowercase();
-            unique_words.insert(clean);
-
-            if unique_words.len() >= estimated_unique {
-                break;
-            }
-        }
-
-        let final_unique_count = if unique_words.len() >= estimated_unique {
-            unique_words.len()
-        } else {
-            for w in &words {
-                let clean = w
-                    .chars()
-                    .filter(|c| c.is_alphabetic())
-                    .collect::<String>()
-                    .to_lowercase();
-                unique_words.insert(clean);
-            }
-            unique_words.len()
-        };
-
-        let diversity_ratio = final_unique_count as f32 / words.len() as f32;
-        score += diversity_ratio * DIVERSITY_RATIO_WEIGHT;
-
-        let char_entropy = self.calculate_char_entropy(sentence);
-        score += char_entropy * CHAR_ENTROPY_WEIGHT;
-
-        score
-    }
-
-    fn universal_tokenize(&self, text: &str) -> Vec<String> {
-        self.cjk_tokenizer.tokenize_mixed_text(text)
-    }
-
-    fn calculate_char_entropy(&self, text: &str) -> f32 {
-        let chars: Vec<char> = text.chars().collect();
-        if chars.is_empty() {
-            return 0.0;
-        }
-
-        let estimated_unique = (chars.len() as f32 * 0.1).ceil() as usize;
-        let mut char_freq = AHashMap::with_capacity(estimated_unique.max(26));
-
-        for &ch in &chars {
-            let lowercase_ch = ch
-                .to_lowercase()
-                .next()
-                .expect("to_lowercase() must yield at least one character for valid Unicode");
-            *char_freq.entry(lowercase_ch).or_insert(0) += 1;
-        }
-
-        let total_chars = chars.len() as f32;
-        char_freq
-            .values()
-            .map(|&freq| {
-                let p = freq as f32 / total_chars;
-                if p > 0.0 { -p * p.log2() } else { 0.0 }
-            })
-            .sum::<f32>()
-            .min(5.0)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_light_reduction() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            use_simd: false,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "Hello world!!! How are you???";
-        let result = reducer.reduce(input);
-
-        assert!(result.len() < input.len());
-        assert!(!result.contains("  "));
-    }
-
-    #[test]
-    fn test_moderate_reduction() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Moderate,
-            use_simd: false,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
-        let input = "The quick brown fox is jumping over the lazy dog";
-        let result = reducer.reduce(input);
-
-        assert!(result.len() < input.len());
-        assert!(result.contains("quick"));
-        assert!(result.contains("brown"));
-        assert!(result.contains("fox"));
-    }
-
-    #[test]
-    fn test_batch_processing() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            enable_parallel: false,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let inputs = vec!["Hello world!", "How are you?", "Fine, thanks!"];
-        let results = reducer.batch_reduce(&inputs);
-
-        assert_eq!(results.len(), inputs.len());
-        for result in &results {
-            assert!(!result.contains("  "));
-        }
-    }
-
-    #[test]
-    fn test_aggressive_reduction() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Aggressive,
-            use_simd: false,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
-        let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
-        let result = reducer.reduce(input);
-
-        assert!(result.len() < input.len());
-        assert!(!result.is_empty());
-    }
-
-    #[test]
-    fn test_maximum_reduction() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Maximum,
-            use_simd: false,
-            enable_semantic_clustering: true,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
-        let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
-        let result = reducer.reduce(input);
-
-        assert!(result.len() < input.len());
-        assert!(!result.is_empty());
-    }
-
-    #[test]
-    fn test_empty_text_handling() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Moderate,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        assert_eq!(reducer.reduce(""), "");
-        let result = reducer.reduce(" ");
-        assert!(result == " " || result.is_empty());
-    }
-
-    #[test]
-    fn test_off_mode_preserves_text() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Off,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "Text with multiple spaces!!!";
-        assert_eq!(reducer.reduce(input), input);
-    }
-
-    #[test]
-    fn test_parallel_batch_processing() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            enable_parallel: true,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let inputs = vec![
-            "First text with spaces",
-            "Second text with spaces",
-            "Third text with spaces",
-        ];
-        let results = reducer.batch_reduce(&inputs);
-
-        assert_eq!(results.len(), inputs.len());
-        for result in &results {
-            assert!(!result.contains("  "));
-        }
-    }
-
-    #[test]
-    fn test_cjk_text_handling() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Moderate,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, Some("zh")).unwrap();
-        let input = "这是中文文本测试";
-        let result = reducer.reduce(input);
-
-        assert!(!result.is_empty());
-    }
-
-    #[test]
-    fn test_mixed_language_text() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Moderate,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "This is English text 这是中文 and some more English";
-        let result = reducer.reduce(input);
-
-        assert!(!result.is_empty());
-        assert!(result.contains("English") || result.contains("中"));
-    }
-
-    #[test]
-    fn test_punctuation_normalization() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "Text!!!!!! with????? excessive,,,,,, punctuation";
-        let result = reducer.reduce(input);
-
-        assert!(!result.contains("!!!!!!"));
-        assert!(!result.contains("?????"));
-        assert!(!result.contains(",,,,,,"));
-    }
-
-    #[test]
-    fn test_sentence_selection() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Aggressive,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "First sentence here. Second sentence with more words. Third one. Fourth sentence is even longer than the others.";
-        let result = reducer.reduce(input);
-
-        assert!(result.len() < input.len());
-        assert!(result.split(". ").count() < 4);
-    }
-
-    #[test]
-    fn test_unicode_normalization_ascii() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "Pure ASCII text without special characters";
-        let result = reducer.reduce(input);
-
-        assert!(result.contains("ASCII"));
-    }
-
-    #[test]
-    fn test_unicode_normalization_non_ascii() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "Café naïve résumé";
-        let result = reducer.reduce(input);
-
-        assert!(result.contains("Café") || result.contains("Cafe"));
-    }
-
-    #[test]
-    fn test_single_text_vs_batch() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Moderate,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let text = "The quick brown fox jumps over the lazy dog";
-
-        let single_result = reducer.reduce(text);
-        let batch_results = reducer.batch_reduce(&[text]);
-
-        assert_eq!(single_result, batch_results[0]);
-    }
-
-    #[test]
-    fn test_important_word_preservation() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Aggressive,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "The IMPORTANT word COVID-19 and 12345 numbers should be preserved";
-        let result = reducer.reduce(input);
-
-        assert!(result.contains("IMPORTANT") || result.contains("COVID") || result.contains("12345"));
-    }
-
-    #[test]
-    fn test_technical_terms_preservation() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Aggressive,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "The implementation uses PyTorch and TensorFlow frameworks";
-        let result = reducer.reduce(input);
-
-        assert!(result.contains("PyTorch") || result.contains("TensorFlow"));
-    }
-
-    #[test]
-    fn test_calculate_char_entropy() {
-        let config = TokenReductionConfig::default();
-        let reducer = TokenReducer::new(&config, None).unwrap();
-
-        let low_entropy = reducer.calculate_char_entropy("aaaaaaa");
-        assert!(low_entropy < 1.0);
-
-        let high_entropy = reducer.calculate_char_entropy("abcdefg123");
-        assert!(high_entropy > low_entropy);
-    }
-
-    #[test]
-    fn test_universal_tokenize_english() {
-        let config = TokenReductionConfig::default();
-        let reducer = TokenReducer::new(&config, None).unwrap();
-
-        let tokens = reducer.universal_tokenize("hello world test");
-        assert_eq!(tokens, vec!["hello", "world", "test"]);
-    }
-
-    #[test]
-    fn test_universal_tokenize_cjk() {
-        let config = TokenReductionConfig::default();
-        let reducer = TokenReducer::new(&config, None).unwrap();
-
-        let tokens = reducer.universal_tokenize("中文");
-        assert!(!tokens.is_empty());
-    }
-
-    #[test]
-    fn test_fallback_threshold() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Aggressive,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-
-        let input = "a the is of to in for on at by";
-        let result = reducer.reduce(input);
-
-        assert!(!result.is_empty());
-    }
-}