@lokascript/semantic 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +686 -0
- package/dist/browser-ar.ar.global.js +2 -0
- package/dist/browser-core.core.global.js +2 -0
- package/dist/browser-de.de.global.js +2 -0
- package/dist/browser-east-asian.east-asian.global.js +2 -0
- package/dist/browser-en-tr.en-tr.global.js +2 -0
- package/dist/browser-en.en.global.js +2 -0
- package/dist/browser-es-en.es-en.global.js +2 -0
- package/dist/browser-es.es.global.js +2 -0
- package/dist/browser-fr.fr.global.js +2 -0
- package/dist/browser-id.id.global.js +2 -0
- package/dist/browser-ja.ja.global.js +2 -0
- package/dist/browser-ko.ko.global.js +2 -0
- package/dist/browser-lazy.lazy.global.js +2 -0
- package/dist/browser-priority.priority.global.js +2 -0
- package/dist/browser-pt.pt.global.js +2 -0
- package/dist/browser-qu.qu.global.js +2 -0
- package/dist/browser-sw.sw.global.js +2 -0
- package/dist/browser-tr.tr.global.js +2 -0
- package/dist/browser-western.western.global.js +2 -0
- package/dist/browser-zh.zh.global.js +2 -0
- package/dist/browser.global.js +3 -0
- package/dist/browser.global.js.map +1 -0
- package/dist/index.cjs +35051 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +3426 -0
- package/dist/index.d.ts +3426 -0
- package/dist/index.js +34890 -0
- package/dist/index.js.map +1 -0
- package/dist/languages/ar.d.ts +78 -0
- package/dist/languages/ar.js +1622 -0
- package/dist/languages/ar.js.map +1 -0
- package/dist/languages/de.d.ts +38 -0
- package/dist/languages/de.js +1168 -0
- package/dist/languages/de.js.map +1 -0
- package/dist/languages/en.d.ts +44 -0
- package/dist/languages/en.js +3491 -0
- package/dist/languages/en.js.map +1 -0
- package/dist/languages/es.d.ts +52 -0
- package/dist/languages/es.js +1493 -0
- package/dist/languages/es.js.map +1 -0
- package/dist/languages/fr.d.ts +37 -0
- package/dist/languages/fr.js +1159 -0
- package/dist/languages/fr.js.map +1 -0
- package/dist/languages/id.d.ts +35 -0
- package/dist/languages/id.js +1152 -0
- package/dist/languages/id.js.map +1 -0
- package/dist/languages/ja.d.ts +53 -0
- package/dist/languages/ja.js +1430 -0
- package/dist/languages/ja.js.map +1 -0
- package/dist/languages/ko.d.ts +51 -0
- package/dist/languages/ko.js +1729 -0
- package/dist/languages/ko.js.map +1 -0
- package/dist/languages/pt.d.ts +37 -0
- package/dist/languages/pt.js +1127 -0
- package/dist/languages/pt.js.map +1 -0
- package/dist/languages/qu.d.ts +36 -0
- package/dist/languages/qu.js +1143 -0
- package/dist/languages/qu.js.map +1 -0
- package/dist/languages/sw.d.ts +35 -0
- package/dist/languages/sw.js +1147 -0
- package/dist/languages/sw.js.map +1 -0
- package/dist/languages/tr.d.ts +45 -0
- package/dist/languages/tr.js +1529 -0
- package/dist/languages/tr.js.map +1 -0
- package/dist/languages/zh.d.ts +58 -0
- package/dist/languages/zh.js +1257 -0
- package/dist/languages/zh.js.map +1 -0
- package/dist/types-C4dcj53L.d.ts +600 -0
- package/package.json +202 -0
- package/src/__test-utils__/index.ts +7 -0
- package/src/__test-utils__/test-helpers.ts +8 -0
- package/src/__types__/test-helpers.ts +122 -0
- package/src/analysis/index.ts +479 -0
- package/src/ast-builder/command-mappers.ts +1133 -0
- package/src/ast-builder/expression-parser/index.ts +41 -0
- package/src/ast-builder/expression-parser/parser.ts +563 -0
- package/src/ast-builder/expression-parser/tokenizer.ts +394 -0
- package/src/ast-builder/expression-parser/types.ts +208 -0
- package/src/ast-builder/index.ts +536 -0
- package/src/ast-builder/value-converters.ts +172 -0
- package/src/bridge.ts +275 -0
- package/src/browser-ar.ts +162 -0
- package/src/browser-core.ts +231 -0
- package/src/browser-de.ts +162 -0
- package/src/browser-east-asian.ts +173 -0
- package/src/browser-en-tr.ts +165 -0
- package/src/browser-en.ts +157 -0
- package/src/browser-es-en.ts +200 -0
- package/src/browser-es.ts +170 -0
- package/src/browser-fr.ts +162 -0
- package/src/browser-id.ts +162 -0
- package/src/browser-ja.ts +162 -0
- package/src/browser-ko.ts +162 -0
- package/src/browser-lazy.ts +189 -0
- package/src/browser-priority.ts +214 -0
- package/src/browser-pt.ts +162 -0
- package/src/browser-qu.ts +162 -0
- package/src/browser-sw.ts +162 -0
- package/src/browser-tr.ts +162 -0
- package/src/browser-western.ts +181 -0
- package/src/browser-zh.ts +162 -0
- package/src/browser.ts +268 -0
- package/src/cache/index.ts +14 -0
- package/src/cache/semantic-cache.ts +344 -0
- package/src/core-bridge.ts +372 -0
- package/src/explicit/converter.ts +258 -0
- package/src/explicit/index.ts +18 -0
- package/src/explicit/parser.ts +236 -0
- package/src/explicit/renderer.ts +424 -0
- package/src/generators/command-schemas.ts +1636 -0
- package/src/generators/event-handler-generator.ts +109 -0
- package/src/generators/index.ts +117 -0
- package/src/generators/language-profiles.ts +139 -0
- package/src/generators/pattern-generator.ts +537 -0
- package/src/generators/profiles/arabic.ts +131 -0
- package/src/generators/profiles/bengali.ts +132 -0
- package/src/generators/profiles/chinese.ts +124 -0
- package/src/generators/profiles/english.ts +113 -0
- package/src/generators/profiles/french.ts +125 -0
- package/src/generators/profiles/german.ts +126 -0
- package/src/generators/profiles/hindi.ts +146 -0
- package/src/generators/profiles/index.ts +46 -0
- package/src/generators/profiles/indonesian.ts +125 -0
- package/src/generators/profiles/italian.ts +139 -0
- package/src/generators/profiles/japanese.ts +149 -0
- package/src/generators/profiles/korean.ts +127 -0
- package/src/generators/profiles/marker-templates.ts +288 -0
- package/src/generators/profiles/ms.ts +130 -0
- package/src/generators/profiles/polish.ts +249 -0
- package/src/generators/profiles/portuguese.ts +115 -0
- package/src/generators/profiles/quechua.ts +113 -0
- package/src/generators/profiles/russian.ts +260 -0
- package/src/generators/profiles/spanish.ts +130 -0
- package/src/generators/profiles/swahili.ts +129 -0
- package/src/generators/profiles/thai.ts +132 -0
- package/src/generators/profiles/tl.ts +128 -0
- package/src/generators/profiles/turkish.ts +124 -0
- package/src/generators/profiles/types.ts +165 -0
- package/src/generators/profiles/ukrainian.ts +270 -0
- package/src/generators/profiles/vietnamese.ts +133 -0
- package/src/generators/schema-error-codes.ts +160 -0
- package/src/generators/schema-validator.ts +391 -0
- package/src/index.ts +429 -0
- package/src/language-building-schema.ts +3170 -0
- package/src/language-loader.ts +394 -0
- package/src/languages/_all.ts +65 -0
- package/src/languages/ar.ts +15 -0
- package/src/languages/bn.ts +16 -0
- package/src/languages/de.ts +15 -0
- package/src/languages/en.ts +29 -0
- package/src/languages/es.ts +15 -0
- package/src/languages/fr.ts +15 -0
- package/src/languages/hi.ts +26 -0
- package/src/languages/id.ts +15 -0
- package/src/languages/index.ts +18 -0
- package/src/languages/it.ts +15 -0
- package/src/languages/ja.ts +15 -0
- package/src/languages/ko.ts +15 -0
- package/src/languages/ms.ts +16 -0
- package/src/languages/pl.ts +18 -0
- package/src/languages/pt.ts +15 -0
- package/src/languages/qu.ts +15 -0
- package/src/languages/ru.ts +26 -0
- package/src/languages/sw.ts +15 -0
- package/src/languages/th.ts +16 -0
- package/src/languages/tl.ts +16 -0
- package/src/languages/tr.ts +15 -0
- package/src/languages/uk.ts +26 -0
- package/src/languages/vi.ts +16 -0
- package/src/languages/zh.ts +15 -0
- package/src/parser/index.ts +15 -0
- package/src/parser/pattern-matcher.ts +1181 -0
- package/src/parser/semantic-parser.ts +573 -0
- package/src/parser/utils/index.ts +35 -0
- package/src/parser/utils/marker-resolution.ts +111 -0
- package/src/parser/utils/possessive-keywords.ts +43 -0
- package/src/parser/utils/role-positioning.ts +70 -0
- package/src/parser/utils/type-validation.ts +134 -0
- package/src/patterns/add/ar.ts +71 -0
- package/src/patterns/add/bn.ts +70 -0
- package/src/patterns/add/hi.ts +69 -0
- package/src/patterns/add/index.ts +87 -0
- package/src/patterns/add/it.ts +61 -0
- package/src/patterns/add/ja.ts +93 -0
- package/src/patterns/add/ko.ts +74 -0
- package/src/patterns/add/ms.ts +30 -0
- package/src/patterns/add/pl.ts +62 -0
- package/src/patterns/add/ru.ts +62 -0
- package/src/patterns/add/th.ts +49 -0
- package/src/patterns/add/tl.ts +30 -0
- package/src/patterns/add/tr.ts +71 -0
- package/src/patterns/add/uk.ts +62 -0
- package/src/patterns/add/vi.ts +61 -0
- package/src/patterns/add/zh.ts +71 -0
- package/src/patterns/builders.ts +207 -0
- package/src/patterns/decrement/bn.ts +70 -0
- package/src/patterns/decrement/de.ts +42 -0
- package/src/patterns/decrement/hi.ts +68 -0
- package/src/patterns/decrement/index.ts +79 -0
- package/src/patterns/decrement/it.ts +69 -0
- package/src/patterns/decrement/ms.ts +30 -0
- package/src/patterns/decrement/pl.ts +58 -0
- package/src/patterns/decrement/ru.ts +58 -0
- package/src/patterns/decrement/th.ts +49 -0
- package/src/patterns/decrement/tl.ts +30 -0
- package/src/patterns/decrement/tr.ts +48 -0
- package/src/patterns/decrement/uk.ts +58 -0
- package/src/patterns/decrement/vi.ts +61 -0
- package/src/patterns/decrement/zh.ts +32 -0
- package/src/patterns/en.ts +302 -0
- package/src/patterns/event-handler/ar.ts +151 -0
- package/src/patterns/event-handler/bn.ts +72 -0
- package/src/patterns/event-handler/de.ts +117 -0
- package/src/patterns/event-handler/en.ts +117 -0
- package/src/patterns/event-handler/es.ts +136 -0
- package/src/patterns/event-handler/fr.ts +117 -0
- package/src/patterns/event-handler/hi.ts +64 -0
- package/src/patterns/event-handler/id.ts +117 -0
- package/src/patterns/event-handler/index.ts +119 -0
- package/src/patterns/event-handler/it.ts +54 -0
- package/src/patterns/event-handler/ja.ts +118 -0
- package/src/patterns/event-handler/ko.ts +133 -0
- package/src/patterns/event-handler/ms.ts +30 -0
- package/src/patterns/event-handler/pl.ts +62 -0
- package/src/patterns/event-handler/pt.ts +117 -0
- package/src/patterns/event-handler/qu.ts +66 -0
- package/src/patterns/event-handler/ru.ts +62 -0
- package/src/patterns/event-handler/shared.ts +270 -0
- package/src/patterns/event-handler/sw.ts +117 -0
- package/src/patterns/event-handler/th.ts +53 -0
- package/src/patterns/event-handler/tl.ts +30 -0
- package/src/patterns/event-handler/tr.ts +170 -0
- package/src/patterns/event-handler/uk.ts +62 -0
- package/src/patterns/event-handler/vi.ts +61 -0
- package/src/patterns/event-handler/zh.ts +150 -0
- package/src/patterns/get/ar.ts +49 -0
- package/src/patterns/get/bn.ts +47 -0
- package/src/patterns/get/de.ts +32 -0
- package/src/patterns/get/hi.ts +52 -0
- package/src/patterns/get/index.ts +83 -0
- package/src/patterns/get/it.ts +56 -0
- package/src/patterns/get/ja.ts +53 -0
- package/src/patterns/get/ko.ts +53 -0
- package/src/patterns/get/ms.ts +30 -0
- package/src/patterns/get/pl.ts +57 -0
- package/src/patterns/get/ru.ts +57 -0
- package/src/patterns/get/th.ts +29 -0
- package/src/patterns/get/tl.ts +30 -0
- package/src/patterns/get/uk.ts +57 -0
- package/src/patterns/get/vi.ts +48 -0
- package/src/patterns/grammar-transformed/index.ts +39 -0
- package/src/patterns/grammar-transformed/ja.ts +1713 -0
- package/src/patterns/grammar-transformed/ko.ts +1311 -0
- package/src/patterns/grammar-transformed/tr.ts +1067 -0
- package/src/patterns/hide/ar.ts +67 -0
- package/src/patterns/hide/bn.ts +47 -0
- package/src/patterns/hide/de.ts +36 -0
- package/src/patterns/hide/hi.ts +61 -0
- package/src/patterns/hide/index.ts +91 -0
- package/src/patterns/hide/it.ts +56 -0
- package/src/patterns/hide/ja.ts +69 -0
- package/src/patterns/hide/ko.ts +69 -0
- package/src/patterns/hide/ms.ts +30 -0
- package/src/patterns/hide/pl.ts +57 -0
- package/src/patterns/hide/ru.ts +57 -0
- package/src/patterns/hide/th.ts +29 -0
- package/src/patterns/hide/tl.ts +30 -0
- package/src/patterns/hide/tr.ts +65 -0
- package/src/patterns/hide/uk.ts +57 -0
- package/src/patterns/hide/vi.ts +56 -0
- package/src/patterns/hide/zh.ts +68 -0
- package/src/patterns/increment/bn.ts +70 -0
- package/src/patterns/increment/de.ts +36 -0
- package/src/patterns/increment/hi.ts +68 -0
- package/src/patterns/increment/index.ts +79 -0
- package/src/patterns/increment/it.ts +69 -0
- package/src/patterns/increment/ms.ts +30 -0
- package/src/patterns/increment/pl.ts +58 -0
- package/src/patterns/increment/ru.ts +58 -0
- package/src/patterns/increment/th.ts +49 -0
- package/src/patterns/increment/tl.ts +30 -0
- package/src/patterns/increment/tr.ts +52 -0
- package/src/patterns/increment/uk.ts +58 -0
- package/src/patterns/increment/vi.ts +61 -0
- package/src/patterns/increment/zh.ts +32 -0
- package/src/patterns/index.ts +84 -0
- package/src/patterns/languages/en/control-flow.ts +93 -0
- package/src/patterns/languages/en/fetch.ts +62 -0
- package/src/patterns/languages/en/index.ts +42 -0
- package/src/patterns/languages/en/repeat.ts +67 -0
- package/src/patterns/languages/en/set.ts +48 -0
- package/src/patterns/languages/en/swap.ts +38 -0
- package/src/patterns/languages/en/temporal.ts +57 -0
- package/src/patterns/put/ar.ts +74 -0
- package/src/patterns/put/bn.ts +53 -0
- package/src/patterns/put/en.ts +74 -0
- package/src/patterns/put/es.ts +74 -0
- package/src/patterns/put/hi.ts +69 -0
- package/src/patterns/put/id.ts +96 -0
- package/src/patterns/put/index.ts +99 -0
- package/src/patterns/put/it.ts +56 -0
- package/src/patterns/put/ja.ts +75 -0
- package/src/patterns/put/ko.ts +67 -0
- package/src/patterns/put/ms.ts +30 -0
- package/src/patterns/put/pl.ts +81 -0
- package/src/patterns/put/ru.ts +85 -0
- package/src/patterns/put/th.ts +32 -0
- package/src/patterns/put/tl.ts +30 -0
- package/src/patterns/put/tr.ts +67 -0
- package/src/patterns/put/uk.ts +85 -0
- package/src/patterns/put/vi.ts +72 -0
- package/src/patterns/put/zh.ts +62 -0
- package/src/patterns/registry.ts +163 -0
- package/src/patterns/remove/ar.ts +71 -0
- package/src/patterns/remove/bn.ts +68 -0
- package/src/patterns/remove/hi.ts +69 -0
- package/src/patterns/remove/index.ts +87 -0
- package/src/patterns/remove/it.ts +69 -0
- package/src/patterns/remove/ja.ts +74 -0
- package/src/patterns/remove/ko.ts +78 -0
- package/src/patterns/remove/ms.ts +30 -0
- package/src/patterns/remove/pl.ts +62 -0
- package/src/patterns/remove/ru.ts +62 -0
- package/src/patterns/remove/th.ts +49 -0
- package/src/patterns/remove/tl.ts +30 -0
- package/src/patterns/remove/tr.ts +78 -0
- package/src/patterns/remove/uk.ts +62 -0
- package/src/patterns/remove/vi.ts +61 -0
- package/src/patterns/remove/zh.ts +72 -0
- package/src/patterns/set/ar.ts +84 -0
- package/src/patterns/set/bn.ts +53 -0
- package/src/patterns/set/de.ts +84 -0
- package/src/patterns/set/es.ts +92 -0
- package/src/patterns/set/fr.ts +88 -0
- package/src/patterns/set/hi.ts +56 -0
- package/src/patterns/set/id.ts +84 -0
- package/src/patterns/set/index.ts +107 -0
- package/src/patterns/set/it.ts +56 -0
- package/src/patterns/set/ja.ts +86 -0
- package/src/patterns/set/ko.ts +85 -0
- package/src/patterns/set/ms.ts +30 -0
- package/src/patterns/set/pl.ts +57 -0
- package/src/patterns/set/pt.ts +84 -0
- package/src/patterns/set/ru.ts +57 -0
- package/src/patterns/set/th.ts +31 -0
- package/src/patterns/set/tl.ts +30 -0
- package/src/patterns/set/tr.ts +107 -0
- package/src/patterns/set/uk.ts +57 -0
- package/src/patterns/set/vi.ts +53 -0
- package/src/patterns/set/zh.ts +84 -0
- package/src/patterns/show/ar.ts +67 -0
- package/src/patterns/show/bn.ts +47 -0
- package/src/patterns/show/de.ts +32 -0
- package/src/patterns/show/fr.ts +32 -0
- package/src/patterns/show/hi.ts +61 -0
- package/src/patterns/show/index.ts +95 -0
- package/src/patterns/show/it.ts +56 -0
- package/src/patterns/show/ja.ts +69 -0
- package/src/patterns/show/ko.ts +73 -0
- package/src/patterns/show/ms.ts +30 -0
- package/src/patterns/show/pl.ts +57 -0
- package/src/patterns/show/ru.ts +57 -0
- package/src/patterns/show/th.ts +29 -0
- package/src/patterns/show/tl.ts +30 -0
- package/src/patterns/show/tr.ts +65 -0
- package/src/patterns/show/uk.ts +57 -0
- package/src/patterns/show/vi.ts +56 -0
- package/src/patterns/show/zh.ts +68 -0
- package/src/patterns/take/ar.ts +51 -0
- package/src/patterns/take/index.ts +31 -0
- package/src/patterns/toggle/ar.ts +61 -0
- package/src/patterns/toggle/bn.ts +70 -0
- package/src/patterns/toggle/en.ts +61 -0
- package/src/patterns/toggle/es.ts +61 -0
- package/src/patterns/toggle/hi.ts +80 -0
- package/src/patterns/toggle/index.ts +95 -0
- package/src/patterns/toggle/it.ts +69 -0
- package/src/patterns/toggle/ja.ts +156 -0
- package/src/patterns/toggle/ko.ts +113 -0
- package/src/patterns/toggle/ms.ts +30 -0
- package/src/patterns/toggle/pl.ts +62 -0
- package/src/patterns/toggle/ru.ts +62 -0
- package/src/patterns/toggle/th.ts +50 -0
- package/src/patterns/toggle/tl.ts +30 -0
- package/src/patterns/toggle/tr.ts +88 -0
- package/src/patterns/toggle/uk.ts +62 -0
- package/src/patterns/toggle/vi.ts +61 -0
- package/src/patterns/toggle/zh.ts +99 -0
- package/src/public-api.ts +286 -0
- package/src/registry.ts +441 -0
- package/src/tokenizers/arabic.ts +723 -0
- package/src/tokenizers/base.ts +1300 -0
- package/src/tokenizers/bengali.ts +289 -0
- package/src/tokenizers/chinese.ts +481 -0
- package/src/tokenizers/english.ts +416 -0
- package/src/tokenizers/french.ts +326 -0
- package/src/tokenizers/german.ts +324 -0
- package/src/tokenizers/hindi.ts +319 -0
- package/src/tokenizers/index.ts +127 -0
- package/src/tokenizers/indonesian.ts +306 -0
- package/src/tokenizers/italian.ts +458 -0
- package/src/tokenizers/japanese.ts +447 -0
- package/src/tokenizers/korean.ts +642 -0
- package/src/tokenizers/morphology/arabic-normalizer.ts +242 -0
- package/src/tokenizers/morphology/french-normalizer.ts +268 -0
- package/src/tokenizers/morphology/german-normalizer.ts +256 -0
- package/src/tokenizers/morphology/index.ts +46 -0
- package/src/tokenizers/morphology/italian-normalizer.ts +329 -0
- package/src/tokenizers/morphology/japanese-normalizer.ts +288 -0
- package/src/tokenizers/morphology/korean-normalizer.ts +428 -0
- package/src/tokenizers/morphology/polish-normalizer.ts +264 -0
- package/src/tokenizers/morphology/portuguese-normalizer.ts +310 -0
- package/src/tokenizers/morphology/spanish-normalizer.ts +327 -0
- package/src/tokenizers/morphology/turkish-normalizer.ts +412 -0
- package/src/tokenizers/morphology/types.ts +211 -0
- package/src/tokenizers/ms.ts +198 -0
- package/src/tokenizers/polish.ts +354 -0
- package/src/tokenizers/portuguese.ts +304 -0
- package/src/tokenizers/quechua.ts +339 -0
- package/src/tokenizers/russian.ts +375 -0
- package/src/tokenizers/spanish.ts +403 -0
- package/src/tokenizers/swahili.ts +303 -0
- package/src/tokenizers/thai.ts +236 -0
- package/src/tokenizers/tl.ts +198 -0
- package/src/tokenizers/turkish.ts +411 -0
- package/src/tokenizers/ukrainian.ts +369 -0
- package/src/tokenizers/vietnamese.ts +410 -0
- package/src/types/grammar-types.ts +617 -0
- package/src/types/unified-profile.ts +267 -0
- package/src/types.ts +709 -0
- package/src/utils/confidence-calculator.ts +147 -0
- package/src/validators/command-validator.ts +380 -0
- package/src/validators/index.ts +15 -0
@@ -0,0 +1,1430 @@
// src/registry.ts
var tokenizers = /* @__PURE__ */ new Map();
var profiles = /* @__PURE__ */ new Map();
var patternCache = /* @__PURE__ */ new Map();
function registerLanguage(code, tokenizer, profile) {
  tokenizers.set(code, tokenizer);
  profiles.set(code, profile);
  patternCache.delete(code);
}

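Illustrative usage sketch (not part of the package contents): registerLanguage stores a tokenizer and profile under a language code and deletes that code's cached patterns, so re-registering a language takes effect immediately. The argument values below are hypothetical placeholders, not objects shipped in this package.

// Hypothetical placeholders; any objects matching the expected tokenizer/profile shapes would do.
const demoTokenizer = {};
const demoProfile = { keywords: {}, references: {}, roleMarkers: {} };
registerLanguage("xx", demoTokenizer, demoProfile);
// Registering "xx" again overwrites the entry and invalidates its cached patterns.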
// src/tokenizers/base.ts
var TokenStreamImpl = class {
  constructor(tokens, language) {
    this.pos = 0;
    this.tokens = tokens;
    this.language = language;
  }
  peek(offset = 0) {
    const index = this.pos + offset;
    if (index < 0 || index >= this.tokens.length) {
      return null;
    }
    return this.tokens[index];
  }
  advance() {
    if (this.isAtEnd()) {
      throw new Error("Unexpected end of token stream");
    }
    return this.tokens[this.pos++];
  }
  isAtEnd() {
    return this.pos >= this.tokens.length;
  }
  mark() {
    return { position: this.pos };
  }
  reset(mark) {
    this.pos = mark.position;
  }
  position() {
    return this.pos;
  }
  /**
   * Get remaining tokens as an array.
   */
  remaining() {
    return this.tokens.slice(this.pos);
  }
  /**
   * Consume tokens while predicate is true.
   */
  takeWhile(predicate) {
    const result = [];
    while (!this.isAtEnd() && predicate(this.peek())) {
      result.push(this.advance());
    }
    return result;
  }
  /**
   * Skip tokens while predicate is true.
   */
  skipWhile(predicate) {
    while (!this.isAtEnd() && predicate(this.peek())) {
      this.advance();
    }
  }
};
function createPosition(start, end) {
  return { start, end };
}
function createToken(value, kind, position, normalizedOrOptions) {
  if (typeof normalizedOrOptions === "string") {
    return { value, kind, position, normalized: normalizedOrOptions };
  }
  if (normalizedOrOptions) {
    const { normalized: normalized2, stem, stemConfidence } = normalizedOrOptions;
    const token = { value, kind, position };
    if (normalized2 !== void 0) {
      token.normalized = normalized2;
    }
    if (stem !== void 0) {
      token.stem = stem;
      if (stemConfidence !== void 0) {
        token.stemConfidence = stemConfidence;
      }
    }
    return token;
  }
  return { value, kind, position };
}
function isWhitespace(char) {
  return /\s/.test(char);
}
function isSelectorStart(char) {
  return char === "#" || char === "." || char === "[" || char === "@" || char === "*" || char === "<";
}
function isQuote(char) {
  return char === '"' || char === "'" || char === "`" || char === "\u300C" || char === "\u300D";
}
function isDigit(char) {
  return /\d/.test(char);
}
function isAsciiLetter(char) {
  return /[a-zA-Z]/.test(char);
}
function isAsciiIdentifierChar(char) {
  return /[a-zA-Z0-9_-]/.test(char);
}
function createUnicodeRangeClassifier(ranges) {
  return (char) => {
    const code = char.charCodeAt(0);
    return ranges.some(([start, end]) => code >= start && code <= end);
  };
}
function combineClassifiers(...classifiers) {
  return (char) => classifiers.some((fn) => fn(char));
}
function extractCssSelector(input, startPos) {
  if (startPos >= input.length) return null;
  const char = input[startPos];
  if (!isSelectorStart(char)) return null;
  let pos = startPos;
  let selector = "";
  if (char === "#" || char === ".") {
    selector += input[pos++];
    while (pos < input.length && isAsciiIdentifierChar(input[pos])) {
      selector += input[pos++];
    }
    if (selector.length <= 1) return null;
    if (pos < input.length && input[pos] === "." && char === "#") {
      const methodStart = pos + 1;
      let methodEnd = methodStart;
      while (methodEnd < input.length && isAsciiIdentifierChar(input[methodEnd])) {
        methodEnd++;
      }
      if (methodEnd < input.length && input[methodEnd] === "(") {
        return selector;
      }
    }
  } else if (char === "[") {
    let depth = 1;
    let inQuote = false;
    let quoteChar = null;
    let escaped = false;
    selector += input[pos++];
    while (pos < input.length && depth > 0) {
      const c = input[pos];
      selector += c;
      if (escaped) {
        escaped = false;
      } else if (c === "\\") {
        escaped = true;
      } else if (inQuote) {
        if (c === quoteChar) {
          inQuote = false;
          quoteChar = null;
        }
      } else {
        if (c === '"' || c === "'" || c === "`") {
          inQuote = true;
          quoteChar = c;
        } else if (c === "[") {
          depth++;
        } else if (c === "]") {
          depth--;
        }
      }
      pos++;
    }
    if (depth !== 0) return null;
  } else if (char === "@") {
    selector += input[pos++];
    while (pos < input.length && isAsciiIdentifierChar(input[pos])) {
      selector += input[pos++];
    }
    if (selector.length <= 1) return null;
  } else if (char === "*") {
    selector += input[pos++];
    while (pos < input.length && isAsciiIdentifierChar(input[pos])) {
      selector += input[pos++];
    }
    if (selector.length <= 1) return null;
  } else if (char === "<") {
    selector += input[pos++];
    if (pos >= input.length || !isAsciiLetter(input[pos])) return null;
    while (pos < input.length && isAsciiIdentifierChar(input[pos])) {
      selector += input[pos++];
    }
    while (pos < input.length) {
      const modChar = input[pos];
      if (modChar === ".") {
        selector += input[pos++];
        if (pos >= input.length || !isAsciiIdentifierChar(input[pos])) {
          return null;
        }
        while (pos < input.length && isAsciiIdentifierChar(input[pos])) {
          selector += input[pos++];
        }
      } else if (modChar === "#") {
        selector += input[pos++];
        if (pos >= input.length || !isAsciiIdentifierChar(input[pos])) {
          return null;
        }
        while (pos < input.length && isAsciiIdentifierChar(input[pos])) {
          selector += input[pos++];
        }
      } else if (modChar === "[") {
        let depth = 1;
        let inQuote = false;
        let quoteChar = null;
        let escaped = false;
        selector += input[pos++];
        while (pos < input.length && depth > 0) {
          const c = input[pos];
          selector += c;
          if (escaped) {
            escaped = false;
          } else if (c === "\\") {
            escaped = true;
          } else if (inQuote) {
            if (c === quoteChar) {
              inQuote = false;
              quoteChar = null;
            }
          } else {
            if (c === '"' || c === "'" || c === "`") {
              inQuote = true;
              quoteChar = c;
            } else if (c === "[") {
              depth++;
            } else if (c === "]") {
              depth--;
            }
          }
          pos++;
        }
        if (depth !== 0) return null;
      } else {
        break;
      }
    }
    while (pos < input.length && isWhitespace(input[pos])) {
      selector += input[pos++];
    }
    if (pos < input.length && input[pos] === "/") {
      selector += input[pos++];
      while (pos < input.length && isWhitespace(input[pos])) {
        selector += input[pos++];
      }
    }
    if (pos >= input.length || input[pos] !== ">") return null;
    selector += input[pos++];
  }
  return selector || null;
}
function isPossessiveMarker(input, pos) {
  if (pos >= input.length || input[pos] !== "'") return false;
  if (pos + 1 >= input.length) return false;
  const nextChar = input[pos + 1].toLowerCase();
  if (nextChar !== "s") return false;
  if (pos + 2 >= input.length) return true;
  const afterS = input[pos + 2];
  return isWhitespace(afterS) || afterS === "*" || !isAsciiIdentifierChar(afterS);
}
function extractStringLiteral(input, startPos) {
  if (startPos >= input.length) return null;
  const openQuote = input[startPos];
  if (!isQuote(openQuote)) return null;
  if (openQuote === "'" && isPossessiveMarker(input, startPos)) {
    return null;
  }
  const closeQuoteMap = {
    '"': '"',
    "'": "'",
    "`": "`",
    "\u300C": "\u300D"
  };
  const closeQuote = closeQuoteMap[openQuote];
  if (!closeQuote) return null;
  let pos = startPos + 1;
  let literal = openQuote;
  let escaped = false;
  while (pos < input.length) {
    const char = input[pos];
    literal += char;
    if (escaped) {
      escaped = false;
    } else if (char === "\\") {
      escaped = true;
    } else if (char === closeQuote) {
      return literal;
    }
    pos++;
  }
  return literal;
}
function isUrlStart(input, pos) {
  if (pos >= input.length) return false;
  const char = input[pos];
  const next = input[pos + 1] || "";
  const third = input[pos + 2] || "";
  if (char === "/" && next !== "/" && /[a-zA-Z0-9._-]/.test(next)) {
    return true;
  }
  if (char === "/" && next === "/" && /[a-zA-Z]/.test(third)) {
    return true;
  }
  if (char === "." && (next === "/" || next === "." && third === "/")) {
    return true;
  }
  const slice = input.slice(pos, pos + 8).toLowerCase();
  if (slice.startsWith("http://") || slice.startsWith("https://")) {
    return true;
  }
  return false;
}
function extractUrl(input, startPos) {
  if (!isUrlStart(input, startPos)) return null;
  let pos = startPos;
  let url = "";
  const urlChars = /[a-zA-Z0-9/:._\-?&=%@+~!$'()*,;[\]]/;
  while (pos < input.length) {
    const char = input[pos];
    if (char === "#") {
      if (url.length > 0 && /[a-zA-Z0-9/.]$/.test(url)) {
        url += char;
        pos++;
        while (pos < input.length && /[a-zA-Z0-9_-]/.test(input[pos])) {
          url += input[pos++];
        }
      }
      break;
    }
    if (urlChars.test(char)) {
      url += char;
      pos++;
    } else {
      break;
    }
  }
  if (url.length < 2) return null;
  return url;
}
function extractNumber(input, startPos) {
  if (startPos >= input.length) return null;
  const char = input[startPos];
  if (!isDigit(char) && char !== "-" && char !== "+") return null;
  let pos = startPos;
  let number = "";
  if (input[pos] === "-" || input[pos] === "+") {
    number += input[pos++];
  }
  if (pos >= input.length || !isDigit(input[pos])) {
    return null;
  }
  while (pos < input.length && isDigit(input[pos])) {
    number += input[pos++];
  }
  if (pos < input.length && input[pos] === ".") {
    number += input[pos++];
    while (pos < input.length && isDigit(input[pos])) {
      number += input[pos++];
    }
  }
  if (pos < input.length) {
    const suffix = input.slice(pos, pos + 2);
    if (suffix === "ms") {
      number += "ms";
    } else if (input[pos] === "s" || input[pos] === "m" || input[pos] === "h") {
      number += input[pos];
    }
  }
  return number;
}
var _BaseTokenizer = class _BaseTokenizer {
  constructor() {
    /** Keywords derived from profile, sorted longest-first for greedy matching */
    this.profileKeywords = [];
    /** Map for O(1) keyword lookups by lowercase native word */
    this.profileKeywordMap = /* @__PURE__ */ new Map();
  }
  /**
   * Initialize keyword mappings from a language profile.
   * Builds a list of native→english mappings from:
   * - profile.keywords (primary + alternatives)
   * - profile.references (me, it, you, etc.)
   * - profile.roleMarkers (into, from, with, etc.)
   *
   * Results are sorted longest-first for greedy matching (important for non-space languages).
   * Extras take precedence over profile entries when there are duplicates.
   *
   * @param profile - Language profile containing keyword translations
   * @param extras - Additional keyword entries to include (literals, positional, events)
   */
  initializeKeywordsFromProfile(profile, extras = []) {
    const keywordMap = /* @__PURE__ */ new Map();
    if (profile.keywords) {
      for (const [normalized2, translation] of Object.entries(profile.keywords)) {
        keywordMap.set(translation.primary, {
          native: translation.primary,
          normalized: translation.normalized || normalized2
        });
        if (translation.alternatives) {
          for (const alt of translation.alternatives) {
            keywordMap.set(alt, {
              native: alt,
              normalized: translation.normalized || normalized2
            });
          }
        }
      }
    }
    if (profile.references) {
      for (const [normalized2, native] of Object.entries(profile.references)) {
        keywordMap.set(native, { native, normalized: normalized2 });
      }
    }
    if (profile.roleMarkers) {
      for (const [role, marker] of Object.entries(profile.roleMarkers)) {
        if (marker.primary) {
          keywordMap.set(marker.primary, { native: marker.primary, normalized: role });
        }
        if (marker.alternatives) {
          for (const alt of marker.alternatives) {
            keywordMap.set(alt, { native: alt, normalized: role });
          }
        }
      }
    }
    for (const extra of extras) {
      keywordMap.set(extra.native, extra);
    }
    this.profileKeywords = Array.from(keywordMap.values()).sort(
      (a, b) => b.native.length - a.native.length
    );
    this.profileKeywordMap = /* @__PURE__ */ new Map();
    for (const keyword of this.profileKeywords) {
      this.profileKeywordMap.set(keyword.native.toLowerCase(), keyword);
      const normalized2 = this.removeDiacritics(keyword.native);
      if (normalized2 !== keyword.native && !this.profileKeywordMap.has(normalized2.toLowerCase())) {
        this.profileKeywordMap.set(normalized2.toLowerCase(), keyword);
      }
    }
  }
  /**
   * Remove diacritical marks from a word for normalization.
   * Primarily for Arabic (shadda, fatha, kasra, damma, sukun, etc.)
   * but could be extended for other languages.
   *
   * @param word - Word to normalize
   * @returns Word without diacritics
   */
  removeDiacritics(word) {
    return word.replace(/[\u064B-\u0652\u0670]/g, "");
  }
  /**
   * Try to match a keyword from profile at the current position.
   * Uses longest-first greedy matching (important for non-space languages).
   *
   * @param input - Input string
   * @param pos - Current position
   * @returns Token if matched, null otherwise
   */
  tryProfileKeyword(input, pos) {
    for (const entry of this.profileKeywords) {
      if (input.slice(pos).startsWith(entry.native)) {
        return createToken(
          entry.native,
          "keyword",
          createPosition(pos, pos + entry.native.length),
          entry.normalized
        );
      }
    }
    return null;
  }
  /**
   * Check if the remaining input starts with any known keyword.
   * Useful for non-space languages to detect word boundaries.
   *
   * @param input - Input string
   * @param pos - Current position
   * @returns true if a keyword starts at this position
   */
  isKeywordStart(input, pos) {
    const remaining = input.slice(pos);
    return this.profileKeywords.some((entry) => remaining.startsWith(entry.native));
  }
  /**
   * Look up a keyword by native word (case-insensitive).
   * O(1) lookup using the keyword map.
   *
   * @param native - Native word to look up
   * @returns KeywordEntry if found, undefined otherwise
   */
  lookupKeyword(native) {
    return this.profileKeywordMap.get(native.toLowerCase());
  }
  /**
   * Check if a word is a known keyword (case-insensitive).
   * O(1) lookup using the keyword map.
   *
   * @param native - Native word to check
   * @returns true if the word is a keyword
   */
  isKeyword(native) {
    return this.profileKeywordMap.has(native.toLowerCase());
  }
  /**
   * Set the morphological normalizer for this tokenizer.
   */
  setNormalizer(normalizer) {
    this.normalizer = normalizer;
  }
  /**
   * Try to normalize a word using the morphological normalizer.
   * Returns null if no normalizer is set or normalization fails.
   *
   * Note: We don't check isNormalizable() here because the individual tokenizers
   * historically called normalize() directly without that check. The normalize()
   * method itself handles returning noChange() for words that can't be normalized.
   */
  tryNormalize(word) {
    if (!this.normalizer) return null;
    const result = this.normalizer.normalize(word);
    if (result.stem !== word && result.confidence >= 0.7) {
      return result;
    }
    return null;
  }
  /**
   * Try morphological normalization and keyword lookup.
   *
   * If the word can be normalized to a stem that matches a known keyword,
   * returns a keyword token with morphological metadata (stem, stemConfidence).
   *
   * This is the common pattern for handling conjugated verbs across languages:
   * 1. Normalize the word (e.g., "toggled" → "toggle")
   * 2. Look up the stem in the keyword map
   * 3. Create a token with both the original form and stem metadata
   *
   * @param word - The word to normalize and look up
   * @param startPos - Start position for the token
   * @param endPos - End position for the token
   * @returns Token if stem matches a keyword, null otherwise
   */
  tryMorphKeywordMatch(word, startPos, endPos) {
    const result = this.tryNormalize(word);
    if (!result) return null;
    const stemEntry = this.lookupKeyword(result.stem);
    if (!stemEntry) return null;
    const tokenOptions = {
      normalized: stemEntry.normalized,
      stem: result.stem,
      stemConfidence: result.confidence
    };
    return createToken(word, "keyword", createPosition(startPos, endPos), tokenOptions);
  }
  /**
   * Try to extract a CSS selector at the current position.
   */
  trySelector(input, pos) {
    const selector = extractCssSelector(input, pos);
    if (selector) {
      return createToken(selector, "selector", createPosition(pos, pos + selector.length));
    }
    return null;
  }
  /**
   * Try to extract an event modifier at the current position.
   * Event modifiers are .once, .debounce(N), .throttle(N), .queue(strategy)
   */
  tryEventModifier(input, pos) {
    if (input[pos] !== ".") {
      return null;
    }
    const match = input.slice(pos).match(/^\.(?:once|debounce|throttle|queue)(?:\(([^)]+)\))?(?:\s|$|\.)/);
    if (!match) {
      return null;
    }
    const fullMatch = match[0].replace(/(\s|\.)$/, "");
    const modifierName = fullMatch.slice(1).split("(")[0];
    const value = match[1];
    const token = createToken(
      fullMatch,
      "event-modifier",
      createPosition(pos, pos + fullMatch.length)
    );
    return {
      ...token,
      metadata: {
        modifierName,
        value: value ? modifierName === "queue" ? value : parseInt(value, 10) : void 0
      }
    };
  }
  /**
   * Try to extract a string literal at the current position.
   */
  tryString(input, pos) {
    const literal = extractStringLiteral(input, pos);
    if (literal) {
      return createToken(literal, "literal", createPosition(pos, pos + literal.length));
    }
    return null;
  }
  /**
   * Try to extract a number at the current position.
   */
  tryNumber(input, pos) {
    const number = extractNumber(input, pos);
    if (number) {
      return createToken(number, "literal", createPosition(pos, pos + number.length));
    }
    return null;
  }
  /**
   * Try to match a time unit from a list of patterns.
   *
   * @param input - Input string
   * @param pos - Position after the number
   * @param timeUnits - Array of time unit mappings (native pattern → standard suffix)
   * @param skipWhitespace - Whether to skip whitespace before time unit (default: false)
   * @returns Object with matched suffix and new position, or null if no match
   */
  tryMatchTimeUnit(input, pos, timeUnits, skipWhitespace = false) {
    let unitPos = pos;
    if (skipWhitespace) {
      while (unitPos < input.length && isWhitespace(input[unitPos])) {
        unitPos++;
      }
    }
    const remaining = input.slice(unitPos);
    for (const unit of timeUnits) {
      const candidate = remaining.slice(0, unit.length);
      const matches = unit.caseInsensitive ? candidate.toLowerCase() === unit.pattern.toLowerCase() : candidate === unit.pattern;
      if (matches) {
        if (unit.notFollowedBy) {
          const nextChar = remaining[unit.length] || "";
          if (nextChar === unit.notFollowedBy) continue;
        }
        if (unit.checkBoundary) {
          const nextChar = remaining[unit.length] || "";
          if (isAsciiIdentifierChar(nextChar)) continue;
        }
        return { suffix: unit.suffix, endPos: unitPos + unit.length };
      }
    }
    return null;
  }
  /**
   * Parse a base number (sign, integer, decimal) without time units.
   * Returns the number string and end position.
   *
   * @param input - Input string
   * @param startPos - Start position
   * @param allowSign - Whether to allow +/- sign (default: true)
   * @returns Object with number string and end position, or null
   */
  parseBaseNumber(input, startPos, allowSign = true) {
    let pos = startPos;
    let number = "";
    if (allowSign && (input[pos] === "-" || input[pos] === "+")) {
      number += input[pos++];
    }
    if (pos >= input.length || !isDigit(input[pos])) {
      return null;
    }
    while (pos < input.length && isDigit(input[pos])) {
      number += input[pos++];
    }
    if (pos < input.length && input[pos] === ".") {
      number += input[pos++];
      while (pos < input.length && isDigit(input[pos])) {
        number += input[pos++];
      }
    }
    if (!number || number === "-" || number === "+") return null;
    return { number, endPos: pos };
  }
  /**
   * Try to extract a number with native language time units.
   *
   * This is a template method that handles the common pattern:
   * 1. Parse the base number (sign, integer, decimal)
   * 2. Try to match native language time units
   * 3. Fall back to standard time units (ms, s, m, h)
   *
   * @param input - Input string
   * @param pos - Start position
   * @param nativeTimeUnits - Language-specific time unit mappings
   * @param options - Configuration options
   * @returns Token if number found, null otherwise
   */
  tryNumberWithTimeUnits(input, pos, nativeTimeUnits, options = {}) {
    const { allowSign = true, skipWhitespace = false } = options;
    const baseResult = this.parseBaseNumber(input, pos, allowSign);
    if (!baseResult) return null;
    let { number, endPos } = baseResult;
    const allUnits = [...nativeTimeUnits, ..._BaseTokenizer.STANDARD_TIME_UNITS];
    const timeMatch = this.tryMatchTimeUnit(input, endPos, allUnits, skipWhitespace);
    if (timeMatch) {
      number += timeMatch.suffix;
      endPos = timeMatch.endPos;
    }
    return createToken(number, "literal", createPosition(pos, endPos));
  }
  /**
   * Try to extract a URL at the current position.
   * Handles /path, ./path, ../path, //domain.com, http://, https://
   */
  tryUrl(input, pos) {
    const url = extractUrl(input, pos);
    if (url) {
      return createToken(url, "url", createPosition(pos, pos + url.length));
    }
    return null;
  }
  /**
   * Try to extract a variable reference (:varname) at the current position.
   * In hyperscript, :x refers to a local variable named x.
   */
  tryVariableRef(input, pos) {
    if (input[pos] !== ":") return null;
    if (pos + 1 >= input.length) return null;
    if (!isAsciiIdentifierChar(input[pos + 1])) return null;
    let endPos = pos + 1;
    while (endPos < input.length && isAsciiIdentifierChar(input[endPos])) {
      endPos++;
    }
    const varRef = input.slice(pos, endPos);
    return createToken(varRef, "identifier", createPosition(pos, endPos));
  }
  /**
   * Try to extract an operator or punctuation token at the current position.
   * Handles two-character operators (==, !=, etc.) and single-character operators.
   */
  tryOperator(input, pos) {
    const twoChar = input.slice(pos, pos + 2);
    if (["==", "!=", "<=", ">=", "&&", "||", "->"].includes(twoChar)) {
      return createToken(twoChar, "operator", createPosition(pos, pos + 2));
    }
    const oneChar = input[pos];
    if (["<", ">", "!", "+", "-", "*", "/", "="].includes(oneChar)) {
      return createToken(oneChar, "operator", createPosition(pos, pos + 1));
    }
    if (["(", ")", "{", "}", ",", ";", ":"].includes(oneChar)) {
      return createToken(oneChar, "punctuation", createPosition(pos, pos + 1));
    }
    return null;
  }
  /**
   * Try to match a multi-character particle from a list.
   *
   * Used by languages like Japanese, Korean, and Chinese that have
   * multi-character particles (e.g., Japanese から, まで, より).
   *
   * @param input - Input string
   * @param pos - Current position
   * @param particles - Array of multi-character particles to match
   * @returns Token if matched, null otherwise
   */
  tryMultiCharParticle(input, pos, particles) {
    for (const particle of particles) {
      if (input.slice(pos, pos + particle.length) === particle) {
        return createToken(particle, "particle", createPosition(pos, pos + particle.length));
      }
    }
    return null;
  }
};
/**
 * Configuration for native language time units.
 * Maps patterns to their standard suffix (ms, s, m, h).
 */
_BaseTokenizer.STANDARD_TIME_UNITS = [
  { pattern: "ms", suffix: "ms", length: 2 },
  { pattern: "s", suffix: "s", length: 1, checkBoundary: true },
  { pattern: "m", suffix: "m", length: 1, checkBoundary: true, notFollowedBy: "s" },
  { pattern: "h", suffix: "h", length: 1, checkBoundary: true }
];
var BaseTokenizer = _BaseTokenizer;

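Reading aid (not part of the package diff): the doc comments above describe longest-first keyword matching and morphology-assisted keyword lookup. The sketch below shows that flow end to end; the subclass name, the inline profile, and the stub normalizer are illustrative assumptions, and it presumes the bundle-internal bindings above are in scope.

// Hypothetical subclass wiring BaseTokenizer's helpers together; none of these names ship in the package.
class DemoTokenizer extends BaseTokenizer {
  constructor() {
    super();
    // Keywords, references, and role markers are flattened into one longest-first list.
    this.initializeKeywordsFromProfile({
      keywords: { toggle: { primary: "toggle", alternatives: ["switch"] } },
      references: { me: "me" },
      roleMarkers: { target: { primary: "on" } }
    });
    // Stub normalizer: drop a trailing "d" so "toggled" resolves back to the "toggle" keyword.
    this.setNormalizer({
      language: "xx",
      isNormalizable: (w) => w.endsWith("d"),
      normalize: (w) => w.endsWith("ed")
        ? { stem: w.slice(0, -1), confidence: 0.8 }
        : { stem: w, confidence: 1 }
    });
  }
}

const demo = new DemoTokenizer();
demo.tryProfileKeyword("toggle .menu", 0); // keyword token "toggle", normalized "toggle"
demo.tryMorphKeywordMatch("toggled", 0, 7); // keyword token with stem "toggle", stemConfidence 0.8
demo.trySelector("#counter then", 0); // selector token "#counter"
demo.tryNumber("500ms later", 0); // literal token "500ms"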
// src/tokenizers/morphology/types.ts
function noChange(word) {
  return { stem: word, confidence: 1 };
}
function normalized(stem, confidence, metadata) {
  if (metadata) {
    return { stem, confidence, metadata };
  }
  return { stem, confidence };
}

// src/tokenizers/morphology/japanese-normalizer.ts
|
|
796
|
+
var JAPANESE_SUFFIX_RULES = [
|
|
797
|
+
// Conditional forms - very common for event handlers (longest first)
|
|
798
|
+
// したら/すると/すれば are する verb conditionals
|
|
799
|
+
{ pattern: "\u3057\u305F\u3089", confidence: 0.88, conjugationType: "conditional-tara", minStemLength: 2 },
|
|
800
|
+
{ pattern: "\u3059\u308B\u3068", confidence: 0.88, conjugationType: "conditional-to", minStemLength: 2 },
|
|
801
|
+
{ pattern: "\u3059\u308C\u3070", confidence: 0.85, conjugationType: "conditional-ba", minStemLength: 2 },
|
|
802
|
+
// たら/れば are regular verb conditionals
|
|
803
|
+
{ pattern: "\u305F\u3089", confidence: 0.85, conjugationType: "conditional-tara", minStemLength: 2 },
|
|
804
|
+
{ pattern: "\u308C\u3070", confidence: 0.82, conjugationType: "conditional-ba", minStemLength: 2 },
|
|
805
|
+
// Compound forms (longest first)
|
|
806
|
+
{ pattern: "\u3066\u3044\u307E\u3057\u305F", confidence: 0.82, conjugationType: "past", minStemLength: 2 },
|
|
807
|
+
{ pattern: "\u3066\u3044\u307E\u3059", confidence: 0.85, conjugationType: "progressive", minStemLength: 2 },
|
|
808
|
+
{ pattern: "\u3066\u304F\u3060\u3055\u3044", confidence: 0.85, conjugationType: "request", minStemLength: 2 },
|
|
809
|
+
{ pattern: "\u3067\u304F\u3060\u3055\u3044", confidence: 0.85, conjugationType: "request", minStemLength: 2 },
|
|
810
|
+
{ pattern: "\u3066\u3044\u308B", confidence: 0.85, conjugationType: "progressive", minStemLength: 2 },
|
|
811
|
+
{ pattern: "\u3066\u304A\u304F", confidence: 0.82, conjugationType: "progressive", minStemLength: 2 },
|
|
812
|
+
{ pattern: "\u3066\u307F\u308B", confidence: 0.82, conjugationType: "progressive", minStemLength: 2 },
|
|
813
|
+
{ pattern: "\u3066\u3042\u308B", confidence: 0.82, conjugationType: "progressive", minStemLength: 2 },
|
|
814
|
+
// Casual request forms
|
|
815
|
+
{ pattern: "\u3066\u304F\u308C", confidence: 0.8, conjugationType: "casual-request", minStemLength: 2 },
|
|
816
|
+
{ pattern: "\u3067\u304F\u308C", confidence: 0.8, conjugationType: "casual-request", minStemLength: 2 },
|
|
817
|
+
// Contracted/colloquial forms (ちゃう/じゃう = てしまう/でしまう)
|
|
818
|
+
{ pattern: "\u3061\u3083\u3063\u305F", confidence: 0.82, conjugationType: "contracted-past", minStemLength: 2 },
|
|
819
|
+
{ pattern: "\u3058\u3083\u3063\u305F", confidence: 0.82, conjugationType: "contracted-past", minStemLength: 2 },
|
|
820
|
+
{ pattern: "\u3061\u3083\u3046", confidence: 0.82, conjugationType: "contracted", minStemLength: 2 },
|
|
821
|
+
{ pattern: "\u3058\u3083\u3046", confidence: 0.82, conjugationType: "contracted", minStemLength: 2 },
|
|
822
|
+
// Polite forms
|
|
823
|
+
{ pattern: "\u307E\u3057\u305F", confidence: 0.85, conjugationType: "past", minStemLength: 2 },
|
|
824
|
+
{ pattern: "\u307E\u305B\u3093", confidence: 0.85, conjugationType: "negative", minStemLength: 2 },
|
|
825
|
+
{ pattern: "\u307E\u3059", confidence: 0.85, conjugationType: "polite", minStemLength: 2 },
|
|
826
|
+
// て/た forms (very common)
|
|
827
|
+
{ pattern: "\u3066", confidence: 0.85, conjugationType: "te-form", minStemLength: 2 },
|
|
828
|
+
{ pattern: "\u305F", confidence: 0.85, conjugationType: "past", minStemLength: 2 },
|
|
829
|
+
// Negative forms
|
|
830
|
+
{ pattern: "\u306A\u3044", confidence: 0.82, conjugationType: "negative", minStemLength: 2 },
|
|
831
|
+
{ pattern: "\u306A\u304B\u3063\u305F", confidence: 0.82, conjugationType: "past", minStemLength: 2 },
|
|
832
|
+
// Potential forms
|
|
833
|
+
{ pattern: "\u3089\u308C\u308B", confidence: 0.8, conjugationType: "potential", minStemLength: 2 },
|
|
834
|
+
{ pattern: "\u308C\u308B", confidence: 0.78, conjugationType: "potential", minStemLength: 2 },
|
|
835
|
+
// Passive forms
|
|
836
|
+
{ pattern: "\u3089\u308C\u305F", confidence: 0.8, conjugationType: "passive", minStemLength: 2 },
|
|
837
|
+
// Causative forms
|
|
838
|
+
{ pattern: "\u3055\u305B\u308B", confidence: 0.8, conjugationType: "causative", minStemLength: 2 },
|
|
839
|
+
{ pattern: "\u305B\u308B", confidence: 0.78, conjugationType: "causative", minStemLength: 2 },
|
|
840
|
+
// Volitional forms
|
|
841
|
+
{ pattern: "\u3088\u3046", confidence: 0.8, conjugationType: "volitional", minStemLength: 2 },
|
|
842
|
+
// Dictionary form ending (る-verbs) - lower confidence due to ambiguity
|
|
843
|
+
{ pattern: "\u308B", confidence: 0.75, conjugationType: "dictionary", minStemLength: 3 }
|
|
844
|
+
];
var SURU_PATTERNS = [
  // Conditional forms (most important for native idioms)
  { pattern: "\u3057\u305F\u3089", confidence: 0.88, conjugationType: "conditional-tara" },
  { pattern: "\u3059\u308B\u3068", confidence: 0.88, conjugationType: "conditional-to" },
  { pattern: "\u3059\u308C\u3070", confidence: 0.85, conjugationType: "conditional-ba" },
  // Progressive forms
  { pattern: "\u3057\u3066\u3044\u307E\u3059", confidence: 0.85, conjugationType: "progressive" },
  { pattern: "\u3057\u3066\u3044\u308B", confidence: 0.85, conjugationType: "progressive" },
  // Other forms
  { pattern: "\u3057\u307E\u3057\u305F", confidence: 0.85, conjugationType: "past" },
  { pattern: "\u3057\u307E\u3059", confidence: 0.85, conjugationType: "polite" },
  { pattern: "\u3057\u306A\u3044", confidence: 0.82, conjugationType: "negative" },
  { pattern: "\u3057\u3066", confidence: 0.85, conjugationType: "te-form" },
  { pattern: "\u3057\u305F", confidence: 0.85, conjugationType: "past" },
  { pattern: "\u3059\u308B", confidence: 0.88, conjugationType: "dictionary" }
];
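// Illustrative note (not part of the published ja.js output): the する-verb table is
// consulted before the generic JAPANESE_SUFFIX_RULES (and after the compound patterns),
// see trySuruNormalization below. For example, the noun+する conditional "クリックしたら"
// ends with "したら", so the stem "クリック" is returned with conjugationType
// "conditional-tara" and originalForm "suru-verb".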
function isHiragana(char) {
  const code = char.charCodeAt(0);
  return code >= 12352 && code <= 12447;
}
function isKatakana(char) {
  const code = char.charCodeAt(0);
  return code >= 12448 && code <= 12543;
}
function isKanji(char) {
  const code = char.charCodeAt(0);
  return code >= 19968 && code <= 40959 || code >= 13312 && code <= 19903;
}
function containsJapanese(word) {
  for (const char of word) {
    if (isHiragana(char) || isKatakana(char) || isKanji(char)) {
      return true;
    }
  }
  return false;
}
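// Illustrative note (not part of the published ja.js output): the numeric ranges above
// correspond to Unicode blocks:
//   12352-12447 = U+3040-U+309F (Hiragana), 12448-12543 = U+30A0-U+30FF (Katakana),
//   19968-40959 = U+4E00-U+9FFF (CJK Unified Ideographs), 13312-19903 = U+3400-U+4DBF (Extension A).
// So, for example:
//   isHiragana("か") === true;  isKatakana("カ") === true;  isKanji("漢") === true;
//   containsJapanese("set x") === false;  containsJapanese("xを設定") === true.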
var JapaneseMorphologicalNormalizer = class {
  constructor() {
    this.language = "ja";
  }
  /**
   * Check if a word might be a Japanese verb that can be normalized.
   */
  isNormalizable(word) {
    if (!containsJapanese(word)) return false;
    if (word.length < 2) return false;
    const lastChar = word[word.length - 1];
    return isHiragana(lastChar);
  }
  /**
   * Normalize a Japanese word to its stem form.
   */
  normalize(word) {
    const compoundResult = this.normalizeCompound(word);
    if (compoundResult) return compoundResult;
    const suruResult = this.trySuruNormalization(word);
    if (suruResult) return suruResult;
    for (const rule of JAPANESE_SUFFIX_RULES) {
      if (word.endsWith(rule.pattern)) {
        const stem = word.slice(0, -rule.pattern.length);
        const minLength = rule.minStemLength ?? 2;
        if (stem.length < minLength) continue;
        const metadata = {
          removedSuffixes: [rule.pattern]
        };
        if (rule.conjugationType) {
          metadata.conjugationType = rule.conjugationType;
        }
        return normalized(stem, rule.confidence, metadata);
      }
    }
    return noChange(word);
  }
  /**
   * Try to normalize a する verb.
   */
  trySuruNormalization(word) {
    for (const pattern of SURU_PATTERNS) {
      if (word.endsWith(pattern.pattern)) {
        const stem = word.slice(0, -pattern.pattern.length);
        if (stem.length < 1) continue;
        return normalized(stem, pattern.confidence, {
          removedSuffixes: [pattern.pattern],
          conjugationType: pattern.conjugationType,
          originalForm: "suru-verb"
        });
      }
    }
    return null;
  }
  /**
   * Normalize compound conjugations (multi-layer suffixes).
   * These are combinations like ていなかった (was not doing), でいない (is not doing).
   * Handles cases that single-suffix rules miss.
   */
  normalizeCompound(word) {
    const compoundPatterns = [
      // Progressive negative past forms
      {
        pattern: "\u3066\u3044\u306A\u304B\u3063\u305F",
        suffixes: ["\u3066", "\u3044", "\u306A\u304B\u3063\u305F"],
        confidence: 0.8,
        minStemLength: 2
      },
      {
        pattern: "\u3067\u3044\u306A\u304B\u3063\u305F",
        suffixes: ["\u3067", "\u3044", "\u306A\u304B\u3063\u305F"],
        confidence: 0.8,
        minStemLength: 2
      },
      // Progressive negative forms
      { pattern: "\u3066\u3044\u306A\u3044", suffixes: ["\u3066", "\u3044", "\u306A\u3044"], confidence: 0.82, minStemLength: 2 },
      { pattern: "\u3067\u3044\u306A\u3044", suffixes: ["\u3067", "\u3044", "\u306A\u3044"], confidence: 0.82, minStemLength: 2 },
      // Progressive past forms
      { pattern: "\u3066\u3044\u305F", suffixes: ["\u3066", "\u3044", "\u305F"], confidence: 0.85, minStemLength: 2 },
      { pattern: "\u3067\u3044\u305F", suffixes: ["\u3067", "\u3044", "\u305F"], confidence: 0.85, minStemLength: 2 }
    ];
    for (const { pattern, suffixes, confidence, minStemLength } of compoundPatterns) {
      if (word.endsWith(pattern)) {
        const stem = word.slice(0, -pattern.length);
        if (stem.length < minStemLength) continue;
        return normalized(stem, confidence, {
          removedSuffixes: suffixes,
          conjugationType: "compound"
        });
      }
    }
    return null;
  }
};
var japaneseMorphologicalNormalizer = new JapaneseMorphologicalNormalizer();
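// Usage sketch (not part of the published ja.js output): isNormalizable() gates on
// "contains Japanese and ends in hiragana", so katakana loanwords are left alone while
// conjugated verbs are reduced to a stem. With the rules shown above:
//   japaneseMorphologicalNormalizer.isNormalizable("クリック")   // false (ends in katakana)
//   japaneseMorphologicalNormalizer.isNormalizable("切り替えた") // true
//   japaneseMorphologicalNormalizer.normalize("切り替えた")      // stem "切り替え", past form
//   japaneseMorphologicalNormalizer.normalize("クリックしたら")  // stem "クリック", suru-verb conditional
// The exact shape of the returned object comes from the normalized()/noChange() helpers
// defined earlier in this bundle.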
// src/generators/profiles/japanese.ts
var japaneseProfile = {
  code: "ja",
  name: "Japanese",
  nativeName: "\u65E5\u672C\u8A9E",
  direction: "ltr",
  wordOrder: "SOV",
  markingStrategy: "particle",
  usesSpaces: false,
  // Japanese uses verb stem/masu-stem form, no clear infinitive/imperative distinction
  // for UI commands. Uses katakana loanwords (トグル) or native stems (切り替え)
  defaultVerbForm: "base",
  verb: {
    position: "end",
    suffixes: ["\u308B", "\u3066", "\u305F", "\u307E\u3059", "\u306A\u3044"],
    subjectDrop: true
  },
  references: {
    me: "\u81EA\u5206",
    // "self" - in hyperscript context, refers to current element
    it: "\u305D\u308C",
    // "it"
    you: "\u3042\u306A\u305F",
    // "you"
    result: "\u7D50\u679C",
    event: "\u30A4\u30D9\u30F3\u30C8",
    target: "\u30BF\u30FC\u30B2\u30C3\u30C8",
    body: "\u30DC\u30C7\u30A3"
  },
  possessive: {
    marker: "\u306E",
    markerPosition: "between",
    // In Japanese: 自分の value (jibun no value) = "my value"
    keywords: {
      \u79C1\u306E: "me",
      // watashi no (my)
      \u3042\u306A\u305F\u306E: "you",
      // anata no (your)
      \u305D\u306E: "it"
      // sono (its)
    }
  },
  roleMarkers: {
    patient: { primary: "\u3092", position: "after" },
    destination: { primary: "\u306B", alternatives: ["\u3078", "\u3067"], position: "after" },
    source: { primary: "\u304B\u3089", position: "after" },
    style: { primary: "\u3067", position: "after" },
    event: { primary: "\u3092", position: "after" }
    // Event as object marker
    // Possession marker for "X's Y" patterns
    // Note: の is used between target and patient: #button の .active
  },
  keywords: {
    // Class/Attribute operations
    toggle: {
      primary: "\u5207\u308A\u66FF\u3048",
      alternatives: ["\u5207\u308A\u66FF\u3048\u308B", "\u30C8\u30B0\u30EB", "\u30C8\u30B0\u30EB\u3059\u308B"],
      normalized: "toggle"
    },
    add: { primary: "\u8FFD\u52A0", alternatives: ["\u8FFD\u52A0\u3059\u308B", "\u52A0\u3048\u308B"], normalized: "add" },
    remove: { primary: "\u524A\u9664", alternatives: ["\u524A\u9664\u3059\u308B", "\u53D6\u308A\u9664\u304F"], normalized: "remove" },
    // Content operations
    put: { primary: "\u7F6E\u304F", alternatives: ["\u5165\u308C\u308B", "\u30BB\u30C3\u30C8"], normalized: "put" },
    append: { primary: "\u672B\u5C3E\u8FFD\u52A0", alternatives: ["\u672B\u5C3E\u306B\u8FFD\u52A0", "\u30A2\u30DA\u30F3\u30C9"], normalized: "append" },
    prepend: {
      primary: "\u5148\u982D\u8FFD\u52A0",
      alternatives: ["\u5148\u982D\u306B\u8FFD\u52A0", "\u30D7\u30EA\u30DA\u30F3\u30C9"],
      normalized: "prepend"
    },
    take: { primary: "\u53D6\u308B", alternatives: ["\u53D6\u5F97"], normalized: "take" },
    make: { primary: "\u4F5C\u308B", alternatives: ["\u4F5C\u6210"], normalized: "make" },
    clone: { primary: "\u8907\u88FD", alternatives: ["\u30AF\u30ED\u30FC\u30F3"], normalized: "clone" },
    swap: { primary: "\u4EA4\u63DB", alternatives: ["\u30B9\u30EF\u30C3\u30D7", "\u5165\u308C\u66FF\u3048"], normalized: "swap" },
    morph: { primary: "\u5909\u5F62", alternatives: ["\u30E2\u30FC\u30D5", "\u5909\u63DB"], normalized: "morph" },
    // Variable operations
    set: { primary: "\u8A2D\u5B9A", alternatives: ["\u8A2D\u5B9A\u3059\u308B", "\u30BB\u30C3\u30C8"], normalized: "set" },
    get: { primary: "\u53D6\u5F97", alternatives: ["\u53D6\u5F97\u3059\u308B", "\u30B2\u30C3\u30C8"], normalized: "get" },
    increment: {
      primary: "\u5897\u52A0",
      alternatives: ["\u5897\u3084\u3059", "\u30A4\u30F3\u30AF\u30EA\u30E1\u30F3\u30C8"],
      normalized: "increment"
    },
    decrement: {
      primary: "\u6E1B\u5C11",
      alternatives: ["\u6E1B\u3089\u3059", "\u30C7\u30AF\u30EA\u30E1\u30F3\u30C8"],
      normalized: "decrement"
    },
    log: { primary: "\u8A18\u9332", alternatives: ["\u30ED\u30B0", "\u51FA\u529B"], normalized: "log" },
    // Visibility
    show: { primary: "\u8868\u793A", alternatives: ["\u8868\u793A\u3059\u308B", "\u898B\u305B\u308B"], normalized: "show" },
    hide: { primary: "\u96A0\u3059", alternatives: ["\u975E\u8868\u793A", "\u975E\u8868\u793A\u306B\u3059\u308B"], normalized: "hide" },
    transition: {
      primary: "\u9077\u79FB",
      alternatives: ["\u30C8\u30E9\u30F3\u30B8\u30B7\u30E7\u30F3", "\u30A2\u30CB\u30E1\u30FC\u30B7\u30E7\u30F3"],
      normalized: "transition"
    },
    // Events
    on: { primary: "\u3067", alternatives: ["\u6642", "\u3068\u304D"], normalized: "on" },
    trigger: { primary: "\u5F15\u304D\u91D1", alternatives: ["\u767A\u706B", "\u30C8\u30EA\u30AC\u30FC"], normalized: "trigger" },
    send: { primary: "\u9001\u308B", alternatives: ["\u9001\u4FE1"], normalized: "send" },
    // DOM focus
    focus: { primary: "\u30D5\u30A9\u30FC\u30AB\u30B9", alternatives: ["\u96C6\u4E2D"], normalized: "focus" },
    blur: { primary: "\u307C\u304B\u3057", alternatives: ["\u30D5\u30A9\u30FC\u30AB\u30B9\u89E3\u9664"], normalized: "blur" },
    // Navigation
    go: { primary: "\u79FB\u52D5", alternatives: ["\u884C\u304F", "\u30CA\u30D3\u30B2\u30FC\u30C8"], normalized: "go" },
    // Async
    wait: { primary: "\u5F85\u3064", alternatives: ["\u5F85\u6A5F"], normalized: "wait" },
    fetch: { primary: "\u53D6\u5F97", alternatives: ["\u30D5\u30A7\u30C3\u30C1"], normalized: "fetch" },
    settle: { primary: "\u5B89\u5B9A", alternatives: ["\u843D\u3061\u7740\u304F"], normalized: "settle" },
    // Control flow
    if: { primary: "\u3082\u3057", alternatives: ["\u6761\u4EF6"], normalized: "if" },
    when: { primary: "\u3068\u304D", normalized: "when" },
    where: { primary: "\u3069\u3053", normalized: "where" },
    else: { primary: "\u305D\u3046\u3067\u306A\u3051\u308C\u3070", alternatives: ["\u305D\u308C\u4EE5\u5916"], normalized: "else" },
    repeat: { primary: "\u7E70\u308A\u8FD4\u3057", alternatives: ["\u7E70\u308A\u8FD4\u3059", "\u30EA\u30D4\u30FC\u30C8"], normalized: "repeat" },
    for: { primary: "\u305F\u3081\u306B", alternatives: ["\u5404"], normalized: "for" },
    while: { primary: "\u306E\u9593", alternatives: ["\u9593"], normalized: "while" },
    continue: { primary: "\u7D9A\u3051\u308B", alternatives: ["\u7D99\u7D9A"], normalized: "continue" },
    halt: { primary: "\u505C\u6B62", alternatives: ["\u6B62\u3081\u308B", "\u30CF\u30EB\u30C8"], normalized: "halt" },
    throw: { primary: "\u6295\u3052\u308B", alternatives: ["\u30B9\u30ED\u30FC"], normalized: "throw" },
    call: { primary: "\u547C\u3073\u51FA\u3057", alternatives: ["\u30B3\u30FC\u30EB", "\u547C\u3076"], normalized: "call" },
    return: { primary: "\u623B\u308B", alternatives: ["\u8FD4\u3059", "\u30EA\u30BF\u30FC\u30F3"], normalized: "return" },
    then: { primary: "\u305D\u308C\u304B\u3089", alternatives: ["\u6B21\u306B", "\u305D\u3057\u3066"], normalized: "then" },
    and: { primary: "\u305D\u3057\u3066", alternatives: ["\u3068", "\u307E\u305F"], normalized: "and" },
    end: { primary: "\u7D42\u308F\u308A", alternatives: ["\u7D42\u4E86", "\u304A\u308F\u308A"], normalized: "end" },
    // Advanced
    js: { primary: "JS\u5B9F\u884C", alternatives: ["js"], normalized: "js" },
    async: { primary: "\u975E\u540C\u671F", alternatives: ["\u30A2\u30B7\u30F3\u30AF"], normalized: "async" },
    tell: { primary: "\u4F1D\u3048\u308B", alternatives: ["\u30C6\u30EB"], normalized: "tell" },
    default: { primary: "\u65E2\u5B9A", alternatives: ["\u30C7\u30D5\u30A9\u30EB\u30C8"], normalized: "default" },
    init: { primary: "\u521D\u671F\u5316", alternatives: ["\u30A4\u30CB\u30C3\u30C8"], normalized: "init" },
    behavior: { primary: "\u632F\u308B\u821E\u3044", alternatives: ["\u30D3\u30D8\u30A4\u30D3\u30A2"], normalized: "behavior" },
    install: { primary: "\u30A4\u30F3\u30B9\u30C8\u30FC\u30EB", alternatives: ["\u5C0E\u5165"], normalized: "install" },
    measure: { primary: "\u6E2C\u5B9A", alternatives: ["\u8A08\u6E2C", "\u30E1\u30B8\u30E3\u30FC"], normalized: "measure" },
    // Modifiers
    into: { primary: "\u3078", alternatives: ["\u306B"], normalized: "into" },
    before: { primary: "\u524D\u306B", alternatives: ["\u524D"], normalized: "before" },
    after: { primary: "\u5F8C\u306B", alternatives: ["\u5F8C"], normalized: "after" },
    // Event modifiers (for repeat until event)
    until: { primary: "\u307E\u3067", alternatives: ["\u8FC4"], normalized: "until" },
    event: { primary: "\u30A4\u30D9\u30F3\u30C8", alternatives: ["\u4E8B\u8C61"], normalized: "event" },
    from: { primary: "\u304B\u3089", normalized: "from" }
  },
  tokenization: {
    particles: ["\u3092", "\u306B", "\u3067", "\u304B\u3089", "\u306E", "\u304C", "\u306F", "\u3082", "\u3078", "\u3068"],
    boundaryStrategy: "particle"
  }
};
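// Illustrative sketch (not part of the published ja.js output): one way to read the
// profile above is that every entry maps a native surface form (primary or alternative)
// to a normalized hyperscript-style command name. The helper below is hypothetical.
function demoLookupProfileKeyword(profile, nativeWord) {
  for (const [name, entry] of Object.entries(profile.keywords)) {
    if (entry.primary === nativeWord || (entry.alternatives ?? []).includes(nativeWord)) {
      return entry.normalized ?? name;
    }
  }
  return null;
}
// demoLookupProfileKeyword(japaneseProfile, "切り替え") -> "toggle"
// demoLookupProfileKeyword(japaneseProfile, "トグル")   -> "toggle" (katakana alternative)
// demoLookupProfileKeyword(japaneseProfile, "隠す")     -> "hide"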
// src/tokenizers/japanese.ts
var isHiragana2 = createUnicodeRangeClassifier([[12352, 12447]]);
var isKatakana2 = createUnicodeRangeClassifier([[12448, 12543]]);
var isKanji2 = createUnicodeRangeClassifier([
  [19968, 40959],
  // CJK Unified Ideographs
  [13312, 19903]
  // CJK Unified Ideographs Extension A
]);
var isJapanese = combineClassifiers(isHiragana2, isKatakana2, isKanji2);
var PARTICLES = /* @__PURE__ */ new Set([
  "\u3092",
  // wo - object marker
  "\u306B",
  // ni - destination, time
  "\u3067",
  // de - location of action, means
  "\u304B\u3089",
  // kara - from
  "\u307E\u3067",
  // made - until
  "\u3078",
  // e - direction
  "\u3068",
  // to - and, with
  "\u306E",
  // no - possessive
  "\u304C",
  // ga - subject marker
  "\u306F",
  // wa - topic marker
  "\u3082",
  // mo - also
  "\u3088\u308A"
  // yori - than, from
]);
var SINGLE_CHAR_PARTICLES = /* @__PURE__ */ new Set(["\u3092", "\u306B", "\u3067", "\u3078", "\u3068", "\u306E", "\u304C", "\u306F", "\u3082"]);
var MULTI_CHAR_PARTICLES = ["\u304B\u3089", "\u307E\u3067", "\u3088\u308A"];
var PARTICLE_ROLES = /* @__PURE__ */ new Map([
  ["\u3092", { role: "patient", confidence: 0.95, description: "object marker" }],
  ["\u306B", { role: "destination", confidence: 0.85, description: "destination/time marker" }],
  ["\u3067", { role: "manner", confidence: 0.88, description: "means/location marker" }],
  ["\u304B\u3089", { role: "source", confidence: 0.9, description: "from/source marker" }],
  ["\u307E\u3067", { role: "destination", confidence: 0.75, description: "until/boundary marker" }],
  ["\u3078", { role: "destination", confidence: 0.9, description: "direction marker" }],
  ["\u3068", { role: "style", confidence: 0.7, description: "with/and marker" }],
  ["\u306E", { role: "patient", confidence: 0.6, description: "possessive marker" }],
  ["\u304C", { role: "agent", confidence: 0.85, description: "subject marker" }],
  ["\u306F", { role: "agent", confidence: 0.75, description: "topic marker" }],
  ["\u3082", { role: "patient", confidence: 0.65, description: "also/too marker" }],
  ["\u3088\u308A", { role: "source", confidence: 0.85, description: "from/than marker" }]
]);
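// Illustrative note (not part of the published ja.js output): the role map is what lets
// the tokenizer attach a semantic role and confidence to each particle token it emits:
//   PARTICLE_ROLES.get("を")   -> { role: "patient", confidence: 0.95, ... }      (direct object)
//   PARTICLE_ROLES.get("から") -> { role: "source", confidence: 0.9, ... }        ("from")
//   PARTICLE_ROLES.get("に")   -> { role: "destination", confidence: 0.85, ... }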
var JAPANESE_EXTRAS = [
  // Values/Literals
  { native: "\u771F", normalized: "true" },
  { native: "\u507D", normalized: "false" },
  { native: "\u30CC\u30EB", normalized: "null" },
  { native: "\u672A\u5B9A\u7FA9", normalized: "undefined" },
  // Positional
  { native: "\u6700\u521D", normalized: "first" },
  { native: "\u6700\u5F8C", normalized: "last" },
  { native: "\u6B21", normalized: "next" },
  { native: "\u524D", normalized: "previous" },
  { native: "\u6700\u3082\u8FD1\u3044", normalized: "closest" },
  { native: "\u89AA", normalized: "parent" },
  // Events
  { native: "\u30AF\u30EA\u30C3\u30AF", normalized: "click" },
  { native: "\u5909\u66F4", normalized: "change" },
  { native: "\u9001\u4FE1", normalized: "submit" },
  { native: "\u5165\u529B", normalized: "input" },
  { native: "\u30ED\u30FC\u30C9", normalized: "load" },
  { native: "\u30B9\u30AF\u30ED\u30FC\u30EB", normalized: "scroll" },
  { native: "\u30AD\u30FC\u30C0\u30A6\u30F3", normalized: "keydown" },
  { native: "\u30AD\u30FC\u30A2\u30C3\u30D7", normalized: "keyup" },
  { native: "\u30DE\u30A6\u30B9\u30AA\u30FC\u30D0\u30FC", normalized: "mouseover" },
  { native: "\u30DE\u30A6\u30B9\u30A2\u30A6\u30C8", normalized: "mouseout" },
  { native: "\u30D6\u30E9\u30FC", normalized: "blur" },
  // References (additional forms)
  { native: "\u79C1", normalized: "me" },
  { native: "\u79C1\u306E", normalized: "my" },
  { native: "\u305D\u306E", normalized: "its" },
  // Note: Attached particle forms (を切り替え, を追加, etc.) are intentionally NOT included
  // because they would cause ambiguous parsing. The separate particle + verb pattern
  // (を + 切り替え) is preferred for consistent semantic analysis.
  // Conditional event forms
  { native: "\u3057\u305F\u3089", normalized: "on" },
  { native: "\u3059\u308B\u3068", normalized: "on" },
  { native: "\u6642\u306B", normalized: "on" },
  // Control flow helpers
  { native: "\u3082\u3057", normalized: "if" },
  // Starts with particle も, needs explicit entry
  { native: "\u306A\u3089\u3070", normalized: "then" },
  { native: "\u306A\u3089", normalized: "then" },
  // Time units
  { native: "\u79D2", normalized: "s" },
  { native: "\u30DF\u30EA\u79D2", normalized: "ms" },
  { native: "\u5206", normalized: "m" },
  { native: "\u6642\u9593", normalized: "h" }
];
var JAPANESE_TIME_UNITS = [
  { pattern: "\u30DF\u30EA\u79D2", suffix: "ms", length: 3 },
  { pattern: "\u6642\u9593", suffix: "h", length: 2 },
  { pattern: "\u79D2", suffix: "s", length: 1 },
  { pattern: "\u5206", suffix: "m", length: 1 }
];
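// Illustrative sketch (not part of the published ja.js output): the time-unit table is
// ordered longest-pattern-first so that "ミリ秒" wins over "秒". A standalone re-reading
// of the same idea (the real tokenizer goes through tryNumberWithTimeUnits in
// extractJapaneseNumber below; the helper name here is hypothetical):
function demoReadDuration(text) {
  const digits = text.match(/^\d+/);
  if (!digits) return null;
  const rest = text.slice(digits[0].length);
  for (const unit of JAPANESE_TIME_UNITS) {
    if (rest.startsWith(unit.pattern)) return digits[0] + unit.suffix;
  }
  return digits[0];
}
// demoReadDuration("500ミリ秒") -> "500ms"
// demoReadDuration("2秒")       -> "2s"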
var JapaneseTokenizer = class extends BaseTokenizer {
  constructor() {
    super();
    this.language = "ja";
    this.direction = "ltr";
    this.initializeKeywordsFromProfile(japaneseProfile, JAPANESE_EXTRAS);
    this.normalizer = new JapaneseMorphologicalNormalizer();
  }
  tokenize(input) {
    const tokens = [];
    let pos = 0;
    while (pos < input.length) {
      if (isWhitespace(input[pos])) {
        pos++;
        continue;
      }
      if (isSelectorStart(input[pos])) {
        const modifierToken = this.tryEventModifier(input, pos);
        if (modifierToken) {
          tokens.push(modifierToken);
          pos = modifierToken.position.end;
          continue;
        }
        const selectorToken = this.trySelector(input, pos);
        if (selectorToken) {
          tokens.push(selectorToken);
          pos = selectorToken.position.end;
          continue;
        }
      }
      if (isQuote(input[pos])) {
        const stringToken = this.tryString(input, pos);
        if (stringToken) {
          tokens.push(stringToken);
          pos = stringToken.position.end;
          continue;
        }
      }
      if (isUrlStart(input, pos)) {
        const urlToken = this.tryUrl(input, pos);
        if (urlToken) {
          tokens.push(urlToken);
          pos = urlToken.position.end;
          continue;
        }
      }
      if (isDigit(input[pos])) {
        const numberToken = this.extractJapaneseNumber(input, pos);
        if (numberToken) {
          tokens.push(numberToken);
          pos = numberToken.position.end;
          continue;
        }
      }
      const varToken = this.tryVariableRef(input, pos);
      if (varToken) {
        tokens.push(varToken);
        pos = varToken.position.end;
        continue;
      }
      const multiParticle = this.tryMultiCharParticle(input, pos, MULTI_CHAR_PARTICLES);
      if (multiParticle) {
        const metadata = PARTICLE_ROLES.get(multiParticle.value);
        if (metadata) {
          tokens.push({
            ...multiParticle,
            metadata: {
              particleRole: metadata.role,
              particleConfidence: metadata.confidence
            }
          });
        } else {
          tokens.push(multiParticle);
        }
        pos = multiParticle.position.end;
        continue;
      }
      if (SINGLE_CHAR_PARTICLES.has(input[pos])) {
        const keywordToken = this.tryProfileKeyword(input, pos);
        if (keywordToken && keywordToken.value.length > 1) {
          tokens.push(keywordToken);
          pos = keywordToken.position.end;
          continue;
        }
        const particle = input[pos];
        const metadata = PARTICLE_ROLES.get(particle);
        if (metadata) {
          tokens.push({
            ...createToken(particle, "particle", createPosition(pos, pos + 1)),
            metadata: {
              particleRole: metadata.role,
              particleConfidence: metadata.confidence
            }
          });
        } else {
          tokens.push(createToken(particle, "particle", createPosition(pos, pos + 1)));
        }
        pos++;
        continue;
      }
      if (isJapanese(input[pos])) {
        const wordToken = this.extractJapaneseWord(input, pos);
        if (wordToken) {
          tokens.push(wordToken);
          pos = wordToken.position.end;
          continue;
        }
      }
      if (isAsciiIdentifierChar(input[pos])) {
        const asciiToken = this.extractAsciiWord(input, pos);
        if (asciiToken) {
          tokens.push(asciiToken);
          pos = asciiToken.position.end;
          continue;
        }
      }
      pos++;
    }
    return new TokenStreamImpl(tokens, "ja");
  }
  classifyToken(token) {
    if (PARTICLES.has(token)) return "particle";
    if (this.isKeyword(token)) return "keyword";
    if (token.startsWith("#") || token.startsWith(".") || token.startsWith("[")) return "selector";
    if (token.startsWith('"') || token.startsWith("'") || token.startsWith("\u300C")) return "literal";
    if (/^\d/.test(token)) return "literal";
    return "identifier";
  }
  /**
   * Extract a Japanese word (sequence of kanji/kana).
   * Stops at particles, ASCII, or whitespace.
   *
   * Uses morphological normalization to handle verb conjugations:
   * 1. First checks if the exact word is in the keyword map
   * 2. If not found, tries to strip conjugation suffixes and check again
   */
  extractJapaneseWord(input, startPos) {
    let pos = startPos;
    let word = "";
    while (pos < input.length) {
      const char = input[pos];
      if (SINGLE_CHAR_PARTICLES.has(char) && word.length > 0) {
        break;
      }
      let foundMulti = false;
      for (const particle of MULTI_CHAR_PARTICLES) {
        if (input.slice(pos, pos + particle.length) === particle && word.length > 0) {
          foundMulti = true;
          break;
        }
      }
      if (foundMulti) break;
      if (isJapanese(char)) {
        word += char;
        pos++;
      } else {
        break;
      }
    }
    if (!word) return null;
    const keywordEntry = this.lookupKeyword(word);
    if (keywordEntry) {
      return createToken(word, "keyword", createPosition(startPos, pos), keywordEntry.normalized);
    }
    const morphToken = this.tryMorphKeywordMatch(word, startPos, pos);
    if (morphToken) return morphToken;
    return createToken(word, "identifier", createPosition(startPos, pos));
  }
  /**
   * Extract an ASCII word (for mixed Japanese/English content).
   */
  extractAsciiWord(input, startPos) {
    let pos = startPos;
    let word = "";
    while (pos < input.length && isAsciiIdentifierChar(input[pos])) {
      word += input[pos++];
    }
    if (!word) return null;
    return createToken(word, "identifier", createPosition(startPos, pos));
  }
  /**
   * Extract a number, including Japanese time unit suffixes.
   * Japanese time units attach directly without whitespace.
   */
  extractJapaneseNumber(input, startPos) {
    return this.tryNumberWithTimeUnits(input, startPos, JAPANESE_TIME_UNITS, {
      allowSign: false,
      skipWhitespace: false
    });
  }
};
var japaneseTokenizer = new JapaneseTokenizer();
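// Usage sketch (not part of the published ja.js output):
//   const stream = japaneseTokenizer.tokenize("クリックで .active を 切り替えた");
// Roughly expected, given the tables above: a keyword token for "クリック" (normalized
// "click"), a particle token for "で", a selector token for ".active", a particle token
// for "を" (role: patient), and "切り替えた", which the morphological fallback reduces to
// the stem "切り替え" so it matches the "toggle" keyword. Exact token fields follow the
// createToken()/TokenStreamImpl helpers defined earlier in this bundle.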
// src/languages/ja.ts
registerLanguage("ja", japaneseTokenizer, japaneseProfile);
export {
  japaneseProfile,
  japaneseTokenizer
};
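// Usage note (not part of the published ja.js output): importing this module has the
// side effect of calling registerLanguage("ja", ...), so loading the Japanese entry
// point once is enough to make the tokenizer and profile available to the rest of the
// library. The exact import specifier depends on the "exports" map in package.json.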
//# sourceMappingURL=ja.js.map