@lokascript/semantic 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +686 -0
- package/dist/browser-ar.ar.global.js +2 -0
- package/dist/browser-core.core.global.js +2 -0
- package/dist/browser-de.de.global.js +2 -0
- package/dist/browser-east-asian.east-asian.global.js +2 -0
- package/dist/browser-en-tr.en-tr.global.js +2 -0
- package/dist/browser-en.en.global.js +2 -0
- package/dist/browser-es-en.es-en.global.js +2 -0
- package/dist/browser-es.es.global.js +2 -0
- package/dist/browser-fr.fr.global.js +2 -0
- package/dist/browser-id.id.global.js +2 -0
- package/dist/browser-ja.ja.global.js +2 -0
- package/dist/browser-ko.ko.global.js +2 -0
- package/dist/browser-lazy.lazy.global.js +2 -0
- package/dist/browser-priority.priority.global.js +2 -0
- package/dist/browser-pt.pt.global.js +2 -0
- package/dist/browser-qu.qu.global.js +2 -0
- package/dist/browser-sw.sw.global.js +2 -0
- package/dist/browser-tr.tr.global.js +2 -0
- package/dist/browser-western.western.global.js +2 -0
- package/dist/browser-zh.zh.global.js +2 -0
- package/dist/browser.global.js +3 -0
- package/dist/browser.global.js.map +1 -0
- package/dist/index.cjs +35051 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +3426 -0
- package/dist/index.d.ts +3426 -0
- package/dist/index.js +34890 -0
- package/dist/index.js.map +1 -0
- package/dist/languages/ar.d.ts +78 -0
- package/dist/languages/ar.js +1622 -0
- package/dist/languages/ar.js.map +1 -0
- package/dist/languages/de.d.ts +38 -0
- package/dist/languages/de.js +1168 -0
- package/dist/languages/de.js.map +1 -0
- package/dist/languages/en.d.ts +44 -0
- package/dist/languages/en.js +3491 -0
- package/dist/languages/en.js.map +1 -0
- package/dist/languages/es.d.ts +52 -0
- package/dist/languages/es.js +1493 -0
- package/dist/languages/es.js.map +1 -0
- package/dist/languages/fr.d.ts +37 -0
- package/dist/languages/fr.js +1159 -0
- package/dist/languages/fr.js.map +1 -0
- package/dist/languages/id.d.ts +35 -0
- package/dist/languages/id.js +1152 -0
- package/dist/languages/id.js.map +1 -0
- package/dist/languages/ja.d.ts +53 -0
- package/dist/languages/ja.js +1430 -0
- package/dist/languages/ja.js.map +1 -0
- package/dist/languages/ko.d.ts +51 -0
- package/dist/languages/ko.js +1729 -0
- package/dist/languages/ko.js.map +1 -0
- package/dist/languages/pt.d.ts +37 -0
- package/dist/languages/pt.js +1127 -0
- package/dist/languages/pt.js.map +1 -0
- package/dist/languages/qu.d.ts +36 -0
- package/dist/languages/qu.js +1143 -0
- package/dist/languages/qu.js.map +1 -0
- package/dist/languages/sw.d.ts +35 -0
- package/dist/languages/sw.js +1147 -0
- package/dist/languages/sw.js.map +1 -0
- package/dist/languages/tr.d.ts +45 -0
- package/dist/languages/tr.js +1529 -0
- package/dist/languages/tr.js.map +1 -0
- package/dist/languages/zh.d.ts +58 -0
- package/dist/languages/zh.js +1257 -0
- package/dist/languages/zh.js.map +1 -0
- package/dist/types-C4dcj53L.d.ts +600 -0
- package/package.json +202 -0
- package/src/__test-utils__/index.ts +7 -0
- package/src/__test-utils__/test-helpers.ts +8 -0
- package/src/__types__/test-helpers.ts +122 -0
- package/src/analysis/index.ts +479 -0
- package/src/ast-builder/command-mappers.ts +1133 -0
- package/src/ast-builder/expression-parser/index.ts +41 -0
- package/src/ast-builder/expression-parser/parser.ts +563 -0
- package/src/ast-builder/expression-parser/tokenizer.ts +394 -0
- package/src/ast-builder/expression-parser/types.ts +208 -0
- package/src/ast-builder/index.ts +536 -0
- package/src/ast-builder/value-converters.ts +172 -0
- package/src/bridge.ts +275 -0
- package/src/browser-ar.ts +162 -0
- package/src/browser-core.ts +231 -0
- package/src/browser-de.ts +162 -0
- package/src/browser-east-asian.ts +173 -0
- package/src/browser-en-tr.ts +165 -0
- package/src/browser-en.ts +157 -0
- package/src/browser-es-en.ts +200 -0
- package/src/browser-es.ts +170 -0
- package/src/browser-fr.ts +162 -0
- package/src/browser-id.ts +162 -0
- package/src/browser-ja.ts +162 -0
- package/src/browser-ko.ts +162 -0
- package/src/browser-lazy.ts +189 -0
- package/src/browser-priority.ts +214 -0
- package/src/browser-pt.ts +162 -0
- package/src/browser-qu.ts +162 -0
- package/src/browser-sw.ts +162 -0
- package/src/browser-tr.ts +162 -0
- package/src/browser-western.ts +181 -0
- package/src/browser-zh.ts +162 -0
- package/src/browser.ts +268 -0
- package/src/cache/index.ts +14 -0
- package/src/cache/semantic-cache.ts +344 -0
- package/src/core-bridge.ts +372 -0
- package/src/explicit/converter.ts +258 -0
- package/src/explicit/index.ts +18 -0
- package/src/explicit/parser.ts +236 -0
- package/src/explicit/renderer.ts +424 -0
- package/src/generators/command-schemas.ts +1636 -0
- package/src/generators/event-handler-generator.ts +109 -0
- package/src/generators/index.ts +117 -0
- package/src/generators/language-profiles.ts +139 -0
- package/src/generators/pattern-generator.ts +537 -0
- package/src/generators/profiles/arabic.ts +131 -0
- package/src/generators/profiles/bengali.ts +132 -0
- package/src/generators/profiles/chinese.ts +124 -0
- package/src/generators/profiles/english.ts +113 -0
- package/src/generators/profiles/french.ts +125 -0
- package/src/generators/profiles/german.ts +126 -0
- package/src/generators/profiles/hindi.ts +146 -0
- package/src/generators/profiles/index.ts +46 -0
- package/src/generators/profiles/indonesian.ts +125 -0
- package/src/generators/profiles/italian.ts +139 -0
- package/src/generators/profiles/japanese.ts +149 -0
- package/src/generators/profiles/korean.ts +127 -0
- package/src/generators/profiles/marker-templates.ts +288 -0
- package/src/generators/profiles/ms.ts +130 -0
- package/src/generators/profiles/polish.ts +249 -0
- package/src/generators/profiles/portuguese.ts +115 -0
- package/src/generators/profiles/quechua.ts +113 -0
- package/src/generators/profiles/russian.ts +260 -0
- package/src/generators/profiles/spanish.ts +130 -0
- package/src/generators/profiles/swahili.ts +129 -0
- package/src/generators/profiles/thai.ts +132 -0
- package/src/generators/profiles/tl.ts +128 -0
- package/src/generators/profiles/turkish.ts +124 -0
- package/src/generators/profiles/types.ts +165 -0
- package/src/generators/profiles/ukrainian.ts +270 -0
- package/src/generators/profiles/vietnamese.ts +133 -0
- package/src/generators/schema-error-codes.ts +160 -0
- package/src/generators/schema-validator.ts +391 -0
- package/src/index.ts +429 -0
- package/src/language-building-schema.ts +3170 -0
- package/src/language-loader.ts +394 -0
- package/src/languages/_all.ts +65 -0
- package/src/languages/ar.ts +15 -0
- package/src/languages/bn.ts +16 -0
- package/src/languages/de.ts +15 -0
- package/src/languages/en.ts +29 -0
- package/src/languages/es.ts +15 -0
- package/src/languages/fr.ts +15 -0
- package/src/languages/hi.ts +26 -0
- package/src/languages/id.ts +15 -0
- package/src/languages/index.ts +18 -0
- package/src/languages/it.ts +15 -0
- package/src/languages/ja.ts +15 -0
- package/src/languages/ko.ts +15 -0
- package/src/languages/ms.ts +16 -0
- package/src/languages/pl.ts +18 -0
- package/src/languages/pt.ts +15 -0
- package/src/languages/qu.ts +15 -0
- package/src/languages/ru.ts +26 -0
- package/src/languages/sw.ts +15 -0
- package/src/languages/th.ts +16 -0
- package/src/languages/tl.ts +16 -0
- package/src/languages/tr.ts +15 -0
- package/src/languages/uk.ts +26 -0
- package/src/languages/vi.ts +16 -0
- package/src/languages/zh.ts +15 -0
- package/src/parser/index.ts +15 -0
- package/src/parser/pattern-matcher.ts +1181 -0
- package/src/parser/semantic-parser.ts +573 -0
- package/src/parser/utils/index.ts +35 -0
- package/src/parser/utils/marker-resolution.ts +111 -0
- package/src/parser/utils/possessive-keywords.ts +43 -0
- package/src/parser/utils/role-positioning.ts +70 -0
- package/src/parser/utils/type-validation.ts +134 -0
- package/src/patterns/add/ar.ts +71 -0
- package/src/patterns/add/bn.ts +70 -0
- package/src/patterns/add/hi.ts +69 -0
- package/src/patterns/add/index.ts +87 -0
- package/src/patterns/add/it.ts +61 -0
- package/src/patterns/add/ja.ts +93 -0
- package/src/patterns/add/ko.ts +74 -0
- package/src/patterns/add/ms.ts +30 -0
- package/src/patterns/add/pl.ts +62 -0
- package/src/patterns/add/ru.ts +62 -0
- package/src/patterns/add/th.ts +49 -0
- package/src/patterns/add/tl.ts +30 -0
- package/src/patterns/add/tr.ts +71 -0
- package/src/patterns/add/uk.ts +62 -0
- package/src/patterns/add/vi.ts +61 -0
- package/src/patterns/add/zh.ts +71 -0
- package/src/patterns/builders.ts +207 -0
- package/src/patterns/decrement/bn.ts +70 -0
- package/src/patterns/decrement/de.ts +42 -0
- package/src/patterns/decrement/hi.ts +68 -0
- package/src/patterns/decrement/index.ts +79 -0
- package/src/patterns/decrement/it.ts +69 -0
- package/src/patterns/decrement/ms.ts +30 -0
- package/src/patterns/decrement/pl.ts +58 -0
- package/src/patterns/decrement/ru.ts +58 -0
- package/src/patterns/decrement/th.ts +49 -0
- package/src/patterns/decrement/tl.ts +30 -0
- package/src/patterns/decrement/tr.ts +48 -0
- package/src/patterns/decrement/uk.ts +58 -0
- package/src/patterns/decrement/vi.ts +61 -0
- package/src/patterns/decrement/zh.ts +32 -0
- package/src/patterns/en.ts +302 -0
- package/src/patterns/event-handler/ar.ts +151 -0
- package/src/patterns/event-handler/bn.ts +72 -0
- package/src/patterns/event-handler/de.ts +117 -0
- package/src/patterns/event-handler/en.ts +117 -0
- package/src/patterns/event-handler/es.ts +136 -0
- package/src/patterns/event-handler/fr.ts +117 -0
- package/src/patterns/event-handler/hi.ts +64 -0
- package/src/patterns/event-handler/id.ts +117 -0
- package/src/patterns/event-handler/index.ts +119 -0
- package/src/patterns/event-handler/it.ts +54 -0
- package/src/patterns/event-handler/ja.ts +118 -0
- package/src/patterns/event-handler/ko.ts +133 -0
- package/src/patterns/event-handler/ms.ts +30 -0
- package/src/patterns/event-handler/pl.ts +62 -0
- package/src/patterns/event-handler/pt.ts +117 -0
- package/src/patterns/event-handler/qu.ts +66 -0
- package/src/patterns/event-handler/ru.ts +62 -0
- package/src/patterns/event-handler/shared.ts +270 -0
- package/src/patterns/event-handler/sw.ts +117 -0
- package/src/patterns/event-handler/th.ts +53 -0
- package/src/patterns/event-handler/tl.ts +30 -0
- package/src/patterns/event-handler/tr.ts +170 -0
- package/src/patterns/event-handler/uk.ts +62 -0
- package/src/patterns/event-handler/vi.ts +61 -0
- package/src/patterns/event-handler/zh.ts +150 -0
- package/src/patterns/get/ar.ts +49 -0
- package/src/patterns/get/bn.ts +47 -0
- package/src/patterns/get/de.ts +32 -0
- package/src/patterns/get/hi.ts +52 -0
- package/src/patterns/get/index.ts +83 -0
- package/src/patterns/get/it.ts +56 -0
- package/src/patterns/get/ja.ts +53 -0
- package/src/patterns/get/ko.ts +53 -0
- package/src/patterns/get/ms.ts +30 -0
- package/src/patterns/get/pl.ts +57 -0
- package/src/patterns/get/ru.ts +57 -0
- package/src/patterns/get/th.ts +29 -0
- package/src/patterns/get/tl.ts +30 -0
- package/src/patterns/get/uk.ts +57 -0
- package/src/patterns/get/vi.ts +48 -0
- package/src/patterns/grammar-transformed/index.ts +39 -0
- package/src/patterns/grammar-transformed/ja.ts +1713 -0
- package/src/patterns/grammar-transformed/ko.ts +1311 -0
- package/src/patterns/grammar-transformed/tr.ts +1067 -0
- package/src/patterns/hide/ar.ts +67 -0
- package/src/patterns/hide/bn.ts +47 -0
- package/src/patterns/hide/de.ts +36 -0
- package/src/patterns/hide/hi.ts +61 -0
- package/src/patterns/hide/index.ts +91 -0
- package/src/patterns/hide/it.ts +56 -0
- package/src/patterns/hide/ja.ts +69 -0
- package/src/patterns/hide/ko.ts +69 -0
- package/src/patterns/hide/ms.ts +30 -0
- package/src/patterns/hide/pl.ts +57 -0
- package/src/patterns/hide/ru.ts +57 -0
- package/src/patterns/hide/th.ts +29 -0
- package/src/patterns/hide/tl.ts +30 -0
- package/src/patterns/hide/tr.ts +65 -0
- package/src/patterns/hide/uk.ts +57 -0
- package/src/patterns/hide/vi.ts +56 -0
- package/src/patterns/hide/zh.ts +68 -0
- package/src/patterns/increment/bn.ts +70 -0
- package/src/patterns/increment/de.ts +36 -0
- package/src/patterns/increment/hi.ts +68 -0
- package/src/patterns/increment/index.ts +79 -0
- package/src/patterns/increment/it.ts +69 -0
- package/src/patterns/increment/ms.ts +30 -0
- package/src/patterns/increment/pl.ts +58 -0
- package/src/patterns/increment/ru.ts +58 -0
- package/src/patterns/increment/th.ts +49 -0
- package/src/patterns/increment/tl.ts +30 -0
- package/src/patterns/increment/tr.ts +52 -0
- package/src/patterns/increment/uk.ts +58 -0
- package/src/patterns/increment/vi.ts +61 -0
- package/src/patterns/increment/zh.ts +32 -0
- package/src/patterns/index.ts +84 -0
- package/src/patterns/languages/en/control-flow.ts +93 -0
- package/src/patterns/languages/en/fetch.ts +62 -0
- package/src/patterns/languages/en/index.ts +42 -0
- package/src/patterns/languages/en/repeat.ts +67 -0
- package/src/patterns/languages/en/set.ts +48 -0
- package/src/patterns/languages/en/swap.ts +38 -0
- package/src/patterns/languages/en/temporal.ts +57 -0
- package/src/patterns/put/ar.ts +74 -0
- package/src/patterns/put/bn.ts +53 -0
- package/src/patterns/put/en.ts +74 -0
- package/src/patterns/put/es.ts +74 -0
- package/src/patterns/put/hi.ts +69 -0
- package/src/patterns/put/id.ts +96 -0
- package/src/patterns/put/index.ts +99 -0
- package/src/patterns/put/it.ts +56 -0
- package/src/patterns/put/ja.ts +75 -0
- package/src/patterns/put/ko.ts +67 -0
- package/src/patterns/put/ms.ts +30 -0
- package/src/patterns/put/pl.ts +81 -0
- package/src/patterns/put/ru.ts +85 -0
- package/src/patterns/put/th.ts +32 -0
- package/src/patterns/put/tl.ts +30 -0
- package/src/patterns/put/tr.ts +67 -0
- package/src/patterns/put/uk.ts +85 -0
- package/src/patterns/put/vi.ts +72 -0
- package/src/patterns/put/zh.ts +62 -0
- package/src/patterns/registry.ts +163 -0
- package/src/patterns/remove/ar.ts +71 -0
- package/src/patterns/remove/bn.ts +68 -0
- package/src/patterns/remove/hi.ts +69 -0
- package/src/patterns/remove/index.ts +87 -0
- package/src/patterns/remove/it.ts +69 -0
- package/src/patterns/remove/ja.ts +74 -0
- package/src/patterns/remove/ko.ts +78 -0
- package/src/patterns/remove/ms.ts +30 -0
- package/src/patterns/remove/pl.ts +62 -0
- package/src/patterns/remove/ru.ts +62 -0
- package/src/patterns/remove/th.ts +49 -0
- package/src/patterns/remove/tl.ts +30 -0
- package/src/patterns/remove/tr.ts +78 -0
- package/src/patterns/remove/uk.ts +62 -0
- package/src/patterns/remove/vi.ts +61 -0
- package/src/patterns/remove/zh.ts +72 -0
- package/src/patterns/set/ar.ts +84 -0
- package/src/patterns/set/bn.ts +53 -0
- package/src/patterns/set/de.ts +84 -0
- package/src/patterns/set/es.ts +92 -0
- package/src/patterns/set/fr.ts +88 -0
- package/src/patterns/set/hi.ts +56 -0
- package/src/patterns/set/id.ts +84 -0
- package/src/patterns/set/index.ts +107 -0
- package/src/patterns/set/it.ts +56 -0
- package/src/patterns/set/ja.ts +86 -0
- package/src/patterns/set/ko.ts +85 -0
- package/src/patterns/set/ms.ts +30 -0
- package/src/patterns/set/pl.ts +57 -0
- package/src/patterns/set/pt.ts +84 -0
- package/src/patterns/set/ru.ts +57 -0
- package/src/patterns/set/th.ts +31 -0
- package/src/patterns/set/tl.ts +30 -0
- package/src/patterns/set/tr.ts +107 -0
- package/src/patterns/set/uk.ts +57 -0
- package/src/patterns/set/vi.ts +53 -0
- package/src/patterns/set/zh.ts +84 -0
- package/src/patterns/show/ar.ts +67 -0
- package/src/patterns/show/bn.ts +47 -0
- package/src/patterns/show/de.ts +32 -0
- package/src/patterns/show/fr.ts +32 -0
- package/src/patterns/show/hi.ts +61 -0
- package/src/patterns/show/index.ts +95 -0
- package/src/patterns/show/it.ts +56 -0
- package/src/patterns/show/ja.ts +69 -0
- package/src/patterns/show/ko.ts +73 -0
- package/src/patterns/show/ms.ts +30 -0
- package/src/patterns/show/pl.ts +57 -0
- package/src/patterns/show/ru.ts +57 -0
- package/src/patterns/show/th.ts +29 -0
- package/src/patterns/show/tl.ts +30 -0
- package/src/patterns/show/tr.ts +65 -0
- package/src/patterns/show/uk.ts +57 -0
- package/src/patterns/show/vi.ts +56 -0
- package/src/patterns/show/zh.ts +68 -0
- package/src/patterns/take/ar.ts +51 -0
- package/src/patterns/take/index.ts +31 -0
- package/src/patterns/toggle/ar.ts +61 -0
- package/src/patterns/toggle/bn.ts +70 -0
- package/src/patterns/toggle/en.ts +61 -0
- package/src/patterns/toggle/es.ts +61 -0
- package/src/patterns/toggle/hi.ts +80 -0
- package/src/patterns/toggle/index.ts +95 -0
- package/src/patterns/toggle/it.ts +69 -0
- package/src/patterns/toggle/ja.ts +156 -0
- package/src/patterns/toggle/ko.ts +113 -0
- package/src/patterns/toggle/ms.ts +30 -0
- package/src/patterns/toggle/pl.ts +62 -0
- package/src/patterns/toggle/ru.ts +62 -0
- package/src/patterns/toggle/th.ts +50 -0
- package/src/patterns/toggle/tl.ts +30 -0
- package/src/patterns/toggle/tr.ts +88 -0
- package/src/patterns/toggle/uk.ts +62 -0
- package/src/patterns/toggle/vi.ts +61 -0
- package/src/patterns/toggle/zh.ts +99 -0
- package/src/public-api.ts +286 -0
- package/src/registry.ts +441 -0
- package/src/tokenizers/arabic.ts +723 -0
- package/src/tokenizers/base.ts +1300 -0
- package/src/tokenizers/bengali.ts +289 -0
- package/src/tokenizers/chinese.ts +481 -0
- package/src/tokenizers/english.ts +416 -0
- package/src/tokenizers/french.ts +326 -0
- package/src/tokenizers/german.ts +324 -0
- package/src/tokenizers/hindi.ts +319 -0
- package/src/tokenizers/index.ts +127 -0
- package/src/tokenizers/indonesian.ts +306 -0
- package/src/tokenizers/italian.ts +458 -0
- package/src/tokenizers/japanese.ts +447 -0
- package/src/tokenizers/korean.ts +642 -0
- package/src/tokenizers/morphology/arabic-normalizer.ts +242 -0
- package/src/tokenizers/morphology/french-normalizer.ts +268 -0
- package/src/tokenizers/morphology/german-normalizer.ts +256 -0
- package/src/tokenizers/morphology/index.ts +46 -0
- package/src/tokenizers/morphology/italian-normalizer.ts +329 -0
- package/src/tokenizers/morphology/japanese-normalizer.ts +288 -0
- package/src/tokenizers/morphology/korean-normalizer.ts +428 -0
- package/src/tokenizers/morphology/polish-normalizer.ts +264 -0
- package/src/tokenizers/morphology/portuguese-normalizer.ts +310 -0
- package/src/tokenizers/morphology/spanish-normalizer.ts +327 -0
- package/src/tokenizers/morphology/turkish-normalizer.ts +412 -0
- package/src/tokenizers/morphology/types.ts +211 -0
- package/src/tokenizers/ms.ts +198 -0
- package/src/tokenizers/polish.ts +354 -0
- package/src/tokenizers/portuguese.ts +304 -0
- package/src/tokenizers/quechua.ts +339 -0
- package/src/tokenizers/russian.ts +375 -0
- package/src/tokenizers/spanish.ts +403 -0
- package/src/tokenizers/swahili.ts +303 -0
- package/src/tokenizers/thai.ts +236 -0
- package/src/tokenizers/tl.ts +198 -0
- package/src/tokenizers/turkish.ts +411 -0
- package/src/tokenizers/ukrainian.ts +369 -0
- package/src/tokenizers/vietnamese.ts +410 -0
- package/src/types/grammar-types.ts +617 -0
- package/src/types/unified-profile.ts +267 -0
- package/src/types.ts +709 -0
- package/src/utils/confidence-calculator.ts +147 -0
- package/src/validators/command-validator.ts +380 -0
- package/src/validators/index.ts +15 -0
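The dist/languages/* bundles and src/languages/* modules above are per-language entry points. The registry source included later in this diff documents a self-registration pattern: importing a language module registers its tokenizer and profile, so only imported languages end up in the bundle. A minimal sketch of that usage, assuming the package's subpath exports mirror the dist/languages/* layout and that parse is exported from the root (as the registry's own @example shows):

```typescript
// Sketch based on the @example in package/src/registry.ts shown below.
// Assumes subpath exports '@lokascript/semantic/languages/<code>' exist in package.json.
import '@lokascript/semantic/languages/en';
import '@lokascript/semantic/languages/es';
import { parse } from '@lokascript/semantic';

parse('toggle .active', 'en');   // works: 'en' registered itself on import
parse('alternar .activo', 'es'); // works: 'es' registered itself on import
// parse('切り替え .active', 'ja') would throw: the 'ja' module was never imported
```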
@@ -0,0 +1 @@
+
{"version":3,"sources":["../../src/registry.ts","../../src/tokenizers/base.ts","../../src/tokenizers/morphology/types.ts","../../src/tokenizers/morphology/japanese-normalizer.ts","../../src/generators/profiles/japanese.ts","../../src/tokenizers/japanese.ts","../../src/languages/ja.ts"],"sourcesContent":["/**\n * Language Registry\n *\n * Central registration point for language support in the semantic parser.\n * Languages self-register when their modules are imported, enabling\n * tree-shaking for minimal bundles.\n *\n * @example\n * ```typescript\n * // Import only the languages you need\n * import '@lokascript/semantic/languages/en';\n * import '@lokascript/semantic/languages/es';\n *\n * // Now parse works for registered languages\n * import { parse } from '@lokascript/semantic';\n * parse('toggle .active', 'en'); // Works\n * parse('alternar .activo', 'es'); // Works\n * parse('切り替え .active', 'ja'); // Error: Language not registered\n * ```\n */\n\nimport type { LanguageTokenizer, LanguagePattern, TokenStream } from './types';\n\n// Re-export profile types from generators for convenience\nexport type {\n LanguageProfile,\n WordOrder,\n MarkingStrategy,\n RoleMarker,\n VerbConfig,\n PossessiveConfig,\n KeywordTranslation,\n TokenizationConfig,\n} from './generators/language-profiles';\n\nimport type { LanguageProfile } from './generators/language-profiles';\n\n// =============================================================================\n// External Pattern Source Interface\n// =============================================================================\n\n/**\n * Interface for external pattern sources (e.g., @lokascript/patterns-reference database).\n * External sources can provide additional patterns at runtime.\n */\nexport interface ExternalPatternsSource {\n /** Unique identifier for the source */\n id: string;\n /** Human-readable name */\n name: string;\n /** Get patterns for a specific language */\n getPatternsForLanguage(language: string): Promise<ExternalPatternEntry[]>;\n /** Get patterns for a specific command */\n getPatternsForCommand(command: string, language?: string): Promise<ExternalPatternEntry[]>;\n /** Check if source has patterns for a language */\n hasPatterns(language: string): Promise<boolean>;\n /** Get all supported languages */\n getSupportedLanguages(): Promise<string[]>;\n}\n\n/**\n * Pattern entry from external source.\n */\nexport interface ExternalPatternEntry {\n id: string;\n code: string;\n command: string | null;\n language: string;\n confidence: number;\n verified: boolean;\n title?: string;\n category?: string;\n}\n\n// =============================================================================\n// Registry State\n// =============================================================================\n\nconst tokenizers = new Map<string, LanguageTokenizer>();\nconst profiles = new Map<string, LanguageProfile>();\nconst patternCache = new Map<string, LanguagePattern[]>();\n\n// External pattern sources (e.g., @lokascript/patterns-reference database)\nconst externalSources = new Map<string, ExternalPatternsSource>();\n\n// Pattern generator function - set by patterns module to avoid circular deps\nlet patternGenerator: ((profile: LanguageProfile) => LanguagePattern[]) | null = null;\n\n// =============================================================================\n// Registration Functions\n// =============================================================================\n\n/**\n * Register a language with its tokenizer and profile.\n * Called 
automatically by language modules when imported.\n */\nexport function registerLanguage(\n code: string,\n tokenizer: LanguageTokenizer,\n profile: LanguageProfile\n): void {\n tokenizers.set(code, tokenizer);\n profiles.set(code, profile);\n // Clear pattern cache for this language if it was previously cached\n patternCache.delete(code);\n}\n\n/**\n * Register only a tokenizer (for backwards compatibility).\n */\nexport function registerTokenizer(tokenizer: LanguageTokenizer): void {\n tokenizers.set(tokenizer.language, tokenizer);\n}\n\n/**\n * Register only a profile (for backwards compatibility).\n */\nexport function registerProfile(profile: LanguageProfile): void {\n profiles.set(profile.code, profile);\n patternCache.delete(profile.code);\n}\n\n/**\n * Set the pattern generator function.\n * Called by patterns module to inject the generator without circular deps.\n */\nexport function setPatternGenerator(\n generator: (profile: LanguageProfile) => LanguagePattern[]\n): void {\n patternGenerator = generator;\n}\n\n// Direct pattern registration map (for tree-shaking)\nconst registeredPatterns = new Map<string, LanguagePattern[]>();\n\n/**\n * Register patterns directly for a language.\n * This enables tree-shaking by allowing each language module to register\n * only its own patterns.\n */\nexport function registerPatterns(code: string, patterns: LanguagePattern[]): void {\n registeredPatterns.set(code, patterns);\n // Clear cached patterns if any\n patternCache.delete(code);\n}\n\n/**\n * Check if patterns are directly registered for a language.\n */\nexport function hasRegisteredPatterns(code: string): boolean {\n return registeredPatterns.has(code);\n}\n\n/**\n * Get directly registered patterns for a language.\n */\nexport function getRegisteredPatterns(code: string): LanguagePattern[] | undefined {\n return registeredPatterns.get(code);\n}\n\n// =============================================================================\n// External Pattern Sources\n// =============================================================================\n\n/**\n * Register an external pattern source.\n * External sources (like @lokascript/patterns-reference) can provide\n * additional patterns at runtime.\n *\n * @example\n * ```typescript\n * import { registerPatternsSource } from '@lokascript/semantic';\n * import { createPatternsProvider } from '@lokascript/patterns-reference';\n *\n * const provider = createPatternsProvider();\n * registerPatternsSource(provider);\n * ```\n */\nexport function registerPatternsSource(source: ExternalPatternsSource): void {\n externalSources.set(source.id, source);\n}\n\n/**\n * Unregister an external pattern source.\n */\nexport function unregisterPatternsSource(sourceId: string): boolean {\n return externalSources.delete(sourceId);\n}\n\n/**\n * Get a registered external pattern source.\n */\nexport function getPatternsSource(sourceId: string): ExternalPatternsSource | undefined {\n return externalSources.get(sourceId);\n}\n\n/**\n * Get all registered external pattern sources.\n */\nexport function getAllPatternsSources(): ExternalPatternsSource[] {\n return Array.from(externalSources.values());\n}\n\n/**\n * Check if any external pattern sources are registered.\n */\nexport function hasExternalSources(): boolean {\n return externalSources.size > 0;\n}\n\n/**\n * Query patterns from all external sources for a language.\n * Returns patterns sorted by confidence.\n */\nexport async function queryExternalPatterns(language: string): Promise<ExternalPatternEntry[]> 
{\n if (externalSources.size === 0) {\n return [];\n }\n\n const allPatterns: ExternalPatternEntry[] = [];\n\n for (const source of externalSources.values()) {\n try {\n const patterns = await source.getPatternsForLanguage(language);\n allPatterns.push(...patterns);\n } catch (error) {\n console.warn(\n `[Registry] Failed to query patterns from ${source.name}:`,\n error instanceof Error ? error.message : String(error)\n );\n }\n }\n\n // Sort by confidence (highest first)\n return allPatterns.sort((a, b) => b.confidence - a.confidence);\n}\n\n/**\n * Query patterns from all external sources for a command.\n */\nexport async function queryExternalPatternsForCommand(\n command: string,\n language?: string\n): Promise<ExternalPatternEntry[]> {\n if (externalSources.size === 0) {\n return [];\n }\n\n const allPatterns: ExternalPatternEntry[] = [];\n\n for (const source of externalSources.values()) {\n try {\n const patterns = await source.getPatternsForCommand(command, language);\n allPatterns.push(...patterns);\n } catch (error) {\n console.warn(\n `[Registry] Failed to query patterns from ${source.name}:`,\n error instanceof Error ? error.message : String(error)\n );\n }\n }\n\n return allPatterns.sort((a, b) => b.confidence - a.confidence);\n}\n\n// =============================================================================\n// Query Functions\n// =============================================================================\n\n/**\n * Get a tokenizer for the specified language.\n * @throws Error if language is not registered\n */\nexport function getTokenizer(code: string): LanguageTokenizer {\n const tokenizer = tokenizers.get(code);\n if (!tokenizer) {\n const registered = Array.from(tokenizers.keys()).join(', ');\n throw new Error(\n `Language '${code}' is not registered. ` +\n `Registered languages: ${registered || 'none'}. ` +\n `Import the language module first: import '@lokascript/semantic/languages/${code}';`\n );\n }\n return tokenizer;\n}\n\n/**\n * Get a profile for the specified language.\n * @throws Error if language is not registered\n */\nexport function getProfile(code: string): LanguageProfile {\n const profile = profiles.get(code);\n if (!profile) {\n const registered = Array.from(profiles.keys()).join(', ');\n throw new Error(\n `Language profile '${code}' is not registered. ` +\n `Registered languages: ${registered || 'none'}. 
` +\n `Import the language module first: import '@lokascript/semantic/languages/${code}';`\n );\n }\n return profile;\n}\n\n/**\n * Try to get a tokenizer, returning undefined if not registered.\n */\nexport function tryGetTokenizer(code: string): LanguageTokenizer | undefined {\n return tokenizers.get(code);\n}\n\n/**\n * Try to get a profile, returning undefined if not registered.\n */\nexport function tryGetProfile(code: string): LanguageProfile | undefined {\n return profiles.get(code);\n}\n\n/**\n * Get all registered language codes.\n */\nexport function getRegisteredLanguages(): string[] {\n return Array.from(tokenizers.keys());\n}\n\n/**\n * Check if a language is registered.\n */\nexport function isLanguageRegistered(code: string): boolean {\n return tokenizers.has(code) && profiles.has(code);\n}\n\n/**\n * Check if a language is supported (alias for isLanguageRegistered).\n * For backwards compatibility with tokenizers API.\n */\nexport function isLanguageSupported(code: string): boolean {\n return tokenizers.has(code);\n}\n\n// =============================================================================\n// Tokenization\n// =============================================================================\n\n/**\n * Tokenize input in the specified language.\n * @throws Error if language is not registered\n */\nexport function tokenize(input: string, language: string): TokenStream {\n const tokenizer = getTokenizer(language);\n return tokenizer.tokenize(input);\n}\n\n// =============================================================================\n// Pattern Access (Lazy Generation)\n// =============================================================================\n\n/**\n * Get patterns for a specific language.\n * First checks for directly registered patterns (for tree-shaking),\n * then falls back to pattern generator.\n * @throws Error if language is not registered\n */\nexport function getPatternsForLanguage(code: string): LanguagePattern[] {\n // Check cache first\n const cached = patternCache.get(code);\n if (cached) {\n return cached;\n }\n\n // Check for directly registered patterns (tree-shakeable path)\n const registered = registeredPatterns.get(code);\n if (registered) {\n patternCache.set(code, registered);\n return registered;\n }\n\n // Fall back to pattern generator\n if (!patternGenerator) {\n throw new Error(\n `No patterns registered for language '${code}'. 
` +\n 'Either import the language module or set a pattern generator.'\n );\n }\n\n // Get profile (throws if not registered)\n const profile = getProfile(code);\n const patterns = patternGenerator(profile);\n patternCache.set(code, patterns);\n return patterns;\n}\n\n/**\n * Get patterns for a specific language and command.\n */\nexport function getPatternsForLanguageAndCommand(\n language: string,\n command: string\n): LanguagePattern[] {\n return getPatternsForLanguage(language)\n .filter(p => p.command === command)\n .sort((a, b) => b.priority - a.priority);\n}\n\n/**\n * Clear the pattern cache for a language (useful for testing).\n */\nexport function clearPatternCache(code?: string): void {\n if (code) {\n patternCache.delete(code);\n } else {\n patternCache.clear();\n }\n}\n\n// =============================================================================\n// Backwards Compatibility\n// =============================================================================\n\n/**\n * Get all profiles as a record (for backwards compatibility).\n * Note: Only returns registered profiles.\n */\nexport function getAllProfiles(): Record<string, LanguageProfile> {\n const result: Record<string, LanguageProfile> = {};\n for (const [code, profile] of profiles) {\n result[code] = profile;\n }\n return result;\n}\n\n/**\n * Get all tokenizers as a record (for backwards compatibility).\n * Note: Only returns registered tokenizers.\n */\nexport function getAllTokenizers(): Record<string, LanguageTokenizer> {\n const result: Record<string, LanguageTokenizer> = {};\n for (const [code, tokenizer] of tokenizers) {\n result[code] = tokenizer;\n }\n return result;\n}\n","/**\n * Base Tokenizer\n *\n * Provides the TokenStream implementation and shared tokenization utilities.\n * Language-specific tokenizers extend these base utilities.\n */\n\nimport type {\n LanguageToken,\n TokenKind,\n TokenStream,\n StreamMark,\n SourcePosition,\n LanguageTokenizer,\n} from '../types';\nimport type { MorphologicalNormalizer, NormalizationResult } from './morphology/types';\n\n// =============================================================================\n// Time Unit Configuration\n// =============================================================================\n\n/**\n * Configuration for a native language time unit pattern.\n * Used by tryNumberWithTimeUnits() to match language-specific time units.\n */\nexport interface TimeUnitMapping {\n /** The pattern to match (e.g., 'segundos', 'ミリ秒') */\n readonly pattern: string;\n /** The standard suffix to use (ms, s, m, h) */\n readonly suffix: string;\n /** Length of the pattern (for optimization) */\n readonly length: number;\n /** Whether to check for word boundary after the pattern */\n readonly checkBoundary?: boolean;\n /** Character that cannot follow the pattern (e.g., 's' for 'm' to avoid 'ms') */\n readonly notFollowedBy?: string;\n /** Whether to do case-insensitive matching */\n readonly caseInsensitive?: boolean;\n}\n\n// =============================================================================\n// Token Stream Implementation\n// =============================================================================\n\n/**\n * Concrete implementation of TokenStream.\n */\nexport class TokenStreamImpl implements TokenStream {\n readonly tokens: readonly LanguageToken[];\n readonly language: string;\n private pos: number = 0;\n\n constructor(tokens: LanguageToken[], language: string) {\n this.tokens = tokens;\n this.language = language;\n }\n\n peek(offset: number = 0): 
LanguageToken | null {\n const index = this.pos + offset;\n if (index < 0 || index >= this.tokens.length) {\n return null;\n }\n return this.tokens[index];\n }\n\n advance(): LanguageToken {\n if (this.isAtEnd()) {\n throw new Error('Unexpected end of token stream');\n }\n return this.tokens[this.pos++];\n }\n\n isAtEnd(): boolean {\n return this.pos >= this.tokens.length;\n }\n\n mark(): StreamMark {\n return { position: this.pos };\n }\n\n reset(mark: StreamMark): void {\n this.pos = mark.position;\n }\n\n position(): number {\n return this.pos;\n }\n\n /**\n * Get remaining tokens as an array.\n */\n remaining(): LanguageToken[] {\n return this.tokens.slice(this.pos);\n }\n\n /**\n * Consume tokens while predicate is true.\n */\n takeWhile(predicate: (token: LanguageToken) => boolean): LanguageToken[] {\n const result: LanguageToken[] = [];\n while (!this.isAtEnd() && predicate(this.peek()!)) {\n result.push(this.advance());\n }\n return result;\n }\n\n /**\n * Skip tokens while predicate is true.\n */\n skipWhile(predicate: (token: LanguageToken) => boolean): void {\n while (!this.isAtEnd() && predicate(this.peek()!)) {\n this.advance();\n }\n }\n}\n\n// =============================================================================\n// Shared Tokenization Utilities\n// =============================================================================\n\n/**\n * Create a source position from start and end offsets.\n */\nexport function createPosition(start: number, end: number): SourcePosition {\n return { start, end };\n}\n\n/**\n * Options for creating a token with optional morphological data.\n */\nexport interface CreateTokenOptions {\n /** Explicitly normalized form from keyword map */\n normalized?: string;\n /** Morphologically normalized stem */\n stem?: string;\n /** Confidence in the stem (0.0-1.0) */\n stemConfidence?: number;\n}\n\n/**\n * Create a language token.\n */\nexport function createToken(\n value: string,\n kind: TokenKind,\n position: SourcePosition,\n normalizedOrOptions?: string | CreateTokenOptions\n): LanguageToken {\n // Handle legacy string argument for backward compatibility\n if (typeof normalizedOrOptions === 'string') {\n return { value, kind, position, normalized: normalizedOrOptions };\n }\n\n // Handle options object\n if (normalizedOrOptions) {\n const { normalized, stem, stemConfidence } = normalizedOrOptions;\n const token: LanguageToken = { value, kind, position };\n\n // Build token with only defined properties\n if (normalized !== undefined) {\n (token as any).normalized = normalized;\n }\n if (stem !== undefined) {\n (token as any).stem = stem;\n if (stemConfidence !== undefined) {\n (token as any).stemConfidence = stemConfidence;\n }\n }\n\n return token;\n }\n\n return { value, kind, position };\n}\n\n/**\n * Check if a character is whitespace.\n */\nexport function isWhitespace(char: string): boolean {\n return /\\s/.test(char);\n}\n\n/**\n * Check if a string starts with a CSS selector prefix.\n * Includes JSX-style element selectors: <form />, <div>\n */\nexport function isSelectorStart(char: string): boolean {\n return (\n char === '#' || char === '.' 
|| char === '[' || char === '@' || char === '*' || char === '<'\n );\n}\n\n/**\n * Check if a character is a quote (string delimiter).\n */\nexport function isQuote(char: string): boolean {\n return char === '\"' || char === \"'\" || char === '`' || char === '「' || char === '」';\n}\n\n/**\n * Check if a character is a digit.\n */\nexport function isDigit(char: string): boolean {\n return /\\d/.test(char);\n}\n\n/**\n * Check if a character is an ASCII letter.\n */\nexport function isAsciiLetter(char: string): boolean {\n return /[a-zA-Z]/.test(char);\n}\n\n/**\n * Check if a character is part of an ASCII identifier.\n */\nexport function isAsciiIdentifierChar(char: string): boolean {\n return /[a-zA-Z0-9_-]/.test(char);\n}\n\n// =============================================================================\n// Unicode Range Classification\n// =============================================================================\n\n/**\n * Unicode range tuple: [start, end] (inclusive).\n */\nexport type UnicodeRange = readonly [number, number];\n\n/**\n * Create a character classifier for Unicode ranges.\n * Returns a function that checks if a character's code point falls within any of the ranges.\n *\n * @example\n * // Japanese Hiragana\n * const isHiragana = createUnicodeRangeClassifier([[0x3040, 0x309f]]);\n *\n * // Korean (Hangul syllables + Jamo)\n * const isKorean = createUnicodeRangeClassifier([\n * [0xac00, 0xd7a3], // Hangul syllables\n * [0x1100, 0x11ff], // Hangul Jamo\n * [0x3130, 0x318f], // Hangul Compatibility Jamo\n * ]);\n */\nexport function createUnicodeRangeClassifier(\n ranges: readonly UnicodeRange[]\n): (char: string) => boolean {\n return (char: string): boolean => {\n const code = char.charCodeAt(0);\n return ranges.some(([start, end]) => code >= start && code <= end);\n };\n}\n\n/**\n * Combine multiple character classifiers into one.\n * Returns true if any of the classifiers return true.\n *\n * @example\n * const isJapanese = combineClassifiers(isHiragana, isKatakana, isKanji);\n */\nexport function combineClassifiers(\n ...classifiers: Array<(char: string) => boolean>\n): (char: string) => boolean {\n return (char: string): boolean => classifiers.some(fn => fn(char));\n}\n\n/**\n * Character classifiers for a Latin-based language.\n */\nexport interface LatinCharClassifiers {\n /** Check if character is a letter in this language (including accented chars). */\n isLetter: (char: string) => boolean;\n /** Check if character is part of an identifier (letter, digit, underscore, hyphen). 
*/\n isIdentifierChar: (char: string) => boolean;\n}\n\n/**\n * Create character classifiers for a Latin-based language.\n * Returns isLetter and isIdentifierChar functions based on the provided regex.\n *\n * @example\n * // Spanish letters\n * const { isLetter, isIdentifierChar } = createLatinCharClassifiers(/[a-zA-ZáéíóúüñÁÉÍÓÚÜÑ]/);\n *\n * // German letters\n * const { isLetter, isIdentifierChar } = createLatinCharClassifiers(/[a-zA-ZäöüÄÖÜß]/);\n */\nexport function createLatinCharClassifiers(letterPattern: RegExp): LatinCharClassifiers {\n const isLetter = (char: string): boolean => letterPattern.test(char);\n const isIdentifierChar = (char: string): boolean => isLetter(char) || /[0-9_-]/.test(char);\n return { isLetter, isIdentifierChar };\n}\n\n// =============================================================================\n// CSS Selector Tokenization\n// =============================================================================\n\n/**\n * Extract a CSS selector from the input string starting at pos.\n * CSS selectors are universal across languages.\n *\n * Supported formats:\n * - #id\n * - .class\n * - [attribute]\n * - [attribute=value]\n * - @attribute (shorthand)\n * - *property (CSS property shorthand)\n * - Complex selectors with combinators (limited)\n *\n * Method call handling:\n * - #dialog.showModal() → stops after #dialog (method call, not compound selector)\n * - #box.active → compound selector (no parens)\n */\nexport function extractCssSelector(input: string, startPos: number): string | null {\n if (startPos >= input.length) return null;\n\n const char = input[startPos];\n if (!isSelectorStart(char)) return null;\n\n let pos = startPos;\n let selector = '';\n\n // Handle different selector types\n if (char === '#' || char === '.') {\n // ID or class selector: #id, .class\n selector += input[pos++];\n while (pos < input.length && isAsciiIdentifierChar(input[pos])) {\n selector += input[pos++];\n }\n // Must have at least one character after prefix\n if (selector.length <= 1) return null;\n\n // Check for method call pattern: #id.method() or .class.method()\n // If we see .identifier followed by (, don't consume it - it's a method call\n if (pos < input.length && input[pos] === '.' 
&& char === '#') {\n // Look ahead to see if this is a method call\n const methodStart = pos + 1;\n let methodEnd = methodStart;\n while (methodEnd < input.length && isAsciiIdentifierChar(input[methodEnd])) {\n methodEnd++;\n }\n // If followed by (, it's a method call - stop here\n if (methodEnd < input.length && input[methodEnd] === '(') {\n return selector;\n }\n }\n } else if (char === '[') {\n // Attribute selector: [attr] or [attr=value] or [attr=\"value\"]\n // Need to track quote state to avoid counting brackets inside quotes\n let depth = 1;\n let inQuote = false;\n let quoteChar: string | null = null;\n let escaped = false;\n\n selector += input[pos++]; // [\n\n while (pos < input.length && depth > 0) {\n const c = input[pos];\n selector += c;\n\n if (escaped) {\n // Skip escaped character\n escaped = false;\n } else if (c === '\\\\') {\n // Next character is escaped\n escaped = true;\n } else if (inQuote) {\n // Inside a quoted string\n if (c === quoteChar) {\n inQuote = false;\n quoteChar = null;\n }\n } else {\n // Not inside a quoted string\n if (c === '\"' || c === \"'\" || c === '`') {\n inQuote = true;\n quoteChar = c;\n } else if (c === '[') {\n depth++;\n } else if (c === ']') {\n depth--;\n }\n }\n pos++;\n }\n if (depth !== 0) return null;\n } else if (char === '@') {\n // Attribute shorthand: @disabled\n selector += input[pos++];\n while (pos < input.length && isAsciiIdentifierChar(input[pos])) {\n selector += input[pos++];\n }\n if (selector.length <= 1) return null;\n } else if (char === '*') {\n // CSS property shorthand: *display\n selector += input[pos++];\n while (pos < input.length && isAsciiIdentifierChar(input[pos])) {\n selector += input[pos++];\n }\n if (selector.length <= 1) return null;\n } else if (char === '<') {\n // HTML literal selector with optional modifiers and attributes:\n // - <div>\n // - <div.class>\n // - <div#id>\n // - <div.class#id>\n // - <button[disabled]/>\n // - <div.card/>\n // - <div.class#id[attr=\"value\"]/>\n selector += input[pos++]; // <\n\n // Must be followed by an identifier (tag name)\n if (pos >= input.length || !isAsciiLetter(input[pos])) return null;\n\n // Extract tag name\n while (pos < input.length && isAsciiIdentifierChar(input[pos])) {\n selector += input[pos++];\n }\n\n // Process modifiers and attributes\n // Can have multiple .class, one #id, and multiple [attr] in any order\n while (pos < input.length) {\n const modChar = input[pos];\n\n if (modChar === '.') {\n // Class modifier\n selector += input[pos++]; // .\n if (pos >= input.length || !isAsciiIdentifierChar(input[pos])) {\n return null; // Invalid - class name required after .\n }\n while (pos < input.length && isAsciiIdentifierChar(input[pos])) {\n selector += input[pos++];\n }\n } else if (modChar === '#') {\n // ID modifier\n selector += input[pos++]; // #\n if (pos >= input.length || !isAsciiIdentifierChar(input[pos])) {\n return null; // Invalid - ID required after #\n }\n while (pos < input.length && isAsciiIdentifierChar(input[pos])) {\n selector += input[pos++];\n }\n } else if (modChar === '[') {\n // Attribute modifier: [disabled] or [type=\"button\"]\n // Need to track quote state to avoid counting brackets inside quotes\n let depth = 1;\n let inQuote = false;\n let quoteChar: string | null = null;\n let escaped = false;\n\n selector += input[pos++]; // [\n\n while (pos < input.length && depth > 0) {\n const c = input[pos];\n selector += c;\n\n if (escaped) {\n escaped = false;\n } else if (c === '\\\\') {\n escaped = true;\n } else if (inQuote) 
{\n if (c === quoteChar) {\n inQuote = false;\n quoteChar = null;\n }\n } else {\n if (c === '\"' || c === \"'\" || c === '`') {\n inQuote = true;\n quoteChar = c;\n } else if (c === '[') {\n depth++;\n } else if (c === ']') {\n depth--;\n }\n }\n pos++;\n }\n if (depth !== 0) return null; // Unclosed bracket\n } else {\n // No more modifiers\n break;\n }\n }\n\n // Skip whitespace before optional self-closing /\n while (pos < input.length && isWhitespace(input[pos])) {\n selector += input[pos++];\n }\n\n // Optional self-closing /\n if (pos < input.length && input[pos] === '/') {\n selector += input[pos++];\n // Skip whitespace after /\n while (pos < input.length && isWhitespace(input[pos])) {\n selector += input[pos++];\n }\n }\n\n // Must end with >\n if (pos >= input.length || input[pos] !== '>') return null;\n selector += input[pos++]; // >\n }\n\n return selector || null;\n}\n\n// =============================================================================\n// String Literal Tokenization\n// =============================================================================\n\n/**\n * Check if a single quote at pos is a possessive marker ('s).\n * Returns true if this looks like possessive, not a string start.\n *\n * Examples:\n * - #element's *opacity → possessive (returns true)\n * - 'hello' → string (returns false)\n * - it's value → possessive (returns true)\n */\nexport function isPossessiveMarker(input: string, pos: number): boolean {\n if (pos >= input.length || input[pos] !== \"'\") return false;\n\n // Check if followed by 's' or 'S'\n if (pos + 1 >= input.length) return false;\n const nextChar = input[pos + 1].toLowerCase();\n if (nextChar !== 's') return false;\n\n // After 's, should be end, whitespace, or special char (not alphanumeric)\n if (pos + 2 >= input.length) return true; // end of input\n const afterS = input[pos + 2];\n return isWhitespace(afterS) || afterS === '*' || !isAsciiIdentifierChar(afterS);\n}\n\n/**\n * Extract a string literal from the input starting at pos.\n * Handles both ASCII quotes and Unicode quotes.\n *\n * Note: Single quotes that look like possessive markers ('s) are skipped.\n */\nexport function extractStringLiteral(input: string, startPos: number): string | null {\n if (startPos >= input.length) return null;\n\n const openQuote = input[startPos];\n if (!isQuote(openQuote)) return null;\n\n // Check for possessive marker - don't treat as string\n if (openQuote === \"'\" && isPossessiveMarker(input, startPos)) {\n return null;\n }\n\n // Map opening quotes to closing quotes\n const closeQuoteMap: Record<string, string> = {\n '\"': '\"',\n \"'\": \"'\",\n '`': '`',\n '「': '」',\n };\n\n const closeQuote = closeQuoteMap[openQuote];\n if (!closeQuote) return null;\n\n let pos = startPos + 1;\n let literal = openQuote;\n let escaped = false;\n\n while (pos < input.length) {\n const char = input[pos];\n literal += char;\n\n if (escaped) {\n escaped = false;\n } else if (char === '\\\\') {\n escaped = true;\n } else if (char === closeQuote) {\n // Found closing quote\n return literal;\n }\n pos++;\n }\n\n // Unclosed string - return what we have\n return literal;\n}\n\n// =============================================================================\n// URL Tokenization\n// =============================================================================\n\n/**\n * Check if the input at position starts a URL.\n * Detects: /path, ./path, ../path, //domain.com, http://, https://\n */\nexport function isUrlStart(input: string, pos: number): boolean {\n if 
(pos >= input.length) return false;\n\n const char = input[pos];\n const next = input[pos + 1] || '';\n const third = input[pos + 2] || '';\n\n // Absolute path: /something (but not just /)\n // Must be followed by alphanumeric or path char, not another / (that's protocol-relative)\n if (char === '/' && next !== '/' && /[a-zA-Z0-9._-]/.test(next)) {\n return true;\n }\n\n // Protocol-relative: //domain.com\n if (char === '/' && next === '/' && /[a-zA-Z]/.test(third)) {\n return true;\n }\n\n // Relative path: ./ or ../\n if (char === '.' && (next === '/' || (next === '.' && third === '/'))) {\n return true;\n }\n\n // Full URL: http:// or https://\n const slice = input.slice(pos, pos + 8).toLowerCase();\n if (slice.startsWith('http://') || slice.startsWith('https://')) {\n return true;\n }\n\n return false;\n}\n\n/**\n * Extract a URL from the input starting at pos.\n * Handles paths, query strings, and fragments.\n *\n * Fragment (#) handling:\n * - /page#section → includes fragment as part of URL\n * - #id alone → not a URL (CSS selector)\n */\nexport function extractUrl(input: string, startPos: number): string | null {\n if (!isUrlStart(input, startPos)) return null;\n\n let pos = startPos;\n let url = '';\n\n // Core URL characters (RFC 3986 unreserved + sub-delims + path/query chars)\n // Includes: letters, digits, and - . _ ~ : / ? # [ ] @ ! $ & ' ( ) * + , ; = %\n const urlChars = /[a-zA-Z0-9/:._\\-?&=%@+~!$'()*,;[\\]]/;\n\n while (pos < input.length) {\n const char = input[pos];\n\n // Special handling for #\n if (char === '#') {\n // Only include # if we have path content before it (it's a fragment)\n // If # appears at URL start or after certain chars, stop (might be CSS selector)\n if (url.length > 0 && /[a-zA-Z0-9/.]$/.test(url)) {\n // Include fragment\n url += char;\n pos++;\n // Consume fragment identifier (letters, digits, underscore, hyphen)\n while (pos < input.length && /[a-zA-Z0-9_-]/.test(input[pos])) {\n url += input[pos++];\n }\n }\n // Stop either way - fragment consumed or # is separate token\n break;\n }\n\n if (urlChars.test(char)) {\n url += char;\n pos++;\n } else {\n break;\n }\n }\n\n // Minimum length validation\n if (url.length < 2) return null;\n\n return url;\n}\n\n// =============================================================================\n// Number Tokenization\n// =============================================================================\n\n/**\n * Extract a number from the input starting at pos.\n * Handles integers and decimals.\n */\nexport function extractNumber(input: string, startPos: number): string | null {\n if (startPos >= input.length) return null;\n\n const char = input[startPos];\n if (!isDigit(char) && char !== '-' && char !== '+') return null;\n\n let pos = startPos;\n let number = '';\n\n // Optional sign\n if (input[pos] === '-' || input[pos] === '+') {\n number += input[pos++];\n }\n\n // Must have at least one digit\n if (pos >= input.length || !isDigit(input[pos])) {\n return null;\n }\n\n // Integer part\n while (pos < input.length && isDigit(input[pos])) {\n number += input[pos++];\n }\n\n // Optional decimal part\n if (pos < input.length && input[pos] === '.') {\n number += input[pos++];\n while (pos < input.length && isDigit(input[pos])) {\n number += input[pos++];\n }\n }\n\n // Optional duration suffix (s, ms, m, h)\n if (pos < input.length) {\n const suffix = input.slice(pos, pos + 2);\n if (suffix === 'ms') {\n number += 'ms';\n } else if (input[pos] === 's' || input[pos] === 'm' || input[pos] === 'h') {\n number += 
input[pos];\n }\n }\n\n return number;\n}\n\n// =============================================================================\n// Base Tokenizer Class\n// =============================================================================\n\n/**\n * Keyword entry for tokenizer - maps native word to normalized English form.\n */\nexport interface KeywordEntry {\n readonly native: string;\n readonly normalized: string;\n}\n\n/**\n * Profile interface for keyword derivation.\n * Matches the structure of LanguageProfile but only includes fields needed for tokenization.\n */\nexport interface TokenizerProfile {\n readonly keywords?: Record<\n string,\n { primary: string; alternatives?: string[]; normalized?: string }\n >;\n readonly references?: Record<string, string>;\n readonly roleMarkers?: Record<\n string,\n { primary: string; alternatives?: string[]; position?: string }\n >;\n}\n\n/**\n * Abstract base class for language-specific tokenizers.\n * Provides common functionality for CSS selectors, strings, and numbers.\n */\nexport abstract class BaseTokenizer implements LanguageTokenizer {\n abstract readonly language: string;\n abstract readonly direction: 'ltr' | 'rtl';\n\n /** Optional morphological normalizer for this language */\n protected normalizer?: MorphologicalNormalizer;\n\n /** Keywords derived from profile, sorted longest-first for greedy matching */\n protected profileKeywords: KeywordEntry[] = [];\n\n /** Map for O(1) keyword lookups by lowercase native word */\n protected profileKeywordMap: Map<string, KeywordEntry> = new Map();\n\n abstract tokenize(input: string): TokenStream;\n abstract classifyToken(token: string): TokenKind;\n\n /**\n * Initialize keyword mappings from a language profile.\n * Builds a list of native→english mappings from:\n * - profile.keywords (primary + alternatives)\n * - profile.references (me, it, you, etc.)\n * - profile.roleMarkers (into, from, with, etc.)\n *\n * Results are sorted longest-first for greedy matching (important for non-space languages).\n * Extras take precedence over profile entries when there are duplicates.\n *\n * @param profile - Language profile containing keyword translations\n * @param extras - Additional keyword entries to include (literals, positional, events)\n */\n protected initializeKeywordsFromProfile(\n profile: TokenizerProfile,\n extras: KeywordEntry[] = []\n ): void {\n // Use a Map to deduplicate, with extras taking precedence\n const keywordMap = new Map<string, KeywordEntry>();\n\n // Extract from keywords (command translations)\n if (profile.keywords) {\n for (const [normalized, translation] of Object.entries(profile.keywords)) {\n // Primary translation\n keywordMap.set(translation.primary, {\n native: translation.primary,\n normalized: translation.normalized || normalized,\n });\n\n // Alternative forms\n if (translation.alternatives) {\n for (const alt of translation.alternatives) {\n keywordMap.set(alt, {\n native: alt,\n normalized: translation.normalized || normalized,\n });\n }\n }\n }\n }\n\n // Extract from references (me, it, you, etc.)\n if (profile.references) {\n for (const [normalized, native] of Object.entries(profile.references)) {\n keywordMap.set(native, { native, normalized });\n }\n }\n\n // Extract from roleMarkers (into, from, with, etc.)\n if (profile.roleMarkers) {\n for (const [role, marker] of Object.entries(profile.roleMarkers)) {\n if (marker.primary) {\n keywordMap.set(marker.primary, { native: marker.primary, normalized: role });\n }\n if (marker.alternatives) {\n for (const alt of 
marker.alternatives) {\n keywordMap.set(alt, { native: alt, normalized: role });\n }\n }\n }\n }\n\n // Add extra entries (literals, positional, events) - these OVERRIDE profile entries\n for (const extra of extras) {\n keywordMap.set(extra.native, extra);\n }\n\n // Convert to array and sort longest-first for greedy matching\n this.profileKeywords = Array.from(keywordMap.values()).sort(\n (a, b) => b.native.length - a.native.length\n );\n\n // Build Map for O(1) lookups (case-insensitive + diacritic-insensitive)\n // This allows matching both 'بدّل' (with shadda) and 'بدل' (without) to the same entry\n this.profileKeywordMap = new Map();\n for (const keyword of this.profileKeywords) {\n // Add original form (with diacritics if present)\n this.profileKeywordMap.set(keyword.native.toLowerCase(), keyword);\n\n // Add diacritic-normalized form (for Arabic, Turkish, etc.)\n const normalized = this.removeDiacritics(keyword.native);\n if (normalized !== keyword.native && !this.profileKeywordMap.has(normalized.toLowerCase())) {\n this.profileKeywordMap.set(normalized.toLowerCase(), keyword);\n }\n }\n }\n\n /**\n * Remove diacritical marks from a word for normalization.\n * Primarily for Arabic (shadda, fatha, kasra, damma, sukun, etc.)\n * but could be extended for other languages.\n *\n * @param word - Word to normalize\n * @returns Word without diacritics\n */\n protected removeDiacritics(word: string): string {\n // Arabic diacritics: U+064B-U+0652 (fatha, kasra, damma, sukun, shadda, etc.)\n // U+0670 (superscript alif)\n return word.replace(/[\\u064B-\\u0652\\u0670]/g, '');\n }\n\n /**\n * Try to match a keyword from profile at the current position.\n * Uses longest-first greedy matching (important for non-space languages).\n *\n * @param input - Input string\n * @param pos - Current position\n * @returns Token if matched, null otherwise\n */\n protected tryProfileKeyword(input: string, pos: number): LanguageToken | null {\n for (const entry of this.profileKeywords) {\n if (input.slice(pos).startsWith(entry.native)) {\n return createToken(\n entry.native,\n 'keyword',\n createPosition(pos, pos + entry.native.length),\n entry.normalized\n );\n }\n }\n return null;\n }\n\n /**\n * Check if the remaining input starts with any known keyword.\n * Useful for non-space languages to detect word boundaries.\n *\n * @param input - Input string\n * @param pos - Current position\n * @returns true if a keyword starts at this position\n */\n protected isKeywordStart(input: string, pos: number): boolean {\n const remaining = input.slice(pos);\n return this.profileKeywords.some(entry => remaining.startsWith(entry.native));\n }\n\n /**\n * Look up a keyword by native word (case-insensitive).\n * O(1) lookup using the keyword map.\n *\n * @param native - Native word to look up\n * @returns KeywordEntry if found, undefined otherwise\n */\n protected lookupKeyword(native: string): KeywordEntry | undefined {\n return this.profileKeywordMap.get(native.toLowerCase());\n }\n\n /**\n * Check if a word is a known keyword (case-insensitive).\n * O(1) lookup using the keyword map.\n *\n * @param native - Native word to check\n * @returns true if the word is a keyword\n */\n protected isKeyword(native: string): boolean {\n return this.profileKeywordMap.has(native.toLowerCase());\n }\n\n /**\n * Set the morphological normalizer for this tokenizer.\n */\n setNormalizer(normalizer: MorphologicalNormalizer): void {\n this.normalizer = normalizer;\n }\n\n /**\n * Try to normalize a word using the morphological normalizer.\n 
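// Illustrative sketch (not part of the package source): why the derived keyword list is sorted
// longest-first. With greedy first-match scanning in a language written without spaces, the longer
// alternative must be tried before its own prefix, otherwise 'トグルする' would never match whole.
const sampleEntries: KeywordEntry[] = [
  { native: 'トグルする', normalized: 'toggle' },
  { native: 'トグル', normalized: 'toggle' },
];
function matchLongest(input: string, pos: number, entries: KeywordEntry[]): KeywordEntry | null {
  const sorted = [...entries].sort((a, b) => b.native.length - a.native.length);
  for (const entry of sorted) {
    if (input.startsWith(entry.native, pos)) return entry;
  }
  return null;
}
console.assert(matchLongest('トグルする', 0, sampleEntries)?.native === 'トグルする');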
* Returns null if no normalizer is set or normalization fails.\n *\n * Note: We don't check isNormalizable() here because the individual tokenizers\n * historically called normalize() directly without that check. The normalize()\n * method itself handles returning noChange() for words that can't be normalized.\n */\n protected tryNormalize(word: string): NormalizationResult | null {\n if (!this.normalizer) return null;\n\n const result = this.normalizer.normalize(word);\n\n // Only return if actually normalized (stem differs from input)\n if (result.stem !== word && result.confidence >= 0.7) {\n return result;\n }\n\n return null;\n }\n\n /**\n * Try morphological normalization and keyword lookup.\n *\n * If the word can be normalized to a stem that matches a known keyword,\n * returns a keyword token with morphological metadata (stem, stemConfidence).\n *\n * This is the common pattern for handling conjugated verbs across languages:\n * 1. Normalize the word (e.g., \"toggled\" → \"toggle\")\n * 2. Look up the stem in the keyword map\n * 3. Create a token with both the original form and stem metadata\n *\n * @param word - The word to normalize and look up\n * @param startPos - Start position for the token\n * @param endPos - End position for the token\n * @returns Token if stem matches a keyword, null otherwise\n */\n protected tryMorphKeywordMatch(\n word: string,\n startPos: number,\n endPos: number\n ): LanguageToken | null {\n const result = this.tryNormalize(word);\n if (!result) return null;\n\n // Check if the stem is a known keyword\n const stemEntry = this.lookupKeyword(result.stem);\n if (!stemEntry) return null;\n\n const tokenOptions: CreateTokenOptions = {\n normalized: stemEntry.normalized,\n stem: result.stem,\n stemConfidence: result.confidence,\n };\n return createToken(word, 'keyword', createPosition(startPos, endPos), tokenOptions);\n }\n\n /**\n * Try to extract a CSS selector at the current position.\n */\n protected trySelector(input: string, pos: number): LanguageToken | null {\n const selector = extractCssSelector(input, pos);\n if (selector) {\n return createToken(selector, 'selector', createPosition(pos, pos + selector.length));\n }\n return null;\n }\n\n /**\n * Try to extract an event modifier at the current position.\n * Event modifiers are .once, .debounce(N), .throttle(N), .queue(strategy)\n */\n protected tryEventModifier(input: string, pos: number): LanguageToken | null {\n // Must start with a dot\n if (input[pos] !== '.') {\n return null;\n }\n\n // Match pattern: .(once|debounce|throttle|queue) followed by optional (value)\n const match = input\n .slice(pos)\n .match(/^\\.(?:once|debounce|throttle|queue)(?:\\(([^)]+)\\))?(?:\\s|$|\\.)/);\n if (!match) {\n return null;\n }\n\n const fullMatch = match[0].replace(/(\\s|\\.)$/, ''); // Remove trailing space or dot\n const modifierName = fullMatch.slice(1).split('(')[0]; // Extract modifier name\n const value = match[1]; // Extract value from parentheses if present\n\n // Create token with metadata\n const token = createToken(\n fullMatch,\n 'event-modifier',\n createPosition(pos, pos + fullMatch.length)\n );\n\n // Add metadata for the modifier\n return {\n ...token,\n metadata: {\n modifierName,\n value: value ? (modifierName === 'queue' ? 
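// Illustrative sketch (not part of the package source): how the event-modifier pattern above
// decomposes a few inputs. Numeric arguments go through parseInt; 'queue' keeps its raw string.
const modifierPattern = /^\.(?:once|debounce|throttle|queue)(?:\(([^)]+)\))?(?:\s|$|\.)/;
const m1 = '.debounce(500) log it'.match(modifierPattern);
console.assert(m1 !== null && m1[1] === '500');     // → modifierName 'debounce', value 500
const m2 = '.once'.match(modifierPattern);
console.assert(m2 !== null && m2[1] === undefined); // → modifierName 'once', no value
const m3 = '.queue(last)'.match(modifierPattern);
console.assert(m3 !== null && m3[1] === 'last');    // → modifierName 'queue', value stays 'last'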
value : parseInt(value, 10)) : undefined,\n },\n };\n }\n\n /**\n * Try to extract a string literal at the current position.\n */\n protected tryString(input: string, pos: number): LanguageToken | null {\n const literal = extractStringLiteral(input, pos);\n if (literal) {\n return createToken(literal, 'literal', createPosition(pos, pos + literal.length));\n }\n return null;\n }\n\n /**\n * Try to extract a number at the current position.\n */\n protected tryNumber(input: string, pos: number): LanguageToken | null {\n const number = extractNumber(input, pos);\n if (number) {\n return createToken(number, 'literal', createPosition(pos, pos + number.length));\n }\n return null;\n }\n\n /**\n * Configuration for native language time units.\n * Maps patterns to their standard suffix (ms, s, m, h).\n */\n protected static readonly STANDARD_TIME_UNITS: readonly TimeUnitMapping[] = [\n { pattern: 'ms', suffix: 'ms', length: 2 },\n { pattern: 's', suffix: 's', length: 1, checkBoundary: true },\n { pattern: 'm', suffix: 'm', length: 1, checkBoundary: true, notFollowedBy: 's' },\n { pattern: 'h', suffix: 'h', length: 1, checkBoundary: true },\n ];\n\n /**\n * Try to match a time unit from a list of patterns.\n *\n * @param input - Input string\n * @param pos - Position after the number\n * @param timeUnits - Array of time unit mappings (native pattern → standard suffix)\n * @param skipWhitespace - Whether to skip whitespace before time unit (default: false)\n * @returns Object with matched suffix and new position, or null if no match\n */\n protected tryMatchTimeUnit(\n input: string,\n pos: number,\n timeUnits: readonly TimeUnitMapping[],\n skipWhitespace = false\n ): { suffix: string; endPos: number } | null {\n let unitPos = pos;\n\n // Optionally skip whitespace before time unit\n if (skipWhitespace) {\n while (unitPos < input.length && isWhitespace(input[unitPos])) {\n unitPos++;\n }\n }\n\n const remaining = input.slice(unitPos);\n\n // Check each time unit pattern\n for (const unit of timeUnits) {\n const candidate = remaining.slice(0, unit.length);\n const matches = unit.caseInsensitive\n ? 
candidate.toLowerCase() === unit.pattern.toLowerCase()\n : candidate === unit.pattern;\n\n if (matches) {\n // Check notFollowedBy constraint (e.g., 'm' should not match 'ms')\n if (unit.notFollowedBy) {\n const nextChar = remaining[unit.length] || '';\n if (nextChar === unit.notFollowedBy) continue;\n }\n\n // Check word boundary if required\n if (unit.checkBoundary) {\n const nextChar = remaining[unit.length] || '';\n if (isAsciiIdentifierChar(nextChar)) continue;\n }\n\n return { suffix: unit.suffix, endPos: unitPos + unit.length };\n }\n }\n\n return null;\n }\n\n /**\n * Parse a base number (sign, integer, decimal) without time units.\n * Returns the number string and end position.\n *\n * @param input - Input string\n * @param startPos - Start position\n * @param allowSign - Whether to allow +/- sign (default: true)\n * @returns Object with number string and end position, or null\n */\n protected parseBaseNumber(\n input: string,\n startPos: number,\n allowSign = true\n ): { number: string; endPos: number } | null {\n let pos = startPos;\n let number = '';\n\n // Optional sign\n if (allowSign && (input[pos] === '-' || input[pos] === '+')) {\n number += input[pos++];\n }\n\n // Must have at least one digit\n if (pos >= input.length || !isDigit(input[pos])) {\n return null;\n }\n\n // Integer part\n while (pos < input.length && isDigit(input[pos])) {\n number += input[pos++];\n }\n\n // Optional decimal\n if (pos < input.length && input[pos] === '.') {\n number += input[pos++];\n while (pos < input.length && isDigit(input[pos])) {\n number += input[pos++];\n }\n }\n\n if (!number || number === '-' || number === '+') return null;\n\n return { number, endPos: pos };\n }\n\n /**\n * Try to extract a number with native language time units.\n *\n * This is a template method that handles the common pattern:\n * 1. Parse the base number (sign, integer, decimal)\n * 2. Try to match native language time units\n * 3. 
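// Illustrative sketch (not part of the package source): the boundary rules encoded in
// STANDARD_TIME_UNITS above. 'm' must not be followed by 's' (so '10ms' resolves to 'ms'), and
// single-letter units require a word boundary (so '5min' gets no unit at all). The identifier
// predicate below is an assumption standing in for isAsciiIdentifierChar.
function sketchMatchUnit(remaining: string): string | null {
  const isIdent = (c: string) => /[A-Za-z0-9_]/.test(c);
  const units = [
    { pattern: 'ms', suffix: 'ms', length: 2 },
    { pattern: 's', suffix: 's', length: 1, checkBoundary: true },
    { pattern: 'm', suffix: 'm', length: 1, checkBoundary: true, notFollowedBy: 's' },
    { pattern: 'h', suffix: 'h', length: 1, checkBoundary: true },
  ] as const;
  for (const unit of units) {
    if (remaining.slice(0, unit.length) !== unit.pattern) continue;
    const next = remaining[unit.length] ?? '';
    if ('notFollowedBy' in unit && next === unit.notFollowedBy) continue;
    if ('checkBoundary' in unit && unit.checkBoundary && isIdent(next)) continue;
    return unit.suffix;
  }
  return null;
}
console.assert(sketchMatchUnit('ms') === 'ms');
console.assert(sketchMatchUnit('s then') === 's');
console.assert(sketchMatchUnit('min') === null); // boundary check rejects 'm' followed by 'i'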
Fall back to standard time units (ms, s, m, h)\n *\n * @param input - Input string\n * @param pos - Start position\n * @param nativeTimeUnits - Language-specific time unit mappings\n * @param options - Configuration options\n * @returns Token if number found, null otherwise\n */\n protected tryNumberWithTimeUnits(\n input: string,\n pos: number,\n nativeTimeUnits: readonly TimeUnitMapping[],\n options: { allowSign?: boolean; skipWhitespace?: boolean } = {}\n ): LanguageToken | null {\n const { allowSign = true, skipWhitespace = false } = options;\n\n // Parse base number\n const baseResult = this.parseBaseNumber(input, pos, allowSign);\n if (!baseResult) return null;\n\n let { number, endPos } = baseResult;\n\n // Try native time units first, then standard\n const allUnits = [...nativeTimeUnits, ...BaseTokenizer.STANDARD_TIME_UNITS];\n const timeMatch = this.tryMatchTimeUnit(input, endPos, allUnits, skipWhitespace);\n\n if (timeMatch) {\n number += timeMatch.suffix;\n endPos = timeMatch.endPos;\n }\n\n return createToken(number, 'literal', createPosition(pos, endPos));\n }\n\n /**\n * Try to extract a URL at the current position.\n * Handles /path, ./path, ../path, //domain.com, http://, https://\n */\n protected tryUrl(input: string, pos: number): LanguageToken | null {\n const url = extractUrl(input, pos);\n if (url) {\n return createToken(url, 'url', createPosition(pos, pos + url.length));\n }\n return null;\n }\n\n /**\n * Try to extract a variable reference (:varname) at the current position.\n * In hyperscript, :x refers to a local variable named x.\n */\n protected tryVariableRef(input: string, pos: number): LanguageToken | null {\n if (input[pos] !== ':') return null;\n if (pos + 1 >= input.length) return null;\n if (!isAsciiIdentifierChar(input[pos + 1])) return null;\n\n let endPos = pos + 1;\n while (endPos < input.length && isAsciiIdentifierChar(input[endPos])) {\n endPos++;\n }\n\n const varRef = input.slice(pos, endPos);\n return createToken(varRef, 'identifier', createPosition(pos, endPos));\n }\n\n /**\n * Try to extract an operator or punctuation token at the current position.\n * Handles two-character operators (==, !=, etc.) 
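// Illustrative sketch (not part of the package source): ':name' scanning as described above.
// The identifier predicate is an assumption standing in for isAsciiIdentifierChar.
function sketchVariableRef(input: string, pos: number): string | null {
  const isIdent = (c: string) => /[A-Za-z0-9_]/.test(c);
  if (input[pos] !== ':' || !isIdent(input[pos + 1] ?? '')) return null;
  let end = pos + 1;
  while (end < input.length && isIdent(input[end])) end++;
  return input.slice(pos, end);
}
console.assert(sketchVariableRef('set :count to 5', 4) === ':count');
console.assert(sketchVariableRef('::', 0) === null); // ':' must be followed by an identifier character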
and single-character operators.\n */\n protected tryOperator(input: string, pos: number): LanguageToken | null {\n // Two-character operators\n const twoChar = input.slice(pos, pos + 2);\n if (['==', '!=', '<=', '>=', '&&', '||', '->'].includes(twoChar)) {\n return createToken(twoChar, 'operator', createPosition(pos, pos + 2));\n }\n\n // Single-character operators\n const oneChar = input[pos];\n if (['<', '>', '!', '+', '-', '*', '/', '='].includes(oneChar)) {\n return createToken(oneChar, 'operator', createPosition(pos, pos + 1));\n }\n\n // Punctuation\n if (['(', ')', '{', '}', ',', ';', ':'].includes(oneChar)) {\n return createToken(oneChar, 'punctuation', createPosition(pos, pos + 1));\n }\n\n return null;\n }\n\n /**\n * Try to match a multi-character particle from a list.\n *\n * Used by languages like Japanese, Korean, and Chinese that have\n * multi-character particles (e.g., Japanese から, まで, より).\n *\n * @param input - Input string\n * @param pos - Current position\n * @param particles - Array of multi-character particles to match\n * @returns Token if matched, null otherwise\n */\n protected tryMultiCharParticle(\n input: string,\n pos: number,\n particles: readonly string[]\n ): LanguageToken | null {\n for (const particle of particles) {\n if (input.slice(pos, pos + particle.length) === particle) {\n return createToken(particle, 'particle', createPosition(pos, pos + particle.length));\n }\n }\n return null;\n }\n}\n","/**\n * Morphological Normalizer Types\n *\n * Defines interfaces for language-specific morphological analysis.\n * Normalizers reduce conjugated/inflected forms to canonical stems\n * that can be matched against keyword dictionaries.\n */\n\n/**\n * Result of morphological normalization.\n */\nexport interface NormalizationResult {\n /** The extracted stem/root form */\n readonly stem: string;\n\n /** Confidence in the normalization (0.0-1.0) */\n readonly confidence: number;\n\n /** Optional metadata about the transformation */\n readonly metadata?: NormalizationMetadata;\n}\n\n/**\n * Metadata about morphological transformations applied.\n */\nexport interface NormalizationMetadata {\n /** Prefixes that were removed */\n readonly removedPrefixes?: readonly string[];\n\n /** Suffixes that were removed */\n readonly removedSuffixes?: readonly string[];\n\n /** Type of conjugation detected */\n readonly conjugationType?: ConjugationType;\n\n /** Original form classification */\n readonly originalForm?: string;\n\n /** Applied transformation rules (for debugging) */\n readonly appliedRules?: readonly string[];\n}\n\n/**\n * Types of verb conjugation/inflection.\n */\nexport type ConjugationType =\n // Tense\n | 'present'\n | 'past'\n | 'future'\n | 'progressive'\n | 'perfect'\n // Mood\n | 'imperative'\n | 'subjunctive'\n | 'conditional'\n // Voice\n | 'passive'\n | 'causative'\n // Politeness (Japanese/Korean)\n | 'polite'\n | 'humble'\n | 'honorific'\n // Form\n | 'negative'\n | 'potential'\n | 'volitional'\n // Japanese conditional forms\n | 'conditional-tara' // たら/したら - if/when (completed action)\n | 'conditional-to' // と/すると - when (habitual/expected)\n | 'conditional-ba' // ば/すれば - if (hypothetical)\n // Korean-specific\n | 'connective' // 하고, 해서 etc.\n | 'conditional-myeon' // -(으)면 - if/when (general conditional)\n | 'temporal-ttae' // -(으)ㄹ 때 - when (at the time of)\n | 'causal-nikka' // -(으)니까 - because/since\n // Korean honorific forms (-시- infix)\n | 'honorific-conditional' // -하시면 - if (honorific)\n | 'honorific-temporal' // -하실 때 - when 
(honorific)\n | 'honorific-causal' // -하시니까 - because (honorific)\n | 'honorific-past' // -하셨어요 - past (honorific)\n | 'honorific-polite' // -하십니다 - polite (honorific)\n // Korean sequential forms\n | 'sequential-after' // -고 나서 - after doing\n | 'sequential-before' // -기 전에 - before doing\n | 'immediate' // -자마자 - as soon as\n | 'obligation' // -아야/어야 해 - must do, should do\n // Spanish-specific\n | 'reflexive'\n | 'reflexive-imperative'\n | 'gerund'\n | 'participle'\n // Arabic-specific\n | 'conditional-idha' // إذا - if/when (hypothetical)\n | 'temporal-indama' // عندما - when (temporal conjunction)\n | 'temporal-hina' // حين - at the time of\n | 'temporal-lamma' // لمّا - when (past emphasis)\n | 'past-verb' // فعل ماضي - past tense verb\n // Turkish-specific\n | 'conditional-se' // -se/-sa - if (hypothetical)\n | 'temporal-ince' // -ince/-ınca/-unca/-ünce - when/as\n | 'temporal-dikce' // -dikçe/-dıkça/-dukça/-dükçe - as/while\n | 'aorist' // -ir/-ar - habitual/general\n | 'optative' // -eyim/-ayım/-elim/-alım - let me/us\n | 'necessitative' // -meli/-malı - must/should\n // Japanese request/contracted forms\n | 'request' // てください/でください - polite request\n | 'casual-request' // てくれ/でくれ - casual request\n | 'contracted' // ちゃう/じゃう - contracted completion (てしまう)\n | 'contracted-past' // ちゃった/じゃった - contracted past completion\n // Compound\n | 'compound' // Multi-layer suffixes (ていなかった, 하고나서였어)\n | 'te-form' // Japanese て-form\n | 'dictionary'; // Base/infinitive form\n\n/**\n * Interface for language-specific morphological normalizers.\n *\n * Normalizers attempt to reduce inflected word forms to their\n * canonical stems. This enables matching conjugated verbs against\n * keyword dictionaries that only contain base forms.\n *\n * Example (Japanese):\n * 切り替えた (past) → { stem: '切り替え', confidence: 0.85 }\n * 切り替えます (polite) → { stem: '切り替え', confidence: 0.85 }\n *\n * Example (Spanish):\n * mostrarse (reflexive infinitive) → { stem: 'mostrar', confidence: 0.85 }\n * alternando (gerund) → { stem: 'alternar', confidence: 0.85 }\n */\nexport interface MorphologicalNormalizer {\n /** Language code this normalizer handles */\n readonly language: string;\n\n /**\n * Normalize a word to its canonical stem form.\n *\n * @param word - The word to normalize\n * @returns Normalization result with stem and confidence\n */\n normalize(word: string): NormalizationResult;\n\n /**\n * Check if a word appears to be a verb form that can be normalized.\n * Optional optimization to skip normalization for non-verb tokens.\n *\n * @param word - The word to check\n * @returns true if the word might be a normalizable verb form\n */\n isNormalizable?(word: string): boolean;\n}\n\n/**\n * Configuration for suffix-based normalization rules.\n * Used by agglutinative languages (Japanese, Korean, Turkish).\n */\nexport interface SuffixRule {\n /** The suffix pattern to match */\n readonly pattern: string;\n\n /** Confidence when this suffix is stripped */\n readonly confidence: number;\n\n /** What to replace the suffix with (empty string for simple removal) */\n readonly replacement?: string;\n\n /** Conjugation type this suffix indicates */\n readonly conjugationType?: ConjugationType;\n\n /** Minimum stem length after stripping (to avoid over-stripping) */\n readonly minStemLength?: number;\n}\n\n/**\n * Configuration for prefix-based normalization rules.\n * Used primarily by Arabic for article/conjunction prefixes.\n */\nexport interface PrefixRule {\n /** The prefix pattern to match */\n readonly pattern: 
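// Illustrative sketch (not part of the package source): a deliberately tiny normalizer that
// satisfies the MorphologicalNormalizer contract above, using two ad-hoc English suffix rules.
// The real normalizers are far richer; this only shows the shape of the contract.
class ToyEnglishNormalizer implements MorphologicalNormalizer {
  readonly language = 'en';

  isNormalizable(word: string): boolean {
    return /(?:ed|ing)$/.test(word);
  }

  normalize(word: string): NormalizationResult {
    if (word.endsWith('ing') && word.length > 5) {
      return { stem: word.slice(0, -3), confidence: 0.8, metadata: { removedSuffixes: ['ing'] } };
    }
    if (word.endsWith('ed') && word.length > 4) {
      return { stem: word.slice(0, -2), confidence: 0.8, metadata: { removedSuffixes: ['ed'] } };
    }
    return { stem: word, confidence: 1.0 }; // same shape as the noChange() helper below
  }
}
// new ToyEnglishNormalizer().normalize('added')   → { stem: 'add', confidence: 0.8, ... }
// new ToyEnglishNormalizer().normalize('showing') → { stem: 'show', confidence: 0.8, ... }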
string;\n\n /** Confidence penalty when this prefix is stripped */\n readonly confidencePenalty: number;\n\n /** What the prefix indicates (for metadata) */\n readonly prefixType?: 'article' | 'conjunction' | 'preposition' | 'verb-marker';\n\n /** Minimum remaining characters after stripping (to avoid over-stripping) */\n readonly minRemaining?: number;\n}\n\n/**\n * Helper to create a \"no change\" normalization result.\n */\nexport function noChange(word: string): NormalizationResult {\n return { stem: word, confidence: 1.0 };\n}\n\n/**\n * Helper to create a normalization result with metadata.\n */\nexport function normalized(\n stem: string,\n confidence: number,\n metadata?: NormalizationMetadata\n): NormalizationResult {\n if (metadata) {\n return { stem, confidence, metadata };\n }\n return { stem, confidence };\n}\n","/**\n * Japanese Morphological Normalizer\n *\n * Reduces Japanese verb conjugations to their stem forms.\n * Japanese verbs conjugate by modifying their endings:\n *\n * Base: 切り替え (kiri-kae) - \"toggle\"\n * て-form: 切り替えて (kiri-kaete) - \"toggle and...\"\n * た-form: 切り替えた (kiri-kaeta) - \"toggled\" (past)\n * ます-form: 切り替えます (kiri-kaemasu) - polite present\n * ている: 切り替えている (kiri-kaeteiru) - \"is toggling\" (progressive)\n * ない: 切り替えない (kiri-kaenai) - \"don't toggle\" (negative)\n *\n * This normalizer strips these suffixes to find the stem,\n * which can then be matched against keyword dictionaries.\n */\n\nimport type {\n MorphologicalNormalizer,\n NormalizationResult,\n SuffixRule,\n ConjugationType,\n} from './types';\nimport { noChange, normalized } from './types';\n\n/**\n * Suffix rules for Japanese verb conjugation.\n * Ordered by length (longest first) to ensure greedy matching.\n */\nconst JAPANESE_SUFFIX_RULES: readonly SuffixRule[] = [\n // Conditional forms - very common for event handlers (longest first)\n // したら/すると/すれば are する verb conditionals\n { pattern: 'したら', confidence: 0.88, conjugationType: 'conditional-tara', minStemLength: 2 },\n { pattern: 'すると', confidence: 0.88, conjugationType: 'conditional-to', minStemLength: 2 },\n { pattern: 'すれば', confidence: 0.85, conjugationType: 'conditional-ba', minStemLength: 2 },\n // たら/れば are regular verb conditionals\n { pattern: 'たら', confidence: 0.85, conjugationType: 'conditional-tara', minStemLength: 2 },\n { pattern: 'れば', confidence: 0.82, conjugationType: 'conditional-ba', minStemLength: 2 },\n\n // Compound forms (longest first)\n { pattern: 'ていました', confidence: 0.82, conjugationType: 'past', minStemLength: 2 },\n { pattern: 'ています', confidence: 0.85, conjugationType: 'progressive', minStemLength: 2 },\n { pattern: 'てください', confidence: 0.85, conjugationType: 'request', minStemLength: 2 },\n { pattern: 'でください', confidence: 0.85, conjugationType: 'request', minStemLength: 2 },\n { pattern: 'ている', confidence: 0.85, conjugationType: 'progressive', minStemLength: 2 },\n { pattern: 'ておく', confidence: 0.82, conjugationType: 'progressive', minStemLength: 2 },\n { pattern: 'てみる', confidence: 0.82, conjugationType: 'progressive', minStemLength: 2 },\n { pattern: 'てある', confidence: 0.82, conjugationType: 'progressive', minStemLength: 2 },\n\n // Casual request forms\n { pattern: 'てくれ', confidence: 0.8, conjugationType: 'casual-request', minStemLength: 2 },\n { pattern: 'でくれ', confidence: 0.8, conjugationType: 'casual-request', minStemLength: 2 },\n\n // Contracted/colloquial forms (ちゃう/じゃう = てしまう/でしまう)\n { pattern: 'ちゃった', confidence: 0.82, conjugationType: 'contracted-past', minStemLength: 2 },\n { pattern: 
'じゃった', confidence: 0.82, conjugationType: 'contracted-past', minStemLength: 2 },\n { pattern: 'ちゃう', confidence: 0.82, conjugationType: 'contracted', minStemLength: 2 },\n { pattern: 'じゃう', confidence: 0.82, conjugationType: 'contracted', minStemLength: 2 },\n\n // Polite forms\n { pattern: 'ました', confidence: 0.85, conjugationType: 'past', minStemLength: 2 },\n { pattern: 'ません', confidence: 0.85, conjugationType: 'negative', minStemLength: 2 },\n { pattern: 'ます', confidence: 0.85, conjugationType: 'polite', minStemLength: 2 },\n\n // て/た forms (very common)\n { pattern: 'て', confidence: 0.85, conjugationType: 'te-form', minStemLength: 2 },\n { pattern: 'た', confidence: 0.85, conjugationType: 'past', minStemLength: 2 },\n\n // Negative forms\n { pattern: 'ない', confidence: 0.82, conjugationType: 'negative', minStemLength: 2 },\n { pattern: 'なかった', confidence: 0.82, conjugationType: 'past', minStemLength: 2 },\n\n // Potential forms\n { pattern: 'られる', confidence: 0.8, conjugationType: 'potential', minStemLength: 2 },\n { pattern: 'れる', confidence: 0.78, conjugationType: 'potential', minStemLength: 2 },\n\n // Passive forms\n { pattern: 'られた', confidence: 0.8, conjugationType: 'passive', minStemLength: 2 },\n\n // Causative forms\n { pattern: 'させる', confidence: 0.8, conjugationType: 'causative', minStemLength: 2 },\n { pattern: 'せる', confidence: 0.78, conjugationType: 'causative', minStemLength: 2 },\n\n // Volitional forms\n { pattern: 'よう', confidence: 0.8, conjugationType: 'volitional', minStemLength: 2 },\n\n // Dictionary form ending (る-verbs) - lower confidence due to ambiguity\n { pattern: 'る', confidence: 0.75, conjugationType: 'dictionary', minStemLength: 3 },\n];\n\n/**\n * Special する verb patterns.\n * する verbs are formed by noun + する, very common in Japanese.\n * Order by length (longest first) for greedy matching.\n */\nconst SURU_PATTERNS: readonly {\n pattern: string;\n confidence: number;\n conjugationType: ConjugationType;\n}[] = [\n // Conditional forms (most important for native idioms)\n { pattern: 'したら', confidence: 0.88, conjugationType: 'conditional-tara' },\n { pattern: 'すると', confidence: 0.88, conjugationType: 'conditional-to' },\n { pattern: 'すれば', confidence: 0.85, conjugationType: 'conditional-ba' },\n // Progressive forms\n { pattern: 'しています', confidence: 0.85, conjugationType: 'progressive' },\n { pattern: 'している', confidence: 0.85, conjugationType: 'progressive' },\n // Other forms\n { pattern: 'しました', confidence: 0.85, conjugationType: 'past' },\n { pattern: 'します', confidence: 0.85, conjugationType: 'polite' },\n { pattern: 'しない', confidence: 0.82, conjugationType: 'negative' },\n { pattern: 'して', confidence: 0.85, conjugationType: 'te-form' },\n { pattern: 'した', confidence: 0.85, conjugationType: 'past' },\n { pattern: 'する', confidence: 0.88, conjugationType: 'dictionary' },\n];\n\n/**\n * Check if a character is hiragana.\n */\nfunction isHiragana(char: string): boolean {\n const code = char.charCodeAt(0);\n return code >= 0x3040 && code <= 0x309f;\n}\n\n/**\n * Check if a character is katakana.\n */\nfunction isKatakana(char: string): boolean {\n const code = char.charCodeAt(0);\n return code >= 0x30a0 && code <= 0x30ff;\n}\n\n/**\n * Check if a character is kanji.\n */\nfunction isKanji(char: string): boolean {\n const code = char.charCodeAt(0);\n return (code >= 0x4e00 && code <= 0x9fff) || (code >= 0x3400 && code <= 0x4dbf);\n}\n\n/**\n * Check if a word contains Japanese characters.\n */\nfunction containsJapanese(word: string): boolean {\n for (const 
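// Illustrative sketch (not part of the package source): why the suffix tables above are scanned
// longest-first. With a first-match loop, 'ました' has to be tried before the bare 'た' rule,
// otherwise '切り替えました' would be stripped to '切り替えまし' instead of '切り替え'.
function sketchStrip(word: string, patterns: readonly string[]): string {
  for (const pattern of patterns) {
    if (word.endsWith(pattern) && word.length - pattern.length >= 2) {
      return word.slice(0, -pattern.length);
    }
  }
  return word;
}
console.assert(sketchStrip('切り替えました', ['ました', 'た']) === '切り替え');     // longest first: correct stem
console.assert(sketchStrip('切り替えました', ['た', 'ました']) === '切り替えまし'); // shorter first: over-long stem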
char of word) {\n if (isHiragana(char) || isKatakana(char) || isKanji(char)) {\n return true;\n }\n }\n return false;\n}\n\n/**\n * Japanese morphological normalizer.\n */\nexport class JapaneseMorphologicalNormalizer implements MorphologicalNormalizer {\n readonly language = 'ja';\n\n /**\n * Check if a word might be a Japanese verb that can be normalized.\n */\n isNormalizable(word: string): boolean {\n // Must contain Japanese characters\n if (!containsJapanese(word)) return false;\n\n // Must be at least 2 characters\n if (word.length < 2) return false;\n\n // Check if it ends with a hiragana character (verbs typically do)\n const lastChar = word[word.length - 1];\n return isHiragana(lastChar);\n }\n\n /**\n * Normalize a Japanese word to its stem form.\n */\n normalize(word: string): NormalizationResult {\n // Check for compound conjugations first (multi-layer suffixes)\n const compoundResult = this.normalizeCompound(word);\n if (compoundResult) return compoundResult;\n\n // Check for する verb patterns (most common compound verbs)\n const suruResult = this.trySuruNormalization(word);\n if (suruResult) return suruResult;\n\n // Try suffix rules\n for (const rule of JAPANESE_SUFFIX_RULES) {\n if (word.endsWith(rule.pattern)) {\n const stem = word.slice(0, -rule.pattern.length);\n\n // Validate stem length\n const minLength = rule.minStemLength ?? 2;\n if (stem.length < minLength) continue;\n\n // Return normalized result\n const metadata: {\n removedSuffixes: string[];\n conjugationType?: typeof rule.conjugationType;\n } = {\n removedSuffixes: [rule.pattern],\n };\n if (rule.conjugationType) {\n metadata.conjugationType = rule.conjugationType;\n }\n return normalized(stem, rule.confidence, metadata);\n }\n }\n\n // No normalization needed\n return noChange(word);\n }\n\n /**\n * Try to normalize a する verb.\n */\n private trySuruNormalization(word: string): NormalizationResult | null {\n for (const pattern of SURU_PATTERNS) {\n if (word.endsWith(pattern.pattern)) {\n const stem = word.slice(0, -pattern.pattern.length);\n\n // する verbs need at least one character for the noun part\n if (stem.length < 1) continue;\n\n // Return the noun part (without する)\n return normalized(stem, pattern.confidence, {\n removedSuffixes: [pattern.pattern],\n conjugationType: pattern.conjugationType,\n originalForm: 'suru-verb',\n });\n }\n }\n return null;\n }\n\n /**\n * Normalize compound conjugations (multi-layer suffixes).\n * These are combinations like ていなかった (was not doing), でいない (is not doing).\n * Handles cases that single-suffix rules miss.\n */\n private normalizeCompound(word: string): NormalizationResult | null {\n // Compound patterns with negative progressive forms\n const compoundPatterns: readonly {\n pattern: string;\n suffixes: string[];\n confidence: number;\n minStemLength: number;\n }[] = [\n // Progressive negative past forms\n {\n pattern: 'ていなかった',\n suffixes: ['て', 'い', 'なかった'],\n confidence: 0.8,\n minStemLength: 2,\n },\n {\n pattern: 'でいなかった',\n suffixes: ['で', 'い', 'なかった'],\n confidence: 0.8,\n minStemLength: 2,\n },\n // Progressive negative forms\n { pattern: 'ていない', suffixes: ['て', 'い', 'ない'], confidence: 0.82, minStemLength: 2 },\n { pattern: 'でいない', suffixes: ['で', 'い', 'ない'], confidence: 0.82, minStemLength: 2 },\n // Progressive past forms\n { pattern: 'ていた', suffixes: ['て', 'い', 'た'], confidence: 0.85, minStemLength: 2 },\n { pattern: 'でいた', suffixes: ['で', 'い', 'た'], confidence: 0.85, minStemLength: 2 },\n ];\n\n for (const { pattern, suffixes, confidence, minStemLength 
} of compoundPatterns) {\n if (word.endsWith(pattern)) {\n const stem = word.slice(0, -pattern.length);\n\n // Validate minimum stem length\n if (stem.length < minStemLength) continue;\n\n return normalized(stem, confidence, {\n removedSuffixes: suffixes,\n conjugationType: 'compound',\n });\n }\n }\n\n return null;\n }\n}\n\n// Export singleton instance\nexport const japaneseMorphologicalNormalizer = new JapaneseMorphologicalNormalizer();\n","/**\n * Japanese Language Profile\n *\n * SOV word order, particles (を, に, で, etc.), no spaces between words.\n * Agglutinative language with rich verb conjugation.\n */\n\nimport type { LanguageProfile } from './types';\n\nexport const japaneseProfile: LanguageProfile = {\n code: 'ja',\n name: 'Japanese',\n nativeName: '日本語',\n direction: 'ltr',\n wordOrder: 'SOV',\n markingStrategy: 'particle',\n usesSpaces: false,\n // Japanese uses verb stem/masu-stem form, no clear infinitive/imperative distinction\n // for UI commands. Uses katakana loanwords (トグル) or native stems (切り替え)\n defaultVerbForm: 'base',\n verb: {\n position: 'end',\n suffixes: ['る', 'て', 'た', 'ます', 'ない'],\n subjectDrop: true,\n },\n references: {\n me: '自分', // \"self\" - in hyperscript context, refers to current element\n it: 'それ', // \"it\"\n you: 'あなた', // \"you\"\n result: '結果',\n event: 'イベント',\n target: 'ターゲット',\n body: 'ボディ',\n },\n possessive: {\n marker: 'の',\n markerPosition: 'between',\n // In Japanese: 自分の value (jibun no value) = \"my value\"\n keywords: {\n 私の: 'me', // watashi no (my)\n あなたの: 'you', // anata no (your)\n その: 'it', // sono (its)\n },\n },\n roleMarkers: {\n patient: { primary: 'を', position: 'after' },\n destination: { primary: 'に', alternatives: ['へ', 'で'], position: 'after' },\n source: { primary: 'から', position: 'after' },\n style: { primary: 'で', position: 'after' },\n event: { primary: 'を', position: 'after' }, // Event as object marker\n // Possession marker for \"X's Y\" patterns\n // Note: の is used between target and patient: #button の .active\n },\n keywords: {\n // Class/Attribute operations\n toggle: {\n primary: '切り替え',\n alternatives: ['切り替える', 'トグル', 'トグルする'],\n normalized: 'toggle',\n },\n add: { primary: '追加', alternatives: ['追加する', '加える'], normalized: 'add' },\n remove: { primary: '削除', alternatives: ['削除する', '取り除く'], normalized: 'remove' },\n // Content operations\n put: { primary: '置く', alternatives: ['入れる', 'セット'], normalized: 'put' },\n append: { primary: '末尾追加', alternatives: ['末尾に追加', 'アペンド'], normalized: 'append' },\n prepend: {\n primary: '先頭追加',\n alternatives: ['先頭に追加', 'プリペンド'],\n normalized: 'prepend',\n },\n take: { primary: '取る', alternatives: ['取得'], normalized: 'take' },\n make: { primary: '作る', alternatives: ['作成'], normalized: 'make' },\n clone: { primary: '複製', alternatives: ['クローン'], normalized: 'clone' },\n swap: { primary: '交換', alternatives: ['スワップ', '入れ替え'], normalized: 'swap' },\n morph: { primary: '変形', alternatives: ['モーフ', '変換'], normalized: 'morph' },\n // Variable operations\n set: { primary: '設定', alternatives: ['設定する', 'セット'], normalized: 'set' },\n get: { primary: '取得', alternatives: ['取得する', 'ゲット'], normalized: 'get' },\n increment: {\n primary: '増加',\n alternatives: ['増やす', 'インクリメント'],\n normalized: 'increment',\n },\n decrement: {\n primary: '減少',\n alternatives: ['減らす', 'デクリメント'],\n normalized: 'decrement',\n },\n log: { primary: '記録', alternatives: ['ログ', '出力'], normalized: 'log' },\n // Visibility\n show: { primary: '表示', alternatives: ['表示する', '見せる'], normalized: 'show' },\n hide: { primary: '隠す', 
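// Illustrative sketch (not part of the package source): expected results from the normalizer
// exported above; stems and confidences follow directly from its rule tables. The import path
// mirrors the one used by the Japanese tokenizer below.
// import { japaneseMorphologicalNormalizer } from './morphology/japanese-normalizer';
console.assert(japaneseMorphologicalNormalizer.normalize('切り替えたら').stem === '切り替え');   // 'たら' → conditional-tara, 0.85
console.assert(japaneseMorphologicalNormalizer.normalize('更新したら').stem === '更新');         // する-verb path, 0.88
console.assert(japaneseMorphologicalNormalizer.normalize('切り替えていた').stem === '切り替え'); // compound 'ていた', 0.85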
alternatives: ['非表示', '非表示にする'], normalized: 'hide' },\n transition: {\n primary: '遷移',\n alternatives: ['トランジション', 'アニメーション'],\n normalized: 'transition',\n },\n // Events\n on: { primary: 'で', alternatives: ['時', 'とき'], normalized: 'on' },\n trigger: { primary: '引き金', alternatives: ['発火', 'トリガー'], normalized: 'trigger' },\n send: { primary: '送る', alternatives: ['送信'], normalized: 'send' },\n // DOM focus\n focus: { primary: 'フォーカス', alternatives: ['集中'], normalized: 'focus' },\n blur: { primary: 'ぼかし', alternatives: ['フォーカス解除'], normalized: 'blur' },\n // Navigation\n go: { primary: '移動', alternatives: ['行く', 'ナビゲート'], normalized: 'go' },\n // Async\n wait: { primary: '待つ', alternatives: ['待機'], normalized: 'wait' },\n fetch: { primary: '取得', alternatives: ['フェッチ'], normalized: 'fetch' },\n settle: { primary: '安定', alternatives: ['落ち着く'], normalized: 'settle' },\n // Control flow\n if: { primary: 'もし', alternatives: ['条件'], normalized: 'if' },\n when: { primary: 'とき', normalized: 'when' },\n where: { primary: 'どこ', normalized: 'where' },\n else: { primary: 'そうでなければ', alternatives: ['それ以外'], normalized: 'else' },\n repeat: { primary: '繰り返し', alternatives: ['繰り返す', 'リピート'], normalized: 'repeat' },\n for: { primary: 'ために', alternatives: ['各'], normalized: 'for' },\n while: { primary: 'の間', alternatives: ['間'], normalized: 'while' },\n continue: { primary: '続ける', alternatives: ['継続'], normalized: 'continue' },\n halt: { primary: '停止', alternatives: ['止める', 'ハルト'], normalized: 'halt' },\n throw: { primary: '投げる', alternatives: ['スロー'], normalized: 'throw' },\n call: { primary: '呼び出し', alternatives: ['コール', '呼ぶ'], normalized: 'call' },\n return: { primary: '戻る', alternatives: ['返す', 'リターン'], normalized: 'return' },\n then: { primary: 'それから', alternatives: ['次に', 'そして'], normalized: 'then' },\n and: { primary: 'そして', alternatives: ['と', 'また'], normalized: 'and' },\n end: { primary: '終わり', alternatives: ['終了', 'おわり'], normalized: 'end' },\n // Advanced\n js: { primary: 'JS実行', alternatives: ['js'], normalized: 'js' },\n async: { primary: '非同期', alternatives: ['アシンク'], normalized: 'async' },\n tell: { primary: '伝える', alternatives: ['テル'], normalized: 'tell' },\n default: { primary: '既定', alternatives: ['デフォルト'], normalized: 'default' },\n init: { primary: '初期化', alternatives: ['イニット'], normalized: 'init' },\n behavior: { primary: '振る舞い', alternatives: ['ビヘイビア'], normalized: 'behavior' },\n install: { primary: 'インストール', alternatives: ['導入'], normalized: 'install' },\n measure: { primary: '測定', alternatives: ['計測', 'メジャー'], normalized: 'measure' },\n // Modifiers\n into: { primary: 'へ', alternatives: ['に'], normalized: 'into' },\n before: { primary: '前に', alternatives: ['前'], normalized: 'before' },\n after: { primary: '後に', alternatives: ['後'], normalized: 'after' },\n // Event modifiers (for repeat until event)\n until: { primary: 'まで', alternatives: ['迄'], normalized: 'until' },\n event: { primary: 'イベント', alternatives: ['事象'], normalized: 'event' },\n from: { primary: 'から', normalized: 'from' },\n },\n tokenization: {\n particles: ['を', 'に', 'で', 'から', 'の', 'が', 'は', 'も', 'へ', 'と'],\n boundaryStrategy: 'particle',\n },\n};\n","/**\n * Japanese Tokenizer\n *\n * Tokenizes Japanese hyperscript input.\n * Japanese is challenging because:\n * - No spaces between words\n * - Particles (助詞) mark grammatical roles\n * - Mixed scripts (hiragana, katakana, kanji, romaji)\n * - CSS selectors are embedded ASCII\n */\n\nimport type { LanguageToken, TokenKind, TokenStream } from '../types';\nimport {\n 
BaseTokenizer,\n TokenStreamImpl,\n createToken,\n createPosition,\n createUnicodeRangeClassifier,\n combineClassifiers,\n isWhitespace,\n isSelectorStart,\n isQuote,\n isDigit,\n isAsciiIdentifierChar,\n isUrlStart,\n type KeywordEntry,\n type TimeUnitMapping,\n} from './base';\nimport { JapaneseMorphologicalNormalizer } from './morphology/japanese-normalizer';\nimport { japaneseProfile } from '../generators/profiles/japanese';\n\n// =============================================================================\n// Japanese Character Classification\n// =============================================================================\n\n/** Check if character is hiragana (U+3040-U+309F). */\nconst isHiragana = createUnicodeRangeClassifier([[0x3040, 0x309f]]);\n\n/** Check if character is katakana (U+30A0-U+30FF). */\nconst isKatakana = createUnicodeRangeClassifier([[0x30a0, 0x30ff]]);\n\n/** Check if character is kanji (CJK Unified Ideographs + Extension A). */\nconst isKanji = createUnicodeRangeClassifier([\n [0x4e00, 0x9fff], // CJK Unified Ideographs\n [0x3400, 0x4dbf], // CJK Unified Ideographs Extension A\n]);\n\n/** Check if character is Japanese (hiragana, katakana, or kanji). */\nconst isJapanese = combineClassifiers(isHiragana, isKatakana, isKanji);\n\n// =============================================================================\n// Japanese Particles\n// =============================================================================\n\n/**\n * Japanese particles that mark grammatical roles.\n * These are single hiragana characters that appear after nouns/verbs.\n */\nconst PARTICLES = new Set([\n 'を', // wo - object marker\n 'に', // ni - destination, time\n 'で', // de - location of action, means\n 'から', // kara - from\n 'まで', // made - until\n 'へ', // e - direction\n 'と', // to - and, with\n 'の', // no - possessive\n 'が', // ga - subject marker\n 'は', // wa - topic marker\n 'も', // mo - also\n 'より', // yori - than, from\n]);\n\n/**\n * Single-character particles (most common).\n */\nconst SINGLE_CHAR_PARTICLES = new Set(['を', 'に', 'で', 'へ', 'と', 'の', 'が', 'は', 'も']);\n\n/**\n * Multi-character particles.\n */\nconst MULTI_CHAR_PARTICLES = ['から', 'まで', 'より'];\n\n/**\n * Particle metadata mapping particles to semantic roles and confidence scores.\n * Used to enhance particle tokens with role information for the pattern matcher.\n */\ninterface ParticleMetadata {\n readonly role: string; // SemanticRole\n readonly confidence: number;\n readonly description?: string;\n}\n\nconst PARTICLE_ROLES = new Map<string, ParticleMetadata>([\n ['を', { role: 'patient', confidence: 0.95, description: 'object marker' }],\n ['に', { role: 'destination', confidence: 0.85, description: 'destination/time marker' }],\n ['で', { role: 'manner', confidence: 0.88, description: 'means/location marker' }],\n ['から', { role: 'source', confidence: 0.9, description: 'from/source marker' }],\n ['まで', { role: 'destination', confidence: 0.75, description: 'until/boundary marker' }],\n ['へ', { role: 'destination', confidence: 0.9, description: 'direction marker' }],\n ['と', { role: 'style', confidence: 0.7, description: 'with/and marker' }],\n ['の', { role: 'patient', confidence: 0.6, description: 'possessive marker' }],\n ['が', { role: 'agent', confidence: 0.85, description: 'subject marker' }],\n ['は', { role: 'agent', confidence: 0.75, description: 'topic marker' }],\n ['も', { role: 'patient', confidence: 0.65, description: 'also/too marker' }],\n ['より', { role: 'source', confidence: 0.85, description: 'from/than 
marker' }],\n]);\n\n// =============================================================================\n// Japanese Extras (keywords not in profile)\n// =============================================================================\n\n/**\n * Extra keywords not covered by the profile:\n * - Literals (true, false, null, undefined)\n * - Positional words\n * - Event names\n * - Attached particle forms (native idioms)\n * - Conditional event forms\n * - Time units\n */\nconst JAPANESE_EXTRAS: KeywordEntry[] = [\n // Values/Literals\n { native: '真', normalized: 'true' },\n { native: '偽', normalized: 'false' },\n { native: 'ヌル', normalized: 'null' },\n { native: '未定義', normalized: 'undefined' },\n\n // Positional\n { native: '最初', normalized: 'first' },\n { native: '最後', normalized: 'last' },\n { native: '次', normalized: 'next' },\n { native: '前', normalized: 'previous' },\n { native: '最も近い', normalized: 'closest' },\n { native: '親', normalized: 'parent' },\n\n // Events\n { native: 'クリック', normalized: 'click' },\n { native: '変更', normalized: 'change' },\n { native: '送信', normalized: 'submit' },\n { native: '入力', normalized: 'input' },\n { native: 'ロード', normalized: 'load' },\n { native: 'スクロール', normalized: 'scroll' },\n { native: 'キーダウン', normalized: 'keydown' },\n { native: 'キーアップ', normalized: 'keyup' },\n { native: 'マウスオーバー', normalized: 'mouseover' },\n { native: 'マウスアウト', normalized: 'mouseout' },\n { native: 'ブラー', normalized: 'blur' },\n\n // References (additional forms)\n { native: '私', normalized: 'me' },\n { native: '私の', normalized: 'my' },\n { native: 'その', normalized: 'its' },\n\n // Note: Attached particle forms (を切り替え, を追加, etc.) are intentionally NOT included\n // because they would cause ambiguous parsing. The separate particle + verb pattern\n // (を + 切り替え) is preferred for consistent semantic analysis.\n\n // Conditional event forms\n { native: 'したら', normalized: 'on' },\n { native: 'すると', normalized: 'on' },\n { native: '時に', normalized: 'on' },\n\n // Control flow helpers\n { native: 'もし', normalized: 'if' }, // Starts with particle も, needs explicit entry\n { native: 'ならば', normalized: 'then' },\n { native: 'なら', normalized: 'then' },\n\n // Time units\n { native: '秒', normalized: 's' },\n { native: 'ミリ秒', normalized: 'ms' },\n { native: '分', normalized: 'm' },\n { native: '時間', normalized: 'h' },\n];\n\n// =============================================================================\n// Japanese Time Units\n// =============================================================================\n\n/**\n * Japanese time unit patterns for number parsing.\n * Sorted by length (longest first) to ensure correct matching.\n * Japanese time units attach directly without whitespace.\n */\nconst JAPANESE_TIME_UNITS: readonly TimeUnitMapping[] = [\n { pattern: 'ミリ秒', suffix: 'ms', length: 3 },\n { pattern: '時間', suffix: 'h', length: 2 },\n { pattern: '秒', suffix: 's', length: 1 },\n { pattern: '分', suffix: 'm', length: 1 },\n];\n\n// =============================================================================\n// Japanese Tokenizer Implementation\n// =============================================================================\n\nexport class JapaneseTokenizer extends BaseTokenizer {\n readonly language = 'ja';\n readonly direction = 'ltr' as const;\n\n constructor() {\n super();\n // Initialize keywords from profile + extras (single source of truth)\n this.initializeKeywordsFromProfile(japaneseProfile, JAPANESE_EXTRAS);\n // Set morphological normalizer for verb conjugations\n 
this.normalizer = new JapaneseMorphologicalNormalizer();\n }\n\n tokenize(input: string): TokenStream {\n const tokens: LanguageToken[] = [];\n let pos = 0;\n\n while (pos < input.length) {\n // Skip whitespace (Japanese can have spaces for readability)\n if (isWhitespace(input[pos])) {\n pos++;\n continue;\n }\n\n // Try CSS selector first (ASCII-based, highest priority)\n if (isSelectorStart(input[pos])) {\n // Check for event modifier first (.once, .debounce(), etc.)\n const modifierToken = this.tryEventModifier(input, pos);\n if (modifierToken) {\n tokens.push(modifierToken);\n pos = modifierToken.position.end;\n continue;\n }\n\n const selectorToken = this.trySelector(input, pos);\n if (selectorToken) {\n tokens.push(selectorToken);\n pos = selectorToken.position.end;\n continue;\n }\n }\n\n // Try string literal (both ASCII and Japanese quotes)\n if (isQuote(input[pos])) {\n const stringToken = this.tryString(input, pos);\n if (stringToken) {\n tokens.push(stringToken);\n pos = stringToken.position.end;\n continue;\n }\n }\n\n // Try URL (/path, ./path, http://, etc.)\n if (isUrlStart(input, pos)) {\n const urlToken = this.tryUrl(input, pos);\n if (urlToken) {\n tokens.push(urlToken);\n pos = urlToken.position.end;\n continue;\n }\n }\n\n // Try number (including Japanese time units)\n if (isDigit(input[pos])) {\n const numberToken = this.extractJapaneseNumber(input, pos);\n if (numberToken) {\n tokens.push(numberToken);\n pos = numberToken.position.end;\n continue;\n }\n }\n\n // Try variable reference (:varname)\n const varToken = this.tryVariableRef(input, pos);\n if (varToken) {\n tokens.push(varToken);\n pos = varToken.position.end;\n continue;\n }\n\n // Try multi-character particle (before single-character)\n const multiParticle = this.tryMultiCharParticle(input, pos, MULTI_CHAR_PARTICLES);\n if (multiParticle) {\n // Add role metadata to particle token\n const metadata = PARTICLE_ROLES.get(multiParticle.value);\n if (metadata) {\n tokens.push({\n ...multiParticle,\n metadata: {\n particleRole: metadata.role,\n particleConfidence: metadata.confidence,\n },\n });\n } else {\n tokens.push(multiParticle);\n }\n pos = multiParticle.position.end;\n continue;\n }\n\n // Check if this starts a multi-character keyword (before single-char particle check)\n // This prevents splitting keywords like もし (if) into も (particle) + し (identifier)\n if (SINGLE_CHAR_PARTICLES.has(input[pos])) {\n const keywordToken = this.tryProfileKeyword(input, pos);\n // Only accept keywords longer than 1 char (e.g., もし but not を/で/に which are role markers)\n if (keywordToken && keywordToken.value.length > 1) {\n tokens.push(keywordToken);\n pos = keywordToken.position.end;\n continue;\n }\n // Not a multi-char keyword, treat as particle\n const particle = input[pos];\n const metadata = PARTICLE_ROLES.get(particle);\n if (metadata) {\n tokens.push({\n ...createToken(particle, 'particle', createPosition(pos, pos + 1)),\n metadata: {\n particleRole: metadata.role,\n particleConfidence: metadata.confidence,\n },\n });\n } else {\n tokens.push(createToken(particle, 'particle', createPosition(pos, pos + 1)));\n }\n pos++;\n continue;\n }\n\n // Try Japanese word (kanji/kana sequence)\n if (isJapanese(input[pos])) {\n const wordToken = this.extractJapaneseWord(input, pos);\n if (wordToken) {\n tokens.push(wordToken);\n pos = wordToken.position.end;\n continue;\n }\n }\n\n // Try ASCII word (for mixed content)\n if (isAsciiIdentifierChar(input[pos])) {\n const asciiToken = this.extractAsciiWord(input, pos);\n if 
(asciiToken) {\n tokens.push(asciiToken);\n pos = asciiToken.position.end;\n continue;\n }\n }\n\n // Skip unknown character\n pos++;\n }\n\n return new TokenStreamImpl(tokens, 'ja');\n }\n\n classifyToken(token: string): TokenKind {\n if (PARTICLES.has(token)) return 'particle';\n // O(1) Map lookup instead of O(n) array search\n if (this.isKeyword(token)) return 'keyword';\n if (token.startsWith('#') || token.startsWith('.') || token.startsWith('[')) return 'selector';\n if (token.startsWith('\"') || token.startsWith(\"'\") || token.startsWith('「')) return 'literal';\n if (/^\\d/.test(token)) return 'literal';\n\n return 'identifier';\n }\n\n /**\n * Extract a Japanese word (sequence of kanji/kana).\n * Stops at particles, ASCII, or whitespace.\n *\n * Uses morphological normalization to handle verb conjugations:\n * 1. First checks if the exact word is in the keyword map\n * 2. If not found, tries to strip conjugation suffixes and check again\n */\n private extractJapaneseWord(input: string, startPos: number): LanguageToken | null {\n let pos = startPos;\n let word = '';\n\n while (pos < input.length) {\n const char = input[pos];\n\n // Stop at particles (except within longer words)\n if (SINGLE_CHAR_PARTICLES.has(char) && word.length > 0) {\n break;\n }\n\n // Check for multi-char particle\n let foundMulti = false;\n for (const particle of MULTI_CHAR_PARTICLES) {\n if (input.slice(pos, pos + particle.length) === particle && word.length > 0) {\n foundMulti = true;\n break;\n }\n }\n if (foundMulti) break;\n\n // Continue if Japanese character\n if (isJapanese(char)) {\n word += char;\n pos++;\n } else {\n break;\n }\n }\n\n if (!word) return null;\n\n // O(1) Map lookup instead of O(n) array search\n const keywordEntry = this.lookupKeyword(word);\n if (keywordEntry) {\n return createToken(word, 'keyword', createPosition(startPos, pos), keywordEntry.normalized);\n }\n\n // Try morphological normalization for conjugated forms\n const morphToken = this.tryMorphKeywordMatch(word, startPos, pos);\n if (morphToken) return morphToken;\n\n // Not a keyword, return as identifier\n return createToken(word, 'identifier', createPosition(startPos, pos));\n }\n\n /**\n * Extract an ASCII word (for mixed Japanese/English content).\n */\n private extractAsciiWord(input: string, startPos: number): LanguageToken | null {\n let pos = startPos;\n let word = '';\n\n while (pos < input.length && isAsciiIdentifierChar(input[pos])) {\n word += input[pos++];\n }\n\n if (!word) return null;\n\n return createToken(word, 'identifier', createPosition(startPos, pos));\n }\n\n /**\n * Extract a number, including Japanese time unit suffixes.\n * Japanese time units attach directly without whitespace.\n */\n private extractJapaneseNumber(input: string, startPos: number): LanguageToken | null {\n return this.tryNumberWithTimeUnits(input, startPos, JAPANESE_TIME_UNITS, {\n allowSign: false,\n skipWhitespace: false,\n });\n }\n}\n\n/**\n * Singleton instance.\n */\nexport const japaneseTokenizer = new JapaneseTokenizer();\n","/**\n * Japanese Language Module\n *\n * Self-registering module for Japanese language support.\n * Importing this module registers Japanese tokenizer and profile.\n */\n\nimport { registerLanguage } from '../registry';\nimport { japaneseTokenizer } from '../tokenizers/japanese';\nimport { japaneseProfile } from '../generators/profiles/japanese';\n\nexport { japaneseTokenizer } from '../tokenizers/japanese';\nexport { japaneseProfile } from '../generators/profiles/japanese';\n\nregisterLanguage('ja', 
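// Illustrative sketch (not part of the package source): end-to-end use of the Japanese tokenizer
// registered by this module (importing the module is enough to register the language). The token
// sequences below are the expected shape, stated approximately rather than asserted.
const toggleStream = japaneseTokenizer.tokenize('#button を 切り替え');
// roughly: selector '#button' → particle 'を' (patient role metadata) → keyword '切り替え' (normalized 'toggle')
const waitStream = japaneseTokenizer.tokenize('3秒 待つ');
// roughly: literal '3s' (秒 mapped to the standard 's' suffix) → keyword '待つ' (normalized 'wait')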
japaneseTokenizer, japaneseProfile);\n"],
A,EAkBU,qBACR,MACA,UACA,QACsB;AACtB,UAAM,SAAS,KAAK,aAAa,IAAI;AACrC,QAAI,CAAC,OAAQ,QAAO;AAGpB,UAAM,YAAY,KAAK,cAAc,OAAO,IAAI;AAChD,QAAI,CAAC,UAAW,QAAO;AAEvB,UAAM,eAAmC;AAAA,MACvC,YAAY,UAAU;AAAA,MACtB,MAAM,OAAO;AAAA,MACb,gBAAgB,OAAO;AAAA,IACzB;AACA,WAAO,YAAY,MAAM,WAAW,eAAe,UAAU,MAAM,GAAG,YAAY;AAAA,EACpF;AAAA;AAAA;AAAA;AAAA,EAKU,YAAY,OAAe,KAAmC;AACtE,UAAM,WAAW,mBAAmB,OAAO,GAAG;AAC9C,QAAI,UAAU;AACZ,aAAO,YAAY,UAAU,YAAY,eAAe,KAAK,MAAM,SAAS,MAAM,CAAC;AAAA,IACrF;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMU,iBAAiB,OAAe,KAAmC;AAE3E,QAAI,MAAM,GAAG,MAAM,KAAK;AACtB,aAAO;AAAA,IACT;AAGA,UAAM,QAAQ,MACX,MAAM,GAAG,EACT,MAAM,gEAAgE;AACzE,QAAI,CAAC,OAAO;AACV,aAAO;AAAA,IACT;AAEA,UAAM,YAAY,MAAM,CAAC,EAAE,QAAQ,YAAY,EAAE;AACjD,UAAM,eAAe,UAAU,MAAM,CAAC,EAAE,MAAM,GAAG,EAAE,CAAC;AACpD,UAAM,QAAQ,MAAM,CAAC;AAGrB,UAAM,QAAQ;AAAA,MACZ;AAAA,MACA;AAAA,MACA,eAAe,KAAK,MAAM,UAAU,MAAM;AAAA,IAC5C;AAGA,WAAO;AAAA,MACL,GAAG;AAAA,MACH,UAAU;AAAA,QACR;AAAA,QACA,OAAO,QAAS,iBAAiB,UAAU,QAAQ,SAAS,OAAO,EAAE,IAAK;AAAA,MAC5E;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKU,UAAU,OAAe,KAAmC;AACpE,UAAM,UAAU,qBAAqB,OAAO,GAAG;AAC/C,QAAI,SAAS;AACX,aAAO,YAAY,SAAS,WAAW,eAAe,KAAK,MAAM,QAAQ,MAAM,CAAC;AAAA,IAClF;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKU,UAAU,OAAe,KAAmC;AACpE,UAAM,SAAS,cAAc,OAAO,GAAG;AACvC,QAAI,QAAQ;AACV,aAAO,YAAY,QAAQ,WAAW,eAAe,KAAK,MAAM,OAAO,MAAM,CAAC;AAAA,IAChF;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsBU,iBACR,OACA,KACA,WACA,iBAAiB,OAC0B;AAC3C,QAAI,UAAU;AAGd,QAAI,gBAAgB;AAClB,aAAO,UAAU,MAAM,UAAU,aAAa,MAAM,OAAO,CAAC,GAAG;AAC7D;AAAA,MACF;AAAA,IACF;AAEA,UAAM,YAAY,MAAM,MAAM,OAAO;AAGrC,eAAW,QAAQ,WAAW;AAC5B,YAAM,YAAY,UAAU,MAAM,GAAG,KAAK,MAAM;AAChD,YAAM,UAAU,KAAK,kBACjB,UAAU,YAAY,MAAM,KAAK,QAAQ,YAAY,IACrD,cAAc,KAAK;AAEvB,UAAI,SAAS;AAEX,YAAI,KAAK,eAAe;AACtB,gBAAM,WAAW,UAAU,KAAK,MAAM,KAAK;AAC3C,cAAI,aAAa,KAAK,cAAe;AAAA,QACvC;AAGA,YAAI,KAAK,eAAe;AACtB,gBAAM,WAAW,UAAU,KAAK,MAAM,KAAK;AAC3C,cAAI,sBAAsB,QAAQ,EAAG;AAAA,QACvC;AAEA,eAAO,EAAE,QAAQ,KAAK,QAAQ,QAAQ,UAAU,KAAK,OAAO;AAAA,MAC9D;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWU,gBACR,OACA,UACA,YAAY,MAC+B;AAC3C,QAAI,MAAM;AACV,QAAI,SAAS;AAGb,QAAI,cAAc,MAAM,GAAG,MAAM,OAAO,MAAM,GAAG,MAAM,MAAM;AAC3D,gBAAU,MAAM,KAAK;AAAA,IACvB;AAGA,QAAI,OAAO,MAAM,UAAU,CAAC,QAAQ,MAAM,GAAG,CAAC,GAAG;AAC/C,aAAO;AAAA,IACT;AAGA,WAAO,MAAM,MAAM,UAAU,QAAQ,MAAM,GAAG,CAAC,GAAG;AAChD,gBAAU,MAAM,KAAK;AAAA,IACvB;AAGA,QAAI,MAAM,MAAM,UAAU,MAAM,GAAG,MAAM,KAAK;AAC5C,gBAAU,MAAM,KAAK;AACrB,aAAO,MAAM,MAAM,UAAU,QAAQ,MAAM,GAAG,CAAC,GAAG;AAChD,kBAAU,MAAM,KAAK;AAAA,MACvB;AAAA,IACF;AAEA,QAAI,CAAC,UAAU,WAAW,OAAO,WAAW,IAAK,QAAO;AAExD,WAAO,EAAE,QAAQ,QAAQ,IAAI;AAAA,EAC/B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAgBU,uBACR,OACA,KACA,iBACA,UAA6D,CAAC,GACxC;AACtB,UAAM,EAAE,YAAY,MAAM,iBAAiB,MAAM,IAAI;AAGrD,UAAM,aAAa,KAAK,gBAAgB,OAAO,KAAK,SAAS;AAC7D,QAAI,CAAC,WAAY,QAAO;AAExB,QAAI,EAAE,QAAQ,OAAO,IAAI;AAGzB,UAAM,WAAW,CAAC,GAAG,iBAAiB,GAAG,eAAc,mBAAmB;AAC1E,UAAM,YAAY,KAAK,iBAAiB,OAAO,QAAQ,UAAU,cAAc;AAE/E,QAAI,WAAW;AACb,gBAAU,UAAU;AACpB,eAAS,UAAU;AAAA,IACrB;AAEA,WAAO,YAAY,QAAQ,WAAW,eAAe,KAAK,MAAM,CAAC;AAAA,EACnE;AAAA;AAAA;AAAA;AAAA;AAAA,EAMU,OAAO,OAAe,KAAmC;AACjE,UAAM,MAAM,WAAW,OAAO,GAAG;AACjC,QAAI,KAAK;AACP,aAAO,YAAY,KAAK,OAAO,eAAe,KAAK,MAAM,IAAI,MAAM,CAAC;AAAA,IACtE;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMU,eAAe,OAAe,KAAmC;AACzE,QAAI,MAAM,GAAG,MAAM,IAAK,QAAO;AAC/B,QAAI,MAAM,KAAK,MAAM,OAAQ,QAAO;AACpC,QAAI,CAAC,sBAAsB,MAAM,MAAM,CAAC,CAAC,EAAG,QAAO;AAEnD,QAAI,SAAS,MAAM;AACnB,WAAO,SAAS,MAAM,UAAU,sBAAsB,MAAM,MAAM,CAAC,GAAG;AACpE;AAAA,IACF;AAEA,
UAAM,SAAS,MAAM,MAAM,KAAK,MAAM;AACtC,WAAO,YAAY,QAAQ,cAAc,eAAe,KAAK,MAAM,CAAC;AAAA,EACtE;AAAA;AAAA;AAAA;AAAA;AAAA,EAMU,YAAY,OAAe,KAAmC;AAEtE,UAAM,UAAU,MAAM,MAAM,KAAK,MAAM,CAAC;AACxC,QAAI,CAAC,MAAM,MAAM,MAAM,MAAM,MAAM,MAAM,IAAI,EAAE,SAAS,OAAO,GAAG;AAChE,aAAO,YAAY,SAAS,YAAY,eAAe,KAAK,MAAM,CAAC,CAAC;AAAA,IACtE;AAGA,UAAM,UAAU,MAAM,GAAG;AACzB,QAAI,CAAC,KAAK,KAAK,KAAK,KAAK,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,OAAO,GAAG;AAC9D,aAAO,YAAY,SAAS,YAAY,eAAe,KAAK,MAAM,CAAC,CAAC;AAAA,IACtE;AAGA,QAAI,CAAC,KAAK,KAAK,KAAK,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,OAAO,GAAG;AACzD,aAAO,YAAY,SAAS,eAAe,eAAe,KAAK,MAAM,CAAC,CAAC;AAAA,IACzE;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaU,qBACR,OACA,KACA,WACsB;AACtB,eAAW,YAAY,WAAW;AAChC,UAAI,MAAM,MAAM,KAAK,MAAM,SAAS,MAAM,MAAM,UAAU;AACxD,eAAO,YAAY,UAAU,YAAY,eAAe,KAAK,MAAM,SAAS,MAAM,CAAC;AAAA,MACrF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;AAAA;AAAA;AAAA;AAAA;AAzhBsB,eA0TM,sBAAkD;AAAA,EAC1E,EAAE,SAAS,MAAM,QAAQ,MAAM,QAAQ,EAAE;AAAA,EACzC,EAAE,SAAS,KAAK,QAAQ,KAAK,QAAQ,GAAG,eAAe,KAAK;AAAA,EAC5D,EAAE,SAAS,KAAK,QAAQ,KAAK,QAAQ,GAAG,eAAe,MAAM,eAAe,IAAI;AAAA,EAChF,EAAE,SAAS,KAAK,QAAQ,KAAK,QAAQ,GAAG,eAAe,KAAK;AAC9D;AA/TK,IAAe,gBAAf;;;ACxjBA,SAAS,SAAS,MAAmC;AAC1D,SAAO,EAAE,MAAM,MAAM,YAAY,EAAI;AACvC;AAKO,SAAS,WACd,MACA,YACA,UACqB;AACrB,MAAI,UAAU;AACZ,WAAO,EAAE,MAAM,YAAY,SAAS;AAAA,EACtC;AACA,SAAO,EAAE,MAAM,WAAW;AAC5B;;;ACrLA,IAAM,wBAA+C;AAAA;AAAA;AAAA,EAGnD,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,oBAAoB,eAAe,EAAE;AAAA,EAC1F,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,kBAAkB,eAAe,EAAE;AAAA,EACxF,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,kBAAkB,eAAe,EAAE;AAAA;AAAA,EAExF,EAAE,SAAS,gBAAM,YAAY,MAAM,iBAAiB,oBAAoB,eAAe,EAAE;AAAA,EACzF,EAAE,SAAS,gBAAM,YAAY,MAAM,iBAAiB,kBAAkB,eAAe,EAAE;AAAA;AAAA,EAGvF,EAAE,SAAS,kCAAS,YAAY,MAAM,iBAAiB,QAAQ,eAAe,EAAE;AAAA,EAChF,EAAE,SAAS,4BAAQ,YAAY,MAAM,iBAAiB,eAAe,eAAe,EAAE;AAAA,EACtF,EAAE,SAAS,kCAAS,YAAY,MAAM,iBAAiB,WAAW,eAAe,EAAE;AAAA,EACnF,EAAE,SAAS,kCAAS,YAAY,MAAM,iBAAiB,WAAW,eAAe,EAAE;AAAA,EACnF,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,eAAe,eAAe,EAAE;AAAA,EACrF,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,eAAe,eAAe,EAAE;AAAA,EACrF,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,eAAe,eAAe,EAAE;AAAA,EACrF,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,eAAe,eAAe,EAAE;AAAA;AAAA,EAGrF,EAAE,SAAS,sBAAO,YAAY,KAAK,iBAAiB,kBAAkB,eAAe,EAAE;AAAA,EACvF,EAAE,SAAS,sBAAO,YAAY,KAAK,iBAAiB,kBAAkB,eAAe,EAAE;AAAA;AAAA,EAGvF,EAAE,SAAS,4BAAQ,YAAY,MAAM,iBAAiB,mBAAmB,eAAe,EAAE;AAAA,EAC1F,EAAE,SAAS,4BAAQ,YAAY,MAAM,iBAAiB,mBAAmB,eAAe,EAAE;AAAA,EAC1F,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,cAAc,eAAe,EAAE;AAAA,EACpF,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,cAAc,eAAe,EAAE;AAAA;AAAA,EAGpF,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,QAAQ,eAAe,EAAE;AAAA,EAC9E,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,YAAY,eAAe,EAAE;AAAA,EAClF,EAAE,SAAS,gBAAM,YAAY,MAAM,iBAAiB,UAAU,eAAe,EAAE;AAAA;AAAA,EAG/E,EAAE,SAAS,UAAK,YAAY,MAAM,iBAAiB,WAAW,eAAe,EAAE;AAAA,EAC/E,EAAE,SAAS,UAAK,YAAY,MAAM,iBAAiB,QAAQ,eAAe,EAAE;AAAA;AAAA,EAG5E,EAAE,SAAS,gBAAM,YAAY,MAAM,iBAAiB,YAAY,eAAe,EAAE;AAAA,EACjF,EAAE,SAAS,4BAAQ,YAAY,MAAM,iBAAiB,QAAQ,eAAe,EAAE;AAAA;AAAA,EAG/E,EAAE,SAAS,sBAAO,YAAY,KAAK,iBAAiB,aAAa,eAAe,EAAE;AAAA,EAClF,EAAE,SAAS,gBAAM,YAAY,MAAM,iBAAiB,aAAa,eAAe,EAAE;AAAA;AAAA,EAGlF,EAAE,SAAS,sBAAO,YAAY,KAAK,iBAAiB,WAAW,eAAe,EAAE;AAAA;AAAA,EAGhF,EAAE,SAAS,sBAAO,YAAY,KAAK,iBAAiB,aAAa,eAAe,EAAE;AAAA,EAClF,EAAE,SAAS,gBAAM,YAAY,MAAM,iBAAiB,aAAa,eAAe,EAAE;AAAA;AAAA,EAGlF,EAAE,SAAS,gBAAM,YAAY,KAAK,iBAAiB,cAAc,eAAe,EAAE;AAAA;AAAA,EAGlF,EAAE,SAAS,UAAK,YAAY,MAAM,iBAAiB,cAAc,eAAe,EAAE;AACpF;AAOA,IAAM,gBAIA;AAAA;AAAA,EAEJ,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,mBAAmB;AAAA,EACxE,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,iBAAiB;AAAA,EACtE,EAAE,SA
AS,sBAAO,YAAY,MAAM,iBAAiB,iBAAiB;AAAA;AAAA,EAEtE,EAAE,SAAS,kCAAS,YAAY,MAAM,iBAAiB,cAAc;AAAA,EACrE,EAAE,SAAS,4BAAQ,YAAY,MAAM,iBAAiB,cAAc;AAAA;AAAA,EAEpE,EAAE,SAAS,4BAAQ,YAAY,MAAM,iBAAiB,OAAO;AAAA,EAC7D,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,SAAS;AAAA,EAC9D,EAAE,SAAS,sBAAO,YAAY,MAAM,iBAAiB,WAAW;AAAA,EAChE,EAAE,SAAS,gBAAM,YAAY,MAAM,iBAAiB,UAAU;AAAA,EAC9D,EAAE,SAAS,gBAAM,YAAY,MAAM,iBAAiB,OAAO;AAAA,EAC3D,EAAE,SAAS,gBAAM,YAAY,MAAM,iBAAiB,aAAa;AACnE;AAKA,SAAS,WAAW,MAAuB;AACzC,QAAM,OAAO,KAAK,WAAW,CAAC;AAC9B,SAAO,QAAQ,SAAU,QAAQ;AACnC;AAKA,SAAS,WAAW,MAAuB;AACzC,QAAM,OAAO,KAAK,WAAW,CAAC;AAC9B,SAAO,QAAQ,SAAU,QAAQ;AACnC;AAKA,SAAS,QAAQ,MAAuB;AACtC,QAAM,OAAO,KAAK,WAAW,CAAC;AAC9B,SAAQ,QAAQ,SAAU,QAAQ,SAAY,QAAQ,SAAU,QAAQ;AAC1E;AAKA,SAAS,iBAAiB,MAAuB;AAC/C,aAAW,QAAQ,MAAM;AACvB,QAAI,WAAW,IAAI,KAAK,WAAW,IAAI,KAAK,QAAQ,IAAI,GAAG;AACzD,aAAO;AAAA,IACT;AAAA,EACF;AACA,SAAO;AACT;AAKO,IAAM,kCAAN,MAAyE;AAAA,EAAzE;AACL,SAAS,WAAW;AAAA;AAAA;AAAA;AAAA;AAAA,EAKpB,eAAe,MAAuB;AAEpC,QAAI,CAAC,iBAAiB,IAAI,EAAG,QAAO;AAGpC,QAAI,KAAK,SAAS,EAAG,QAAO;AAG5B,UAAM,WAAW,KAAK,KAAK,SAAS,CAAC;AACrC,WAAO,WAAW,QAAQ;AAAA,EAC5B;AAAA;AAAA;AAAA;AAAA,EAKA,UAAU,MAAmC;AAE3C,UAAM,iBAAiB,KAAK,kBAAkB,IAAI;AAClD,QAAI,eAAgB,QAAO;AAG3B,UAAM,aAAa,KAAK,qBAAqB,IAAI;AACjD,QAAI,WAAY,QAAO;AAGvB,eAAW,QAAQ,uBAAuB;AACxC,UAAI,KAAK,SAAS,KAAK,OAAO,GAAG;AAC/B,cAAM,OAAO,KAAK,MAAM,GAAG,CAAC,KAAK,QAAQ,MAAM;AAG/C,cAAM,YAAY,KAAK,iBAAiB;AACxC,YAAI,KAAK,SAAS,UAAW;AAG7B,cAAM,WAGF;AAAA,UACF,iBAAiB,CAAC,KAAK,OAAO;AAAA,QAChC;AACA,YAAI,KAAK,iBAAiB;AACxB,mBAAS,kBAAkB,KAAK;AAAA,QAClC;AACA,eAAO,WAAW,MAAM,KAAK,YAAY,QAAQ;AAAA,MACnD;AAAA,IACF;AAGA,WAAO,SAAS,IAAI;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA,EAKQ,qBAAqB,MAA0C;AACrE,eAAW,WAAW,eAAe;AACnC,UAAI,KAAK,SAAS,QAAQ,OAAO,GAAG;AAClC,cAAM,OAAO,KAAK,MAAM,GAAG,CAAC,QAAQ,QAAQ,MAAM;AAGlD,YAAI,KAAK,SAAS,EAAG;AAGrB,eAAO,WAAW,MAAM,QAAQ,YAAY;AAAA,UAC1C,iBAAiB,CAAC,QAAQ,OAAO;AAAA,UACjC,iBAAiB,QAAQ;AAAA,UACzB,cAAc;AAAA,QAChB,CAAC;AAAA,MACH;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,kBAAkB,MAA0C;AAElE,UAAM,mBAKA;AAAA;AAAA,MAEJ;AAAA,QACE,SAAS;AAAA,QACT,UAAU,CAAC,UAAK,UAAK,0BAAM;AAAA,QAC3B,YAAY;AAAA,QACZ,eAAe;AAAA,MACjB;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU,CAAC,UAAK,UAAK,0BAAM;AAAA,QAC3B,YAAY;AAAA,QACZ,eAAe;AAAA,MACjB;AAAA;AAAA,MAEA,EAAE,SAAS,4BAAQ,UAAU,CAAC,UAAK,UAAK,cAAI,GAAG,YAAY,MAAM,eAAe,EAAE;AAAA,MAClF,EAAE,SAAS,4BAAQ,UAAU,CAAC,UAAK,UAAK,cAAI,GAAG,YAAY,MAAM,eAAe,EAAE;AAAA;AAAA,MAElF,EAAE,SAAS,sBAAO,UAAU,CAAC,UAAK,UAAK,QAAG,GAAG,YAAY,MAAM,eAAe,EAAE;AAAA,MAChF,EAAE,SAAS,sBAAO,UAAU,CAAC,UAAK,UAAK,QAAG,GAAG,YAAY,MAAM,eAAe,EAAE;AAAA,IAClF;AAEA,eAAW,EAAE,SAAS,UAAU,YAAY,cAAc,KAAK,kBAAkB;AAC/E,UAAI,KAAK,SAAS,OAAO,GAAG;AAC1B,cAAM,OAAO,KAAK,MAAM,GAAG,CAAC,QAAQ,MAAM;AAG1C,YAAI,KAAK,SAAS,cAAe;AAEjC,eAAO,WAAW,MAAM,YAAY;AAAA,UAClC,iBAAiB;AAAA,UACjB,iBAAiB;AAAA,QACnB,CAAC;AAAA,MACH;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AACF;AAGO,IAAM,kCAAkC,IAAI,gCAAgC;;;ACtR5E,IAAM,kBAAmC;AAAA,EAC9C,MAAM;AAAA,EACN,MAAM;AAAA,EACN,YAAY;AAAA,EACZ,WAAW;AAAA,EACX,WAAW;AAAA,EACX,iBAAiB;AAAA,EACjB,YAAY;AAAA;AAAA;AAAA,EAGZ,iBAAiB;AAAA,EACjB,MAAM;AAAA,IACJ,UAAU;AAAA,IACV,UAAU,CAAC,UAAK,UAAK,UAAK,gBAAM,cAAI;AAAA,IACpC,aAAa;AAAA,EACf;AAAA,EACA,YAAY;AAAA,IACV,IAAI;AAAA;AAAA,IACJ,IAAI;AAAA;AAAA,IACJ,KAAK;AAAA;AAAA,IACL,QAAQ;AAAA,IACR,OAAO;AAAA,IACP,QAAQ;AAAA,IACR,MAAM;AAAA,EACR;AAAA,EACA,YAAY;AAAA,IACV,QAAQ;AAAA,IACR,gBAAgB;AAAA;AAAA,IAEhB,UAAU;AAAA,MACR,cAAI;AAAA;AAAA,MACJ,0BAAM;AAAA;AAAA,MACN,cAAI;AAAA;AAAA,IACN;AAAA,EACF;AAAA,EACA,aAAa;AAAA,IACX,SAAS,EAAE,SAAS,UAAK,UAAU,QAAQ;AAAA,IAC3C,aAAa,EAAE,SAAS,UAAK,cAAc,CAAC,UAAK,QAAG,GAAG,UAAU,QAAQ;AAAA,IACzE,QAAQ,EAAE,SAAS,gBAAM,UAAU,QAAQ;A
AAA,IAC3C,OAAO,EAAE,SAAS,UAAK,UAAU,QAAQ;AAAA,IACzC,OAAO,EAAE,SAAS,UAAK,UAAU,QAAQ;AAAA;AAAA;AAAA;AAAA,EAG3C;AAAA,EACA,UAAU;AAAA;AAAA,IAER,QAAQ;AAAA,MACN,SAAS;AAAA,MACT,cAAc,CAAC,kCAAS,sBAAO,gCAAO;AAAA,MACtC,YAAY;AAAA,IACd;AAAA,IACA,KAAK,EAAE,SAAS,gBAAM,cAAc,CAAC,4BAAQ,oBAAK,GAAG,YAAY,MAAM;AAAA,IACvE,QAAQ,EAAE,SAAS,gBAAM,cAAc,CAAC,4BAAQ,0BAAM,GAAG,YAAY,SAAS;AAAA;AAAA,IAE9E,KAAK,EAAE,SAAS,gBAAM,cAAc,CAAC,sBAAO,oBAAK,GAAG,YAAY,MAAM;AAAA,IACtE,QAAQ,EAAE,SAAS,4BAAQ,cAAc,CAAC,kCAAS,0BAAM,GAAG,YAAY,SAAS;AAAA,IACjF,SAAS;AAAA,MACP,SAAS;AAAA,MACT,cAAc,CAAC,kCAAS,gCAAO;AAAA,MAC/B,YAAY;AAAA,IACd;AAAA,IACA,MAAM,EAAE,SAAS,gBAAM,cAAc,CAAC,cAAI,GAAG,YAAY,OAAO;AAAA,IAChE,MAAM,EAAE,SAAS,gBAAM,cAAc,CAAC,cAAI,GAAG,YAAY,OAAO;AAAA,IAChE,OAAO,EAAE,SAAS,gBAAM,cAAc,CAAC,0BAAM,GAAG,YAAY,QAAQ;AAAA,IACpE,MAAM,EAAE,SAAS,gBAAM,cAAc,CAAC,4BAAQ,0BAAM,GAAG,YAAY,OAAO;AAAA,IAC1E,OAAO,EAAE,SAAS,gBAAM,cAAc,CAAC,sBAAO,cAAI,GAAG,YAAY,QAAQ;AAAA;AAAA,IAEzE,KAAK,EAAE,SAAS,gBAAM,cAAc,CAAC,4BAAQ,oBAAK,GAAG,YAAY,MAAM;AAAA,IACvE,KAAK,EAAE,SAAS,gBAAM,cAAc,CAAC,4BAAQ,oBAAK,GAAG,YAAY,MAAM;AAAA,IACvE,WAAW;AAAA,MACT,SAAS;AAAA,MACT,cAAc,CAAC,sBAAO,4CAAS;AAAA,MAC/B,YAAY;AAAA,IACd;AAAA,IACA,WAAW;AAAA,MACT,SAAS;AAAA,MACT,cAAc,CAAC,sBAAO,sCAAQ;AAAA,MAC9B,YAAY;AAAA,IACd;AAAA,IACA,KAAK,EAAE,SAAS,gBAAM,cAAc,CAAC,gBAAM,cAAI,GAAG,YAAY,MAAM;AAAA;AAAA,IAEpE,MAAM,EAAE,SAAS,gBAAM,cAAc,CAAC,4BAAQ,oBAAK,GAAG,YAAY,OAAO;AAAA,IACzE,MAAM,EAAE,SAAS,gBAAM,cAAc,CAAC,sBAAO,sCAAQ,GAAG,YAAY,OAAO;AAAA,IAC3E,YAAY;AAAA,MACV,SAAS;AAAA,MACT,cAAc,CAAC,8CAAW,4CAAS;AAAA,MACnC,YAAY;AAAA,IACd;AAAA;AAAA,IAEA,IAAI,EAAE,SAAS,UAAK,cAAc,CAAC,UAAK,cAAI,GAAG,YAAY,KAAK;AAAA,IAChE,SAAS,EAAE,SAAS,sBAAO,cAAc,CAAC,gBAAM,0BAAM,GAAG,YAAY,UAAU;AAAA,IAC/E,MAAM,EAAE,SAAS,gBAAM,cAAc,CAAC,cAAI,GAAG,YAAY,OAAO;AAAA;AAAA,IAEhE,OAAO,EAAE,SAAS,kCAAS,cAAc,CAAC,cAAI,GAAG,YAAY,QAAQ;AAAA,IACrE,MAAM,EAAE,SAAS,sBAAO,cAAc,CAAC,4CAAS,GAAG,YAAY,OAAO;AAAA;AAAA,IAEtE,IAAI,EAAE,SAAS,gBAAM,cAAc,CAAC,gBAAM,gCAAO,GAAG,YAAY,KAAK;AAAA;AAAA,IAErE,MAAM,EAAE,SAAS,gBAAM,cAAc,CAAC,cAAI,GAAG,YAAY,OAAO;AAAA,IAChE,OAAO,EAAE,SAAS,gBAAM,cAAc,CAAC,0BAAM,GAAG,YAAY,QAAQ;AAAA,IACpE,QAAQ,EAAE,SAAS,gBAAM,cAAc,CAAC,0BAAM,GAAG,YAAY,SAAS;AAAA;AAAA,IAEtE,IAAI,EAAE,SAAS,gBAAM,cAAc,CAAC,cAAI,GAAG,YAAY,KAAK;AAAA,IAC5D,MAAM,EAAE,SAAS,gBAAM,YAAY,OAAO;AAAA,IAC1C,OAAO,EAAE,SAAS,gBAAM,YAAY,QAAQ;AAAA,IAC5C,MAAM,EAAE,SAAS,8CAAW,cAAc,CAAC,0BAAM,GAAG,YAAY,OAAO;AAAA,IACvE,QAAQ,EAAE,SAAS,4BAAQ,cAAc,CAAC,4BAAQ,0BAAM,GAAG,YAAY,SAAS;AAAA,IAChF,KAAK,EAAE,SAAS,sBAAO,cAAc,CAAC,QAAG,GAAG,YAAY,MAAM;AAAA,IAC9D,OAAO,EAAE,SAAS,gBAAM,cAAc,CAAC,QAAG,GAAG,YAAY,QAAQ;AAAA,IACjE,UAAU,EAAE,SAAS,sBAAO,cAAc,CAAC,cAAI,GAAG,YAAY,WAAW;AAAA,IACzE,MAAM,EAAE,SAAS,gBAAM,cAAc,CAAC,sBAAO,oBAAK,GAAG,YAAY,OAAO;AAAA,IACxE,OAAO,EAAE,SAAS,sBAAO,cAAc,CAAC,oBAAK,GAAG,YAAY,QAAQ;AAAA,IACpE,MAAM,EAAE,SAAS,4BAAQ,cAAc,CAAC,sBAAO,cAAI,GAAG,YAAY,OAAO;AAAA,IACzE,QAAQ,EAAE,SAAS,gBAAM,cAAc,CAAC,gBAAM,0BAAM,GAAG,YAAY,SAAS;AAAA,IAC5E,MAAM,EAAE,SAAS,4BAAQ,cAAc,CAAC,gBAAM,oBAAK,GAAG,YAAY,OAAO;AAAA,IACzE,KAAK,EAAE,SAAS,sBAAO,cAAc,CAAC,UAAK,cAAI,GAAG,YAAY,MAAM;AAAA,IACpE,KAAK,EAAE,SAAS,sBAAO,cAAc,CAAC,gBAAM,oBAAK,GAAG,YAAY,MAAM;AAAA;AAAA,IAEtE,IAAI,EAAE,SAAS,kBAAQ,cAAc,CAAC,IAAI,GAAG,YAAY,KAAK;AAAA,IAC9D,OAAO,EAAE,SAAS,sBAAO,cAAc,CAAC,0BAAM,GAAG,YAAY,QAAQ;AAAA,IACrE,MAAM,EAAE,SAAS,sBAAO,cAAc,CAAC,cAAI,GAAG,YAAY,OAAO;AAAA,IACjE,SAAS,EAAE,SAAS,gBAAM,cAAc,CAAC,gCAAO,GAAG,YAAY,UAAU;AAAA,IACzE,MAAM,EAAE,SAAS,sBAAO,cAAc,CAAC,0BAAM,GAAG,YAAY,OAAO;AAAA,IACnE,UAAU,EAAE,SAAS,4BAAQ,cAAc,CAAC,gCAAO,GAAG,YAAY,WAAW;AAAA,IAC7E,SAAS,EAAE,SAAS,wCAAU,cAAc,CAAC,cAAI,GAAG,YAAY,UAAU;AAAA,IAC1E,SAAS,EAAE,SAAS,gBAAM,cAAc,CA
AC,gBAAM,0BAAM,GAAG,YAAY,UAAU;AAAA;AAAA,IAE9E,MAAM,EAAE,SAAS,UAAK,cAAc,CAAC,QAAG,GAAG,YAAY,OAAO;AAAA,IAC9D,QAAQ,EAAE,SAAS,gBAAM,cAAc,CAAC,QAAG,GAAG,YAAY,SAAS;AAAA,IACnE,OAAO,EAAE,SAAS,gBAAM,cAAc,CAAC,QAAG,GAAG,YAAY,QAAQ;AAAA;AAAA,IAEjE,OAAO,EAAE,SAAS,gBAAM,cAAc,CAAC,QAAG,GAAG,YAAY,QAAQ;AAAA,IACjE,OAAO,EAAE,SAAS,4BAAQ,cAAc,CAAC,cAAI,GAAG,YAAY,QAAQ;AAAA,IACpE,MAAM,EAAE,SAAS,gBAAM,YAAY,OAAO;AAAA,EAC5C;AAAA,EACA,cAAc;AAAA,IACZ,WAAW,CAAC,UAAK,UAAK,UAAK,gBAAM,UAAK,UAAK,UAAK,UAAK,UAAK,QAAG;AAAA,IAC7D,kBAAkB;AAAA,EACpB;AACF;;;AChHA,IAAMC,cAAa,6BAA6B,CAAC,CAAC,OAAQ,KAAM,CAAC,CAAC;AAGlE,IAAMC,cAAa,6BAA6B,CAAC,CAAC,OAAQ,KAAM,CAAC,CAAC;AAGlE,IAAMC,WAAU,6BAA6B;AAAA,EAC3C,CAAC,OAAQ,KAAM;AAAA;AAAA,EACf,CAAC,OAAQ,KAAM;AAAA;AACjB,CAAC;AAGD,IAAM,aAAa,mBAAmBF,aAAYC,aAAYC,QAAO;AAUrE,IAAM,YAAY,oBAAI,IAAI;AAAA,EACxB;AAAA;AAAA,EACA;AAAA;AAAA,EACA;AAAA;AAAA,EACA;AAAA;AAAA,EACA;AAAA;AAAA,EACA;AAAA;AAAA,EACA;AAAA;AAAA,EACA;AAAA;AAAA,EACA;AAAA;AAAA,EACA;AAAA;AAAA,EACA;AAAA;AAAA,EACA;AAAA;AACF,CAAC;AAKD,IAAM,wBAAwB,oBAAI,IAAI,CAAC,UAAK,UAAK,UAAK,UAAK,UAAK,UAAK,UAAK,UAAK,QAAG,CAAC;AAKnF,IAAM,uBAAuB,CAAC,gBAAM,gBAAM,cAAI;AAY9C,IAAM,iBAAiB,oBAAI,IAA8B;AAAA,EACvD,CAAC,UAAK,EAAE,MAAM,WAAW,YAAY,MAAM,aAAa,gBAAgB,CAAC;AAAA,EACzE,CAAC,UAAK,EAAE,MAAM,eAAe,YAAY,MAAM,aAAa,0BAA0B,CAAC;AAAA,EACvF,CAAC,UAAK,EAAE,MAAM,UAAU,YAAY,MAAM,aAAa,wBAAwB,CAAC;AAAA,EAChF,CAAC,gBAAM,EAAE,MAAM,UAAU,YAAY,KAAK,aAAa,qBAAqB,CAAC;AAAA,EAC7E,CAAC,gBAAM,EAAE,MAAM,eAAe,YAAY,MAAM,aAAa,wBAAwB,CAAC;AAAA,EACtF,CAAC,UAAK,EAAE,MAAM,eAAe,YAAY,KAAK,aAAa,mBAAmB,CAAC;AAAA,EAC/E,CAAC,UAAK,EAAE,MAAM,SAAS,YAAY,KAAK,aAAa,kBAAkB,CAAC;AAAA,EACxE,CAAC,UAAK,EAAE,MAAM,WAAW,YAAY,KAAK,aAAa,oBAAoB,CAAC;AAAA,EAC5E,CAAC,UAAK,EAAE,MAAM,SAAS,YAAY,MAAM,aAAa,iBAAiB,CAAC;AAAA,EACxE,CAAC,UAAK,EAAE,MAAM,SAAS,YAAY,MAAM,aAAa,eAAe,CAAC;AAAA,EACtE,CAAC,UAAK,EAAE,MAAM,WAAW,YAAY,MAAM,aAAa,kBAAkB,CAAC;AAAA,EAC3E,CAAC,gBAAM,EAAE,MAAM,UAAU,YAAY,MAAM,aAAa,mBAAmB,CAAC;AAC9E,CAAC;AAeD,IAAM,kBAAkC;AAAA;AAAA,EAEtC,EAAE,QAAQ,UAAK,YAAY,OAAO;AAAA,EAClC,EAAE,QAAQ,UAAK,YAAY,QAAQ;AAAA,EACnC,EAAE,QAAQ,gBAAM,YAAY,OAAO;AAAA,EACnC,EAAE,QAAQ,sBAAO,YAAY,YAAY;AAAA;AAAA,EAGzC,EAAE,QAAQ,gBAAM,YAAY,QAAQ;AAAA,EACpC,EAAE,QAAQ,gBAAM,YAAY,OAAO;AAAA,EACnC,EAAE,QAAQ,UAAK,YAAY,OAAO;AAAA,EAClC,EAAE,QAAQ,UAAK,YAAY,WAAW;AAAA,EACtC,EAAE,QAAQ,4BAAQ,YAAY,UAAU;AAAA,EACxC,EAAE,QAAQ,UAAK,YAAY,SAAS;AAAA;AAAA,EAGpC,EAAE,QAAQ,4BAAQ,YAAY,QAAQ;AAAA,EACtC,EAAE,QAAQ,gBAAM,YAAY,SAAS;AAAA,EACrC,EAAE,QAAQ,gBAAM,YAAY,SAAS;AAAA,EACrC,EAAE,QAAQ,gBAAM,YAAY,QAAQ;AAAA,EACpC,EAAE,QAAQ,sBAAO,YAAY,OAAO;AAAA,EACpC,EAAE,QAAQ,kCAAS,YAAY,SAAS;AAAA,EACxC,EAAE,QAAQ,kCAAS,YAAY,UAAU;AAAA,EACzC,EAAE,QAAQ,kCAAS,YAAY,QAAQ;AAAA,EACvC,EAAE,QAAQ,8CAAW,YAAY,YAAY;AAAA,EAC7C,EAAE,QAAQ,wCAAU,YAAY,WAAW;AAAA,EAC3C,EAAE,QAAQ,sBAAO,YAAY,OAAO;AAAA;AAAA,EAGpC,EAAE,QAAQ,UAAK,YAAY,KAAK;AAAA,EAChC,EAAE,QAAQ,gBAAM,YAAY,KAAK;AAAA,EACjC,EAAE,QAAQ,gBAAM,YAAY,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA,EAOlC,EAAE,QAAQ,sBAAO,YAAY,KAAK;AAAA,EAClC,EAAE,QAAQ,sBAAO,YAAY,KAAK;AAAA,EAClC,EAAE,QAAQ,gBAAM,YAAY,KAAK;AAAA;AAAA,EAGjC,EAAE,QAAQ,gBAAM,YAAY,KAAK;AAAA;AAAA,EACjC,EAAE,QAAQ,sBAAO,YAAY,OAAO;AAAA,EACpC,EAAE,QAAQ,gBAAM,YAAY,OAAO;AAAA;AAAA,EAGnC,EAAE,QAAQ,UAAK,YAAY,IAAI;AAAA,EAC/B,EAAE,QAAQ,sBAAO,YAAY,KAAK;AAAA,EAClC,EAAE,QAAQ,UAAK,YAAY,IAAI;AAAA,EAC/B,EAAE,QAAQ,gBAAM,YAAY,IAAI;AAClC;AAWA,IAAM,sBAAkD;AAAA,EACtD,EAAE,SAAS,sBAAO,QAAQ,MAAM,QAAQ,EAAE;AAAA,EAC1C,EAAE,SAAS,gBAAM,QAAQ,KAAK,QAAQ,EAAE;AAAA,EACxC,EAAE,SAAS,UAAK,QAAQ,KAAK,QAAQ,EAAE;AAAA,EACvC,EAAE,SAAS,UAAK,QAAQ,KAAK,QAAQ,EAAE;AACzC;AAMO,IAAM,oBAAN,cAAgC,cAAc;AAAA,EAInD,cAAc;AACZ,UAAM;AAJR,SAAS,WAAW;AACpB,SAAS,YAAY;AAKnB,SAAK,8BAA8B,iBAAiB,eA
Ae;AAEnE,SAAK,aAAa,IAAI,gCAAgC;AAAA,EACxD;AAAA,EAEA,SAAS,OAA4B;AACnC,UAAM,SAA0B,CAAC;AACjC,QAAI,MAAM;AAEV,WAAO,MAAM,MAAM,QAAQ;AAEzB,UAAI,aAAa,MAAM,GAAG,CAAC,GAAG;AAC5B;AACA;AAAA,MACF;AAGA,UAAI,gBAAgB,MAAM,GAAG,CAAC,GAAG;AAE/B,cAAM,gBAAgB,KAAK,iBAAiB,OAAO,GAAG;AACtD,YAAI,eAAe;AACjB,iBAAO,KAAK,aAAa;AACzB,gBAAM,cAAc,SAAS;AAC7B;AAAA,QACF;AAEA,cAAM,gBAAgB,KAAK,YAAY,OAAO,GAAG;AACjD,YAAI,eAAe;AACjB,iBAAO,KAAK,aAAa;AACzB,gBAAM,cAAc,SAAS;AAC7B;AAAA,QACF;AAAA,MACF;AAGA,UAAI,QAAQ,MAAM,GAAG,CAAC,GAAG;AACvB,cAAM,cAAc,KAAK,UAAU,OAAO,GAAG;AAC7C,YAAI,aAAa;AACf,iBAAO,KAAK,WAAW;AACvB,gBAAM,YAAY,SAAS;AAC3B;AAAA,QACF;AAAA,MACF;AAGA,UAAI,WAAW,OAAO,GAAG,GAAG;AAC1B,cAAM,WAAW,KAAK,OAAO,OAAO,GAAG;AACvC,YAAI,UAAU;AACZ,iBAAO,KAAK,QAAQ;AACpB,gBAAM,SAAS,SAAS;AACxB;AAAA,QACF;AAAA,MACF;AAGA,UAAI,QAAQ,MAAM,GAAG,CAAC,GAAG;AACvB,cAAM,cAAc,KAAK,sBAAsB,OAAO,GAAG;AACzD,YAAI,aAAa;AACf,iBAAO,KAAK,WAAW;AACvB,gBAAM,YAAY,SAAS;AAC3B;AAAA,QACF;AAAA,MACF;AAGA,YAAM,WAAW,KAAK,eAAe,OAAO,GAAG;AAC/C,UAAI,UAAU;AACZ,eAAO,KAAK,QAAQ;AACpB,cAAM,SAAS,SAAS;AACxB;AAAA,MACF;AAGA,YAAM,gBAAgB,KAAK,qBAAqB,OAAO,KAAK,oBAAoB;AAChF,UAAI,eAAe;AAEjB,cAAM,WAAW,eAAe,IAAI,cAAc,KAAK;AACvD,YAAI,UAAU;AACZ,iBAAO,KAAK;AAAA,YACV,GAAG;AAAA,YACH,UAAU;AAAA,cACR,cAAc,SAAS;AAAA,cACvB,oBAAoB,SAAS;AAAA,YAC/B;AAAA,UACF,CAAC;AAAA,QACH,OAAO;AACL,iBAAO,KAAK,aAAa;AAAA,QAC3B;AACA,cAAM,cAAc,SAAS;AAC7B;AAAA,MACF;AAIA,UAAI,sBAAsB,IAAI,MAAM,GAAG,CAAC,GAAG;AACzC,cAAM,eAAe,KAAK,kBAAkB,OAAO,GAAG;AAEtD,YAAI,gBAAgB,aAAa,MAAM,SAAS,GAAG;AACjD,iBAAO,KAAK,YAAY;AACxB,gBAAM,aAAa,SAAS;AAC5B;AAAA,QACF;AAEA,cAAM,WAAW,MAAM,GAAG;AAC1B,cAAM,WAAW,eAAe,IAAI,QAAQ;AAC5C,YAAI,UAAU;AACZ,iBAAO,KAAK;AAAA,YACV,GAAG,YAAY,UAAU,YAAY,eAAe,KAAK,MAAM,CAAC,CAAC;AAAA,YACjE,UAAU;AAAA,cACR,cAAc,SAAS;AAAA,cACvB,oBAAoB,SAAS;AAAA,YAC/B;AAAA,UACF,CAAC;AAAA,QACH,OAAO;AACL,iBAAO,KAAK,YAAY,UAAU,YAAY,eAAe,KAAK,MAAM,CAAC,CAAC,CAAC;AAAA,QAC7E;AACA;AACA;AAAA,MACF;AAGA,UAAI,WAAW,MAAM,GAAG,CAAC,GAAG;AAC1B,cAAM,YAAY,KAAK,oBAAoB,OAAO,GAAG;AACrD,YAAI,WAAW;AACb,iBAAO,KAAK,SAAS;AACrB,gBAAM,UAAU,SAAS;AACzB;AAAA,QACF;AAAA,MACF;AAGA,UAAI,sBAAsB,MAAM,GAAG,CAAC,GAAG;AACrC,cAAM,aAAa,KAAK,iBAAiB,OAAO,GAAG;AACnD,YAAI,YAAY;AACd,iBAAO,KAAK,UAAU;AACtB,gBAAM,WAAW,SAAS;AAC1B;AAAA,QACF;AAAA,MACF;AAGA;AAAA,IACF;AAEA,WAAO,IAAI,gBAAgB,QAAQ,IAAI;AAAA,EACzC;AAAA,EAEA,cAAc,OAA0B;AACtC,QAAI,UAAU,IAAI,KAAK,EAAG,QAAO;AAEjC,QAAI,KAAK,UAAU,KAAK,EAAG,QAAO;AAClC,QAAI,MAAM,WAAW,GAAG,KAAK,MAAM,WAAW,GAAG,KAAK,MAAM,WAAW,GAAG,EAAG,QAAO;AACpF,QAAI,MAAM,WAAW,GAAG,KAAK,MAAM,WAAW,GAAG,KAAK,MAAM,WAAW,QAAG,EAAG,QAAO;AACpF,QAAI,MAAM,KAAK,KAAK,EAAG,QAAO;AAE9B,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUQ,oBAAoB,OAAe,UAAwC;AACjF,QAAI,MAAM;AACV,QAAI,OAAO;AAEX,WAAO,MAAM,MAAM,QAAQ;AACzB,YAAM,OAAO,MAAM,GAAG;AAGtB,UAAI,sBAAsB,IAAI,IAAI,KAAK,KAAK,SAAS,GAAG;AACtD;AAAA,MACF;AAGA,UAAI,aAAa;AACjB,iBAAW,YAAY,sBAAsB;AAC3C,YAAI,MAAM,MAAM,KAAK,MAAM,SAAS,MAAM,MAAM,YAAY,KAAK,SAAS,GAAG;AAC3E,uBAAa;AACb;AAAA,QACF;AAAA,MACF;AACA,UAAI,WAAY;AAGhB,UAAI,WAAW,IAAI,GAAG;AACpB,gBAAQ;AACR;AAAA,MACF,OAAO;AACL;AAAA,MACF;AAAA,IACF;AAEA,QAAI,CAAC,KAAM,QAAO;AAGlB,UAAM,eAAe,KAAK,cAAc,IAAI;AAC5C,QAAI,cAAc;AAChB,aAAO,YAAY,MAAM,WAAW,eAAe,UAAU,GAAG,GAAG,aAAa,UAAU;AAAA,IAC5F;AAGA,UAAM,aAAa,KAAK,qBAAqB,MAAM,UAAU,GAAG;AAChE,QAAI,WAAY,QAAO;AAGvB,WAAO,YAAY,MAAM,cAAc,eAAe,UAAU,GAAG,CAAC;AAAA,EACtE;AAAA;AAAA;AAAA;AAAA,EAKQ,iBAAiB,OAAe,UAAwC;AAC9E,QAAI,MAAM;AACV,QAAI,OAAO;AAEX,WAAO,MAAM,MAAM,UAAU,sBAAsB,MAAM,GAAG,CAAC,GAAG;AAC9D,cAAQ,MAAM,KAAK;AAAA,IACrB;AAEA,QAAI,CAAC,KAAM,QAAO;AAElB,WAAO,YAAY,MAAM,cAAc,eAAe,UAAU,GAAG,CAAC;AAAA,EACtE;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,sBAAsB,OAAe,UAAwC;AACnF,WAAO,KAAK,uBA
AuB,OAAO,UAAU,qBAAqB;AAAA,MACvE,WAAW;AAAA,MACX,gBAAgB;AAAA,IAClB,CAAC;AAAA,EACH;AACF;AAKO,IAAM,oBAAoB,IAAI,kBAAkB;;;AChbvD,iBAAiB,MAAM,mBAAmB,eAAe;","names":["normalized","normalized","isHiragana","isKatakana","isKanji"]}
@@ -0,0 +1,51 @@
+import { B as BaseTokenizer, T as TokenStream, a as TokenKind, L as LanguageProfile } from '../types-C4dcj53L.js';
+
+/**
+ * Korean Tokenizer
+ *
+ * Tokenizes Korean hyperscript input.
+ * Korean is an agglutinative language with:
+ * - Hangul syllable blocks (가-힣)
+ * - Particles (조사) mark grammatical roles
+ * - 하다 verbs (noun + 하다)
+ * - CSS selectors are embedded ASCII
+ */
+
+declare class KoreanTokenizer extends BaseTokenizer {
+    readonly language = "ko";
+    readonly direction: "ltr";
+    constructor();
+    tokenize(input: string): TokenStream;
+    classifyToken(token: string): TokenKind;
+    /**
+     * Extract a Korean word (sequence of Hangul).
+     * Prioritizes known keywords, then uses particle-based word boundaries.
+     *
+     * Uses morphological normalization to handle verb conjugations.
+     */
+    private extractKoreanWord;
+    /**
+     * Extract an ASCII word (for mixed Korean/English content).
+     */
+    private extractAsciiWord;
+    /**
+     * Extract a number, including Korean time unit suffixes.
+     * Korean time units attach directly without whitespace.
+     */
+    private extractKoreanNumber;
+}
+/**
+ * Singleton instance.
+ */
+declare const koreanTokenizer: KoreanTokenizer;
+
+/**
+ * Korean Language Profile
+ *
+ * SOV word order, particles (을/를, 에, 에서, etc.), space-separated between words.
+ * Agglutinative language with particles attaching to words.
+ */
+
+declare const koreanProfile: LanguageProfile;
+
+export { koreanProfile, koreanTokenizer };
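For orientation, a minimal usage sketch of the API declared in the hunk above. The `koreanTokenizer` and `koreanProfile` exports, the `tokenize(input: string): TokenStream` signature, and the `language`/`direction` fields come directly from these declarations; the `@lokascript/semantic/languages/ko` subpath import and the sample input string are assumptions and may need adjusting to the package's actual exports map.

```ts
// Sketch only, based on the declarations above; not an official example.
// Assumption: the package exposes a "languages/ko" subpath that maps to dist/languages/ko.js.
import { koreanTokenizer, koreanProfile } from '@lokascript/semantic/languages/ko';

// tokenize() takes a raw hyperscript string; per the doc comments, Hangul words,
// particles (조사), and embedded ASCII CSS selectors are split into a TokenStream.
const tokens = koreanTokenizer.tokenize('#menu 를 토글'); // hypothetical input

console.log(koreanTokenizer.language);  // "ko"
console.log(koreanTokenizer.direction); // "ltr"
console.log(tokens);        // TokenStream; its concrete shape is declared in types-C4dcj53L.d.ts
console.log(koreanProfile); // LanguageProfile for Korean (SOV order, particle handling)
```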