@bearcove/codemirror-lang-styx 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,263 @@
1
+ // src/syntax.grammar.ts
2
+ import { LRParser } from "@lezer/lr";
3
+
4
+ // src/heredoc.ts
5
+ import { ExternalTokenizer } from "@lezer/lr";
6
+
7
// src/syntax.grammar.terms.ts
// Term id of the heredoc token in the generated parser tables — must stay
// in sync with the output of @lezer/generator for src/styx.grammar.
var heredoc = 39;
9
+
10
+ // src/heredoc.ts
11
// True when char code `ch` can open a heredoc delimiter (uppercase A-Z).
function isDelimiterStart(ch) {
  const upperA = 65;
  const upperZ = 90;
  return upperA <= ch && ch <= upperZ;
}
14
// True when char code `ch` may continue a heredoc delimiter: [A-Z0-9_].
function isDelimiterChar(ch) {
  if (ch === 95) return true;            // underscore
  if (ch >= 48 && ch <= 57) return true; // digit 0-9
  return ch >= 65 && ch <= 90;           // uppercase A-Z
}
17
// True when char code `ch` may appear in a heredoc language hint: [a-z0-9_.-].
function isLangHintChar(ch) {
  if (ch >= 97 && ch <= 122) return true; // lowercase a-z
  if (ch >= 48 && ch <= 57) return true;  // digit 0-9
  return ch === 95 || ch === 46 || ch === 45; // '_', '.', '-'
}
20
// External tokenizer that matches an entire heredoc (<<DELIM[,lang]\n ... \nDELIM)
// as a single token, including the opening marker, all content lines, and the
// closing delimiter.
var heredocTokenizer = new ExternalTokenizer(
  (input, stack) => {
    // Opening marker: "<<"
    if (input.next !== 60 /* < */) return;
    input.advance();
    if (input.next !== 60 /* < */) return;
    input.advance();
    // Delimiter: [A-Z][A-Z0-9_]*
    if (!isDelimiterStart(input.next)) return;
    let delimiter = "";
    while (isDelimiterChar(input.next)) {
      delimiter += String.fromCharCode(input.next);
      input.advance();
    }
    // Optional ",lang" hint (a-z start, then [a-z0-9_.-] continuation)
    if (input.next === 44 /* , */) {
      input.advance();
      if (input.next >= 97 && input.next <= 122) {
        while (isLangHintChar(input.next)) {
          input.advance();
        }
      }
    }
    // The header line must end right here with a newline (\n or \r\n)
    if (input.next !== 10 && input.next !== 13) return;
    if (input.next === 13) input.advance(); // \r
    if (input.next === 10) input.advance(); // \n
    // Scan content lines until one consists solely of the (optionally
    // indented) delimiter, or EOF is reached.
    while (input.next !== -1) {
      // Skip leading whitespace so indented closing delimiters are accepted.
      while (input.next === 32 || input.next === 9) {
        input.advance();
      }
      // Try to consume the delimiter at the current position. No rewind is
      // needed on failure: a non-matching line is simply heredoc content,
      // and the rest of it is skipped below.
      let matchPos = 0;
      let isMatch = true;
      for (let i = 0; i < delimiter.length && input.next !== -1; i++) {
        if (input.next !== delimiter.charCodeAt(i)) {
          isMatch = false;
          break;
        }
        input.advance();
        matchPos++;
      }
      if (isMatch && matchPos === delimiter.length) {
        // The delimiter must be the whole line (followed by newline or EOF).
        if (input.next === 10 || input.next === 13 || input.next === -1) {
          input.acceptToken(heredoc);
          return;
        }
      }
      // Not the closing line — consume the rest of it plus its newline.
      while (input.next !== 10 && input.next !== 13 && input.next !== -1) {
        input.advance();
      }
      if (input.next === 13) input.advance();
      if (input.next === 10) input.advance();
    }
    // EOF without a closing delimiter: still accept as an (unterminated) heredoc.
    input.acceptToken(heredoc);
  },
  { contextual: false }
);
74
+
75
+ // src/highlight.ts
76
+ import { styleTags, tags as t } from "@lezer/highlight";
77
// Highlight-tag assignments for the Styx grammar's node types; wired into the
// generated parser below via `propSources`.
var styxHighlight = styleTags({
  // Keys (property names) - first atom in an Entry
  "KeyAtom/BareScalar": t.propertyName,
  "KeyAtom/QuotedScalar": t.propertyName,
  "KeyPayload/BareScalar": t.propertyName,
  "KeyPayload/QuotedScalar": t.propertyName,
  // Values - second atom in an Entry
  "ValueAtom/BareScalar": t.string,
  "ValueAtom/QuotedScalar": t.string,
  "ValuePayload/BareScalar": t.string,
  "ValuePayload/QuotedScalar": t.string,
  // Sequence items
  "SeqAtom/BareScalar": t.string,
  "SeqAtom/QuotedScalar": t.string,
  "SeqPayload/BareScalar": t.string,
  "SeqPayload/QuotedScalar": t.string,
  // Tags (@foo)
  Tag: t.tagName,
  // Raw strings and heredocs
  RawScalar: t.special(t.string),
  Heredoc: t.special(t.string),
  // Inline attributes (key>value pairs)
  Attributes: t.special(t.variableName),
  Unit: t.null,
  Comment: t.lineComment,
  DocComment: t.docComment,
  "( )": t.paren,
  "{ }": t.brace,
  ",": t.separator
});
107
+
108
+ // src/syntax.grammar.ts
109
// Generated LR parser tables (emitted by @lezer/generator from src/styx.grammar).
// The states/stateData/goto/tokenData strings are opaque serialized tables —
// do not edit by hand; regenerate with the build:grammar script instead.
var parser = LRParser.deserialize({
  version: 14,
  states: "*tQVQPOOOOQO'#C^'#C^OzQPO'#C_OOQQ'#Cb'#CbOOQQ'#Cd'#CdOOQQ'#Ce'#CeO#aQPO'#CnOOQQ'#Cs'#CsOOQQ'#Ct'#CtOOQQ'#Cv'#CvO$VQPO'#ChOOQQ'#Cw'#CwO$rQRO'#CaOOQQ'#Ca'#CaO%lQRO'#C`O&SQPO'#DWOOQO'#DW'#DWOOQO'#C|'#C|QVQPOOOOQO'#C}'#C}OOQO-E6{-E6{OOQO'#Cp'#CpOOQO'#DP'#DPO!lQPO'#CoOOQO'#Cr'#CrO&XQPO'#CrO&cQPO'#CoOOQQ,59Y,59YO&nQPO,59YOOQO'#DO'#DOO#hQPO'#CiOOQO'#Cu'#CuO&sQPO'#CjOOQO'#Cj'#CjO'gQPO'#CiOOQQ,59S,59SO'nQPO,59SOOQQ'#Cc'#CcOOQQ,58{,58{OOQO'#C{'#C{OOQO'#Cz'#CzO'sQPO'#CxOOQO'#Cx'#CxOOQO,58z,58zOOQO,59r,59rOOQO-E6z-E6zOOQO-E6}-E6}O(QQPO,59ZOOQO,59^,59^O(]QPO'#DPO(QQPO,59ZO(QQPO,59ZOOQQ1G.t1G.tOOQO-E6|-E6|O(sQPO,59TOOQO'#Ck'#CkOOQO,59U,59UO(sQPO,59TOOQO'#DR'#DRO(sQPO,59TOOQQ1G.n1G.nOOQO'#Cy'#CyOOQO,59d,59dO(zQPO1G.uO(zQPO1G.uOOQO,59l,59lOOQO-E7O-E7OO)VQPO1G.oO)VQPO1G.oOOQO,59m,59mOOQO-E7P-E7PO)^QPO7+$aP)iQPO'#DQO)vQPO7+$ZP#hQPO'#DO",
  stateData: "*W~OyOS~OZYOaUO{PO|`O}cO!ORO!PSO!QTO!RVO!SWO!TXO~O}cO|RXZRXaRX!ORX!PRX!QRX!RRX!SRX!TRX~OZYOaUOeeO|eO}cO!ORO!PSO!QTO!RVO!SWO!TXO~O`kO~P!lOZYOaUO|mO!ORO!PSO!QTO!RVO!SWO!TXO~OYsO~P#hOZYOaUO!PSO!QTO!RVO!SWO~OZTXaTXwTX|TX!OTX!PTX!QTX!RTX!STX!TTX`TXeTX~P$^OwwO!ORO!TXO|SX`SXeSX~P$^O||O~O!ORO!TXO~P$^OeeO|eO`cX~O`!UO~OY^XZ^Xa^X|^X!O^X!P^X!Q^X!R^X!S^X!T^X~P$^OY]X~P#hOY!^O~O|lX`lXelX~P$^OeeO|eO`ca~O}cO!ORO!TXO`sXesX|sX~P$^OY]a~P#hOeeO|eO`ci~OY]i~P#hOeeO|eO`cq~O}cO!ORO!TXO~P$^OY]q~P#hO!O!R}{!Q!S!T{~",
  goto: "(Q{PP|!Q![!i!r#[#_#_PP#_$Q$T$gPP#_$j$mP$z#_#_%U%b%z&T&W&Z%w&^&d&o'T'h'rPPPP'|T_ObS_ObXiUg!R!jS_ObWhUg!R!jR!Qi_^OUbgi!R!j^[OUbgi!R!jdpYnr!W!Z!]!e!f!k!lRy^Rv[^ZOUbgi!R!jdoYnr!W!Z!]!e!f!k!lQu[Qx^Q!XpR!_yRtYQrYQ!WnW![r!W!]!fX!g!Z!e!k!lR!YpRlUYfUg!S!a!iX!Rj!P!T!bQjUQ!PgT!c!R!jeqYnr!W!Z!]!e!f!k!l^ZOUbgi!R!jdoYnr!W!Z!]!e!f!k!lRx^_]OUbgi!R!jR{^R!`yRz^QbOR}b[QOUbg!R!jRdQQnYY!Vn!Z!e!k!lQ!ZrS!e!W!]R!k!fQgUW!Og!S!a!iQ!SjS!a!P!TR!i!bQ!TjQ!b!PT!d!T!bQ!]rQ!f!WT!h!]!fTaOb",
  nodeNames: "\u26A0 Document Comment DocComment Entry KeyExpr Tag KeyPayload QuotedScalar RawScalar ) ( Sequence SeqContent SeqItem SeqPayload } { Object ObjContent ObjSep , ObjItem Unit Attributes SeqAtom BareScalar KeyAtom ValueExpr ValuePayload ValueAtom Heredoc",
  maxTerm: 51,
  nodeProps: [
    ["openedBy", 10, "(", 16, "{"],
    ["closedBy", 11, ")", 17, "}"]
  ],
  propSources: [styxHighlight],
  skippedNodes: [0],
  repeatNodeCount: 6,
  tokenData: "B[~RmOX!|XY(UYZ(aZ]!|]^(f^p!|pq(Uqr!|rs(lsx!|xy)syz)xz|!||})}}!P!|!P!Q*S!Q!^!|!^!_$Q!a!b!|!b!c?b!c#f!|#f#g@U#g#o!|#o#pBQ#p#q!|#q#rBV#r;'S!|;'S;=`(O<%lO!|~#R_!T~OX!|Z]!|^p!|qr!|sx!|z|!|}!^!|!^!_$Q!_!`!|!`!a$|!a#o!|#p#q!|#r;'S!|;'S;=`(O<%lO!|~$T]OX$QZ]$Q^p$Qqr$Qsx$Qz|$Q}!`$Q!`!a$|!a#o$Q#p#q$Q#r;'S$Q;'S;=`'r<%lO$Q~%PZOX%rZ]%r^p%rqr%rsx%rz|%r}#o%r#p#q%r#r;'S%r;'S;=`'x<%lO%r~%w]!S~OX%rXY&pZ]%r^p%rpq&pqr%rsx%rz|%r}#o%r#p#q%r#r;'S%r;'S;=`'x<%lO%r~&s_OX$QXY&pZ]$Q^p$Qpq&pqr$Qsx$Qz|$Q}!_$Q!a!b$Q!c#o$Q#p#q$Q#r;'S$Q;'S;=`'r<%lO$Q~'uP;=`<%l$Q~'{P;=`<%l%r~(RP;=`<%l!|~(ZQy~XY(Upq(U~(fO|~~(iPYZ(a~(oXOY(lZ](l^r(lrs)[s#O(l#O#P)a#P;'S(l;'S;=`)m<%lO(l~)aO!P~~)dRO;'S(l;'S;=`)m<%lO(l~)pP;=`<%l(l~)xOZ~~)}OY~~*SOe~~*Xa!T~OX!|Z]!|^p!|qr!|sx!|z|!|}!P!|!P!Q+^!Q!^!|!^!_$Q!_!`!|!`!a$|!a#o!|#p#q!|#r;'S!|;'S;=`(O<%lO!|~+ch!T~OX,}XY.pZ],}^p,}pq.pqr,}rs.psx,}xz.pz|,}|}.p}!P,}!P!Q5u!Q!^,}!^!_/_!_!`,}!`!a0x!a#o,}#o#p.p#p#q,}#q#r.p#r;'S,};'S;=`5o<%lO,}~-Uh{~!T~OX,}XY.pZ],}^p,}pq.pqr,}rs.psx,}xz.pz|,}|}.p}!^,}!^!_/_!_!`,}!`!a0x!a#Q,}#Q#R!|#R#o,}#o#p.p#p#q,}#q#r.p#r;'S,};'S;=`5o<%lO,}~.uU{~OY.pZ].p^#Q.p#R;'S.p;'S;=`/X<%lO.p~/[P;=`<%l.p~/df{~OX/_XY.pZ]/_^p/_pq.pqr/_rs.psx/_xz.pz|/_|}.p}!`/_!`!a0x!a#Q/_#Q#R$Q#R#o/_#o#p.p#p#q/_#q#r.p#r;'S/_;'S;=`5c<%lO/_~0}d{~OX2]XY.pZ]2]^p2]pq.pqr2]rs.psx2]xz.pz|2]|}.p}#Q2]#Q#R%r#R#o2]#o#p.p#p#q2]#q#r.p#r;'S2];'S;=`5i<%lO2]~2dd{~!S~OX2]XY3rZ]2]^p2]pq3rqr2]rs.psx2]xz.pz|2]|}.p}#Q2]#Q#R%r#R#o2]#o#p.p#p#q2]#q#r.p#r;'S2];'S;=`5i<%lO2]~3wh{~OX/_XY3rZ]/_^p/_pq3rqr/_rs.psx/_xz.pz|/_|}.p}!_/_!_!a.p!a!b/_!b!c.p!c#Q/_#Q#R$Q#R#o/_#o#p.p#p#q/_#q#r.p#r;'S/_;'S;=`5c<%lO/_~5fP;=`<%l/_~5lP;=`<%l2]~5rP;=`<%l,}~5zj!T~OX5uXY7lYZ8XZ]5u]^8^^p5upq7lqr5urs7lsx5uxz7lz|5u|}7l}!^5u!^!_8j!_!`5u!`!a:X!a#Q5u#Q#R!|#R#o5u#o#p7l#p#q5u#q#r7l#r;'S5u;'S;=`?[<%lO5u~7oWOY7lYZ8XZ]7l]^8^^#Q7l#R;'S7l;'S;=`8d<%lO7l~8^O}~~8aPYZ8X~8gP;=`<%l7l~8mhOX8jXY7lYZ8XZ]8j]^8^^p8jpq7lqr8jrs7lsx8jxz7lz|8j|}7l}!`8j!`!a:X!a#Q8j#Q#R$Q#R#o8j#o#p7l#p#q8j#q#r7l#r;'S8j;'S;=`?O<%lO8j~:[fOX;pXY7lYZ8XZ];p]^8^^p;ppq7lqr;prs7lsx;pxz7lz|;p|}7l}#Q;p#Q#R%r#R#o;p#o#p7l#p#q;p#q#r7l#r;'S;p;'S;=`?U<%lO;p~;uf!S~OX;pXY=ZYZ8XZ];p]^8^^p;ppq=Zqr;prs7lsx;pxz7lz|;p|}7l}#Q;p#Q#R%r#R#o;p#o#p7l#p#q;p#q#r7l#r;'S;p;'S;=`?U<%lO;p~=^jOX8jXY=ZYZ8XZ]8j]^8^^p8jpq=Zqr8jrs7lsx8jxz7lz|8j|}7l}!_8j!_!a7l!a!b8j!b!c7l!c#Q8j#Q#R$Q#R#o8j#o#p7l#p#q8j#q#r7l#r;'S8j;'S;=`?O<%lO8j~?RP;=`<%l8j~?XP;=`<%l;p~?_P;=`<%l5u~?gR!R~!c!}?p#R#S?p#T#o?p~?uT!O~}!O?p!Q![?p!c!}?p#R#S?p#T#o?p~@Za!T~OX!|Z]!|^p!|qr!|rsA`st@Utx!|z|!|}!^!|!^!_$Q!_!`!|!`!a$|!a#o!|#p#q!|#r;'S!|;'S;=`(O<%lO!|~AcTOrA`rsArs;'SA`;'S;=`Az<%lOA`~AwP!Q~stAr~A}P;=`<%lA`~BVOa~~B[O`~",
  tokenizers: [0, heredocTokenizer],
  topRules: { "Document": [0, 1] },
  tokenPrec: 413
});
128
+
129
+ // src/index.ts
130
+ import {
131
+ LRLanguage,
132
+ LanguageSupport,
133
+ foldNodeProp,
134
+ foldInside,
135
+ indentNodeProp,
136
+ continuedIndent,
137
+ syntaxTree,
138
+ foldService
139
+ } from "@codemirror/language";
140
+ import { completeFromList } from "@codemirror/autocomplete";
141
+ import { parseMixed } from "@lezer/common";
142
// Parses the raw text of a heredoc token.
// Returns null when `text` does not begin with a valid opening marker
// (<<DELIM or <<DELIM,lang followed by a newline); otherwise returns the
// delimiter, the optional lang hint, and the [contentStart, contentEnd)
// offsets of the body within `text`. An unterminated heredoc's body runs
// to the end of the text.
function parseHeredocText(text) {
  const header = /^<<([A-Z][A-Z0-9_]*)(?:,([a-z][a-z0-9_.-]*))?\r?\n/.exec(text);
  if (header === null) return null;
  const [marker, delimiter, hint] = header;
  const langHint = hint || null;
  const contentStart = marker.length;
  // Closing line: the delimiter alone, optionally indented (multiline anchor).
  const closer = new RegExp(`^[ \\t]*${delimiter}$`, "m");
  const found = text.slice(contentStart).match(closer);
  const contentEnd =
    found && found.index !== void 0
      ? contentStart + found.index // body stops where the closing line begins
      : text.length;               // unterminated: everything is body
  return { delimiter, langHint, contentStart, contentEnd };
}
165
// Builds a parseMixed wrapper that re-parses Heredoc bodies with a nested
// language parser, selected by matching the heredoc's lang hint (the tag in
// <<DELIM,tag) against the configured nested languages.
function createMixedParser(nestedLanguages) {
  const parsersByTag = /* @__PURE__ */ new Map();
  for (const entry of nestedLanguages) {
    parsersByTag.set(entry.tag, entry.language.language.parser);
  }
  return parseMixed((node, input) => {
    if (node.type.name !== "Heredoc") return null;
    const parsed = parseHeredocText(input.read(node.from, node.to));
    // No nesting without a well-formed heredoc carrying a lang hint.
    if (!parsed || !parsed.langHint) return null;
    const nested = parsersByTag.get(parsed.langHint);
    if (!nested) return null;
    // Overlay only the heredoc body, not the marker or closing delimiter.
    const from = node.from + parsed.contentStart;
    const to = node.from + parsed.contentEnd;
    return { parser: nested, overlay: [{ from, to }] };
  });
}
183
// Fold service: folds an Object or Sequence whose opening bracket sits on the
// queried line, hiding everything between the opener and the closer.
var styxFoldService = foldService.of((state, lineStart, lineEnd) => {
  let cur = syntaxTree(state).resolveInner(lineEnd, -1);
  // Walk up from the innermost node at the end of the line.
  while (cur) {
    const name = cur.type.name;
    if (name === "Object" || name === "Sequence") {
      const opener = cur.firstChild;
      const closer = cur.lastChild;
      // Only fold non-empty brackets that start on this line.
      if (opener && closer && opener.to < closer.from && opener.from >= lineStart) {
        return { from: opener.to, to: closer.from };
      }
    }
    cur = cur.parent;
  }
  return null;
});
197
// Node props shared by both language variants: indentation and folding
// for the bracketed Object ({ }) and Sequence (( )) constructs.
var baseProps = [
  indentNodeProp.add({
    // Keep indenting continuation lines until the closing bracket line.
    Object: continuedIndent({ except: /^\s*\}/ }),
    Sequence: continuedIndent({ except: /^\s*\)/ })
  }),
  foldNodeProp.add({
    // Fold the interior between the delimiters.
    Object: foldInside,
    Sequence: foldInside
  })
];
207
// Base Styx language definition (no nested heredoc language highlighting).
var styxLanguage = LRLanguage.define({
  name: "styx",
  parser: parser.configure({ props: baseProps }),
  languageData: {
    commentTokens: { line: "//" },
    closeBrackets: { brackets: ["(", "{", '"'] }
  }
});
215
// Returns the shared base language when no nested languages are configured;
// otherwise defines a fresh LRLanguage whose parser re-parses heredoc bodies
// with the matching nested parser.
function createStyxLanguage(nestedLanguages) {
  if (!nestedLanguages.length) {
    return styxLanguage;
  }
  return LRLanguage.define({
    name: "styx",
    parser: parser.configure({
      props: baseProps,
      wrap: createMixedParser(nestedLanguages)
    }),
    languageData: {
      commentTokens: { line: "//" },
      closeBrackets: { brackets: ["(", "{", '"'] }
    }
  });
}
232
// Autocompletion entries for Styx's built-in @tags.
var builtinTags = [
  "string",
  "int",
  "float",
  "bool",
  "null",
  "object",
  "array",
  "optional",
  "required",
  "default",
  "enum",
  "pattern",
  "min",
  "max",
  "minLength",
  "maxLength"
].map((name) => ({ label: "@" + name, type: "keyword" }));
250
// Registers @tag autocompletion on the base Styx language via its data facet.
var styxCompletion = styxLanguage.data.of({
  autocomplete: completeFromList(builtinTags)
});
253
// Creates the Styx LanguageSupport extension.
// config.nestedLanguages: array of { tag, language } — heredocs written as
// <<DELIM,tag are re-parsed/highlighted with the matching nested language.
function styx(config = {}) {
  const nestedLanguages = config.nestedLanguages || [];
  const lang = createStyxLanguage(nestedLanguages);
  // BUG FIX: bind the completion source to the language instance actually in
  // use. With nested languages configured, createStyxLanguage returns a fresh
  // LRLanguage distinct from `styxLanguage`, so the module-level
  // `styxCompletion` (registered on styxLanguage.data) would never activate
  // for it and @tag completions would silently disappear.
  const completion = lang === styxLanguage
    ? styxCompletion
    : lang.data.of({ autocomplete: completeFromList(builtinTags) });
  const nestedSupports = nestedLanguages.flatMap((n) => n.language.support);
  return new LanguageSupport(lang, [completion, styxFoldService, ...nestedSupports]);
}
259
+ export {
260
+ parser,
261
+ styx,
262
+ styxLanguage
263
+ };
package/package.json ADDED
@@ -0,0 +1,63 @@
1
+ {
2
+ "name": "@bearcove/codemirror-lang-styx",
3
+ "version": "0.1.0",
4
+ "description": "Styx language support for CodeMirror 6",
5
+ "type": "module",
6
+ "main": "dist/index.cjs",
7
+ "module": "dist/index.js",
8
+ "types": "dist/index.d.ts",
9
+ "exports": {
10
+ ".": {
11
+ "types": "./dist/index.d.ts",
12
+ "import": "./dist/index.js",
13
+ "require": "./dist/index.cjs"
14
+ }
15
+ },
16
+ "files": [
17
+ "dist",
18
+ "src"
19
+ ],
20
+ "keywords": [
21
+ "codemirror",
22
+ "lezer",
23
+ "styx",
24
+ "syntax-highlighting",
25
+ "editor"
26
+ ],
27
+ "author": "Amos Wenger <amos@bearcove.net>",
28
+ "license": "MIT",
29
+ "repository": {
30
+ "type": "git",
31
+ "url": "https://github.com/bearcove/styx.git",
32
+ "directory": "editors/codemirror-styx"
33
+ },
34
+ "bugs": {
35
+ "url": "https://github.com/bearcove/styx/issues"
36
+ },
37
+ "homepage": "https://styx.bearcove.net",
38
+ "devDependencies": {
39
+ "@lezer/generator": "^1.7.0",
40
+ "tsup": "^8.0.0",
41
+ "typescript": "^5.3.0",
42
+ "vitest": "^1.0.0"
43
+ },
44
+ "peerDependencies": {
45
+ "@codemirror/autocomplete": "^6.0.0",
46
+ "@codemirror/language": "^6.0.0",
47
+ "@codemirror/state": "^6.0.0",
48
+ "@codemirror/view": "^6.0.0",
49
+ "@lezer/common": "^1.0.0",
50
+ "@lezer/highlight": "^1.0.0",
51
+ "@lezer/lr": "^1.0.0"
52
+ },
53
+ "publishConfig": {
54
+ "access": "public"
55
+ },
56
+ "scripts": {
57
+ "build": "npm run build:grammar && npm run build:bundle",
58
+ "build:grammar": "lezer-generator src/styx.grammar -o src/syntax.grammar.ts",
59
+ "build:bundle": "tsup src/index.ts --format esm,cjs --dts --clean",
60
+ "dev": "npm run build:grammar && tsup src/index.ts --format esm,cjs --dts --watch",
61
+ "test": "vitest run"
62
+ }
63
+ }
package/src/heredoc.ts ADDED
@@ -0,0 +1,116 @@
1
+ import { ExternalTokenizer } from "@lezer/lr";
2
+ import { heredoc as Heredoc } from "./syntax.grammar.terms";
3
+
4
+ // Helper: check if char is valid delimiter start [A-Z]
5
+ function isDelimiterStart(ch: number): boolean {
6
+ return ch >= 65 && ch <= 90; // A-Z
7
+ }
8
+
9
+ // Helper: check if char is valid delimiter char [A-Z0-9_]
10
+ function isDelimiterChar(ch: number): boolean {
11
+ return (ch >= 65 && ch <= 90) || (ch >= 48 && ch <= 57) || ch === 95;
12
+ }
13
+
14
+ // Helper: check if char is valid lang hint char [a-z0-9_.-]
15
+ function isLangHintChar(ch: number): boolean {
16
+ return (ch >= 97 && ch <= 122) || (ch >= 48 && ch <= 57) ||
17
+ ch === 95 || ch === 46 || ch === 45;
18
+ }
19
+
20
+ /**
21
+ * External tokenizer that matches an entire heredoc as a single token.
22
+ *
23
+ * Format: <<DELIM[,lang]\n...content...\nDELIM
24
+ *
25
+ * The token includes:
26
+ * - The opening marker (<<DELIM or <<DELIM,lang)
27
+ * - The newline after the marker
28
+ * - All content lines
29
+ * - The closing delimiter
30
+ */
31
+ export const heredocTokenizer = new ExternalTokenizer(
32
+ (input, stack) => {
33
+ // Must start with <<
34
+ if (input.next !== 60 /* < */) return;
35
+ input.advance();
36
+ if (input.next !== 60 /* < */) return;
37
+ input.advance();
38
+
39
+ // Must have delimiter starting with [A-Z]
40
+ if (!isDelimiterStart(input.next)) return;
41
+
42
+ // Read delimiter name
43
+ let delimiter = "";
44
+ while (isDelimiterChar(input.next)) {
45
+ delimiter += String.fromCharCode(input.next);
46
+ input.advance();
47
+ }
48
+
49
+ // Optional lang hint after comma
50
+ if (input.next === 44 /* , */) {
51
+ input.advance();
52
+ // Consume lang hint (a-z start, then a-z0-9_.- continuation)
53
+ if (input.next >= 97 && input.next <= 122) {
54
+ while (isLangHintChar(input.next)) {
55
+ input.advance();
56
+ }
57
+ }
58
+ }
59
+
60
+ // Must be followed by newline
61
+ if (input.next !== 10 && input.next !== 13) return;
62
+
63
+ // Consume newline
64
+ if (input.next === 13) input.advance(); // \r
65
+ if (input.next === 10) input.advance(); // \n
66
+
67
+ // Now scan content lines until we find the delimiter at start of line
68
+ while (input.next !== -1) {
69
+ // At start of line - check for delimiter
70
+ // Skip optional leading whitespace (for indented heredocs)
71
+ while (input.next === 32 || input.next === 9) {
72
+ input.advance();
73
+ }
74
+
75
+ // Check if this line starts with the delimiter
76
+ let matchPos = 0;
77
+ let isMatch = true;
78
+
79
+ // We need to peek ahead without consuming if it's not a match
80
+ // Unfortunately ExternalTokenizer doesn't have peek, so we'll
81
+ // consume and track position
82
+ const lineStart = input.pos;
83
+
84
+ for (let i = 0; i < delimiter.length && input.next !== -1; i++) {
85
+ if (input.next !== delimiter.charCodeAt(i)) {
86
+ isMatch = false;
87
+ break;
88
+ }
89
+ input.advance();
90
+ matchPos++;
91
+ }
92
+
93
+ if (isMatch && matchPos === delimiter.length) {
94
+ // Check that delimiter is followed by newline or EOF
95
+ if (input.next === 10 || input.next === 13 || input.next === -1) {
96
+ // Found the end! Accept the token
97
+ input.acceptToken(Heredoc);
98
+ return;
99
+ }
100
+ }
101
+
102
+ // Not a match - consume rest of line
103
+ while (input.next !== 10 && input.next !== 13 && input.next !== -1) {
104
+ input.advance();
105
+ }
106
+
107
+ // Consume newline
108
+ if (input.next === 13) input.advance();
109
+ if (input.next === 10) input.advance();
110
+ }
111
+
112
+ // EOF without finding end delimiter - still accept as (unterminated) heredoc
113
+ input.acceptToken(Heredoc);
114
+ },
115
+ { contextual: false }
116
+ );
@@ -0,0 +1,37 @@
1
+ import { styleTags, tags as t } from "@lezer/highlight";
2
+
3
/**
 * Highlight-tag assignments for the Styx grammar's node types.
 * Attached to the generated parser as a prop source so each syntax-tree
 * node maps onto a standard @lezer/highlight tag.
 */
export const styxHighlight = styleTags({
  // Keys (property names) - first atom in an Entry
  "KeyAtom/BareScalar": t.propertyName,
  "KeyAtom/QuotedScalar": t.propertyName,
  "KeyPayload/BareScalar": t.propertyName,
  "KeyPayload/QuotedScalar": t.propertyName,

  // Values - second atom in an Entry
  "ValueAtom/BareScalar": t.string,
  "ValueAtom/QuotedScalar": t.string,
  "ValuePayload/BareScalar": t.string,
  "ValuePayload/QuotedScalar": t.string,

  // Sequence items
  "SeqAtom/BareScalar": t.string,
  "SeqAtom/QuotedScalar": t.string,
  "SeqPayload/BareScalar": t.string,
  "SeqPayload/QuotedScalar": t.string,

  // Tags (@foo)
  Tag: t.tagName,

  // Raw strings and heredocs
  RawScalar: t.special(t.string),
  Heredoc: t.special(t.string),

  // Inline attributes (key>value pairs)
  Attributes: t.special(t.variableName),
  Unit: t.null,
  Comment: t.lineComment,
  DocComment: t.docComment,
  "( )": t.paren,
  "{ }": t.brace,
  ",": t.separator,
});