@graffiticode/parser 0.1.5 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CLAUDE.md +72 -0
- package/package.json +5 -2
- package/src/index.js +1 -0
- package/src/parse.js +19 -6
- package/src/parser.js +4 -0
- package/src/unparse.js +330 -0
- package/src/unparse.spec.js +291 -0
package/CLAUDE.md
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
# CLAUDE.md
|
|
2
|
+
|
|
3
|
+
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
|
4
|
+
|
|
5
|
+
## Development Commands
|
|
6
|
+
|
|
7
|
+
### Testing
|
|
8
|
+
```bash
|
|
9
|
+
# Run all tests with experimental VM modules
|
|
10
|
+
npm test
|
|
11
|
+
|
|
12
|
+
# Run specific test files
|
|
13
|
+
NODE_OPTIONS=--experimental-vm-modules jest src/parser.spec.js
|
|
14
|
+
```
|
|
15
|
+
|
|
16
|
+
### Linting
|
|
17
|
+
```bash
|
|
18
|
+
# Lint code
|
|
19
|
+
npm run lint
|
|
20
|
+
|
|
21
|
+
# Lint and automatically fix issues
|
|
22
|
+
npm run lint:fix
|
|
23
|
+
```
|
|
24
|
+
|
|
25
|
+
## Architecture Overview
|
|
26
|
+
|
|
27
|
+
This is the Graffiticode parser package - a core component that parses Graffiticode language syntax into ASTs (Abstract Syntax Trees).
|
|
28
|
+
|
|
29
|
+
### Package Structure
|
|
30
|
+
|
|
31
|
+
The parser is a workspace package within the Graffiticode monorepo. It's an ES module package (`"type": "module"`) that exports parsing functionality used by the API and language compilers.
|
|
32
|
+
|
|
33
|
+
### Core Components
|
|
34
|
+
|
|
35
|
+
1. **Parser Entry Point** (`src/parser.js`):
|
|
36
|
+
- `buildParser()` - Factory function that creates a parser instance with dependencies
|
|
37
|
+
- Integrates with language lexicons loaded from the API
|
|
38
|
+
- Uses Node.js VM module for sandboxed execution
|
|
39
|
+
|
|
40
|
+
2. **Core Parser** (`src/parse.js`):
|
|
41
|
+
- Implements the main parsing logic with a state machine approach
|
|
42
|
+
- Handles tokenization and AST construction
|
|
43
|
+
- Includes error tracking and position coordinates
|
|
44
|
+
- Supports keywords, operators, and language-specific lexicons
|
|
45
|
+
|
|
46
|
+
3. **AST Module** (`src/ast.js`):
|
|
47
|
+
- Manages AST node creation and manipulation
|
|
48
|
+
- Node pooling for memory efficiency
|
|
49
|
+
- Error node generation
|
|
50
|
+
|
|
51
|
+
4. **Environment** (`src/env.js`):
|
|
52
|
+
- Manages parsing environment and scopes
|
|
53
|
+
- Handles lexicon lookups
|
|
54
|
+
|
|
55
|
+
5. **Folder** (`src/folder.js`):
|
|
56
|
+
- AST transformation and folding operations
|
|
57
|
+
|
|
58
|
+
## Testing Strategy
|
|
59
|
+
|
|
60
|
+
- Uses Jest with experimental VM modules support
|
|
61
|
+
- Test files follow `*.spec.js` pattern
|
|
62
|
+
- Main test file: `src/parser.spec.js` contains comprehensive parsing tests
|
|
63
|
+
|
|
64
|
+
## Monorepo Context
|
|
65
|
+
|
|
66
|
+
This parser package is part of the Graffiticode monorepo:
|
|
67
|
+
- Parent monorepo runs Firebase emulators for integration testing
|
|
68
|
+
- API package (`../api`) depends on this parser
|
|
69
|
+
- Auth packages (`../auth`, `../auth-client`) handle authentication
|
|
70
|
+
- Common package (`../common`) contains shared utilities
|
|
71
|
+
|
|
72
|
+
When working with the parser, be aware that it integrates tightly with the API's language loading mechanism (`../../api/src/lang/index.js`).
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@graffiticode/parser",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.3.0",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"publishConfig": {
|
|
6
6
|
"access": "public"
|
|
@@ -20,5 +20,8 @@
|
|
|
20
20
|
"keywords": [],
|
|
21
21
|
"author": "",
|
|
22
22
|
"license": "MIT",
|
|
23
|
-
"description": ""
|
|
23
|
+
"description": "",
|
|
24
|
+
"dependencies": {
|
|
25
|
+
"@graffiticode/basis": "^1.6.2"
|
|
26
|
+
}
|
|
24
27
|
}
|
package/src/index.js
CHANGED
package/src/parse.js
CHANGED
|
@@ -469,14 +469,27 @@ export const parse = (function () {
|
|
|
469
469
|
});
|
|
470
470
|
}
|
|
471
471
|
function binding(ctx, cc) {
|
|
472
|
+
// Save the current lexeme before bindingName consumes it
|
|
473
|
+
const savedLexeme = lexeme;
|
|
474
|
+
const savedCoord = getCoord(ctx);
|
|
472
475
|
return bindingName(ctx, function (ctx) {
|
|
473
|
-
|
|
474
|
-
|
|
476
|
+
// Check if we have a colon for full syntax, otherwise use shorthand
|
|
477
|
+
if (match(ctx, TK_COLON)) {
|
|
478
|
+
eat(ctx, TK_COLON);
|
|
479
|
+
const ret = function (ctx) {
|
|
480
|
+
countCounter(ctx);
|
|
481
|
+
return expr(ctx, cc);
|
|
482
|
+
};
|
|
483
|
+
ret.cls = "punc";
|
|
484
|
+
return ret;
|
|
485
|
+
} else {
|
|
486
|
+
// Shorthand syntax - create a name reference for the value
|
|
487
|
+
// The binding name was already pushed as a string by bindingName
|
|
488
|
+
// Now we need to push a name reference (identifier) as the value
|
|
489
|
+
Ast.name(ctx, savedLexeme, savedCoord);
|
|
475
490
|
countCounter(ctx);
|
|
476
|
-
return
|
|
477
|
-
}
|
|
478
|
-
ret.cls = "punc";
|
|
479
|
-
return ret;
|
|
491
|
+
return cc;
|
|
492
|
+
}
|
|
480
493
|
});
|
|
481
494
|
}
|
|
482
495
|
function lambda(ctx, cc) {
|
package/src/parser.js
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import vm from "vm";
|
|
2
2
|
import { getLangAsset } from "../../api/src/lang/index.js";
|
|
3
3
|
import { parse } from "./parse.js";
|
|
4
|
+
import { unparse } from "./unparse.js";
|
|
4
5
|
|
|
5
6
|
// commonjs export
|
|
6
7
|
const main = {
|
|
@@ -91,3 +92,6 @@ export const parser = buildParser({
|
|
|
91
92
|
main,
|
|
92
93
|
vm
|
|
93
94
|
});
|
|
95
|
+
|
|
96
|
+
// Add unparse as a property of parser
|
|
97
|
+
parser.unparse = unparse;
|
package/src/unparse.js
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
// Pretty printer that converts an AST back to source code
|
|
2
|
+
import { lexicon as basisLexicon } from "@graffiticode/basis";
|
|
3
|
+
|
|
4
|
+
/**
 * Unparse an AST node to source code.
 *
 * Walks a tree-form AST (as produced by `reconstructNode`) and emits
 * Graffiticode surface syntax. Unknown tags fall back to a reverse
 * lexicon lookup, then to a bare identifier, then to a comment placeholder.
 *
 * Fixes over the previous version:
 * - falsy primitive leaves (0, false, "") were swallowed by the `!node`
 *   guard and unparsed as ""; they now print correctly,
 * - NUM/IDENT always return strings (elts[0] may be a raw number),
 * - missing/short `elts` arrays no longer throw on malformed nodes.
 *
 * @param {object|string|number|boolean} node - The AST node or primitive leaf to unparse
 * @param {object} lexicon - Lexicon mapping source names to { name, arity } entries
 * @returns {string} The unparsed source code ("" for null/undefined nodes)
 */
function unparseNode(node, lexicon) {
  // Only null/undefined mean "no node"; 0, false and "" are real leaves.
  if (node === null || node === undefined) {
    return "";
  }

  // Handle primitive leaf values stored directly in elts.
  if (typeof node === "string" || typeof node === "number" || typeof node === "boolean") {
    return String(node);
  }

  // Guarded view of the children: elts may be absent on malformed nodes.
  const elts = Array.isArray(node.elts) ? node.elts : [];

  switch (node.tag) {
    case "PROG":
      // A program is its expression list terminated by "..".
      return elts.length > 0 ? unparseNode(elts[0], lexicon) + ".." : "..";

    case "EXPRS": {
      if (elts.length === 0) {
        return "";
      }
      // Heuristic: a bare identifier followed by exactly `arity` expressions
      // is an unfolded application of a lexicon function - print it in
      // prefix form instead of period-separated expressions.
      if (elts.length >= 3) {
        const first = elts[0];
        if (first && first.tag && first.elts && first.elts.length === 0) {
          const funcName = first.tag;
          if (lexicon && lexicon[funcName]) {
            const arity = lexicon[funcName].arity || 0;
            if (arity > 0 && elts.length === arity + 1) {
              const args = elts.slice(1).map(elt => unparseNode(elt, lexicon)).join(" ");
              return `${funcName} ${args}`;
            }
          }
        }
      }
      // Multiple expressions are separated by periods.
      return elts.map(elt => unparseNode(elt, lexicon)).join(".");
    }

    case "NUM":
      // Coerce so the return type is always string (elts[0] may be a number).
      return elts.length > 0 ? String(elts[0]) : "";

    case "STR": {
      // Escape backslashes first so quote escapes are not double-escaped.
      const str = elts.length > 0 ? String(elts[0]) : "";
      const escaped = str.replace(/\\/g, "\\\\").replace(/'/g, "\\'");
      return `'${escaped}'`;
    }

    case "BOOL":
      return elts[0] ? "true" : "false";

    case "NULL":
      return "null";

    case "IDENT":
      return elts.length > 0 ? String(elts[0]) : "";

    case "LIST":
      // Array literal: [a, b, c] (empty elts yields "[]").
      return "[" + elts.map(elt => unparseNode(elt, lexicon)).join(", ") + "]";

    case "RECORD":
      // Object literal: {a: 1, b: 2} (empty elts yields "{}").
      return "{" + elts.map(elt => unparseNode(elt, lexicon)).join(", ") + "}";

    case "BINDING": {
      // Key-value pair inside a record; STR keys print without quotes.
      if (elts.length >= 2) {
        const key = elts[0] && elts[0].tag === "STR"
          ? elts[0].elts[0]
          : unparseNode(elts[0], lexicon);
        const value = unparseNode(elts[1], lexicon);
        return `${key}: ${value}`;
      }
      return "";
    }

    case "PAREN":
      // Parenthesized expression.
      return elts.length > 0 ? "(" + unparseNode(elts[0], lexicon) + ")" : "()";

    case "APPLY":
      // Function application: `fn arg`.
      if (elts.length >= 2) {
        return unparseNode(elts[0], lexicon) + " " + unparseNode(elts[1], lexicon);
      }
      return "";

    case "LAMBDA": {
      // Lambda function: `\params . body` (params at elts[1], body at elts[2]).
      if (elts.length >= 3) {
        const params = elts[1];
        const body = elts[2];
        let paramStr = "";
        if (params && params.elts) {
          paramStr = params.elts.map(p => unparseNode(p, lexicon)).join(" ");
        }
        const bodyStr = unparseNode(body, lexicon);
        return paramStr ? `\\${paramStr} . ${bodyStr}` : `\\. ${bodyStr}`;
      }
      return "";
    }

    case "LET": {
      // Let binding: `let a = x, b = y in body`.
      if (elts.length >= 2) {
        const bindings = elts[0];
        const body = elts[1];
        let bindingStr = "";
        if (bindings && bindings.elts) {
          bindingStr = bindings.elts.map(b => {
            if (b.elts && b.elts.length >= 2) {
              const name = unparseNode(b.elts[0], lexicon);
              const value = unparseNode(b.elts[1], lexicon);
              return `${name} = ${value}`;
            }
            return "";
          }).filter(s => s).join(", ");
        }
        const bodyStr = unparseNode(body, lexicon);
        return `let ${bindingStr} in ${bodyStr}`;
      }
      return "";
    }

    case "IF": {
      // If-then with optional else branch.
      if (elts.length >= 2) {
        const cond = unparseNode(elts[0], lexicon);
        const thenExpr = unparseNode(elts[1], lexicon);
        if (elts.length >= 3) {
          const elseExpr = unparseNode(elts[2], lexicon);
          return `if ${cond} then ${thenExpr} else ${elseExpr}`;
        }
        return `if ${cond} then ${thenExpr}`;
      }
      return "";
    }

    case "CASE": {
      // Case expression: scrutinee at elts[0], branches after.
      if (elts.length > 0) {
        const expr = unparseNode(elts[0], lexicon);
        const cases = elts.slice(1).map(c => unparseNode(c, lexicon));
        return `case ${expr} of ${cases.join(" | ")}`;
      }
      return "";
    }

    case "OF":
      // Single case branch: `pattern => expr`.
      if (elts.length >= 2) {
        const pattern = unparseNode(elts[0], lexicon);
        const expr = unparseNode(elts[1], lexicon);
        return `${pattern} => ${expr}`;
      }
      return "";

    case "NEG":
      // Unary negation.
      if (elts.length >= 1) {
        return `-${unparseNode(elts[0], lexicon)}`;
      }
      return "";

    case "ERROR":
      // Error nodes survive as comments so the output stays readable.
      if (elts.length > 0) {
        return `/* ERROR: ${elts[0]} */`;
      }
      return "/* ERROR */";

    default: {
      // Reverse lexicon lookup: find the source name whose entry maps to
      // this tag, then print in prefix notation.
      let sourceName = null;
      if (lexicon) {
        for (const [key, value] of Object.entries(lexicon)) {
          if (value && value.name === node.tag) {
            sourceName = key;
            break;
          }
        }
      }

      if (sourceName) {
        if (elts.length > 0) {
          const args = elts.map(elt => unparseNode(elt, lexicon)).join(" ");
          return `${sourceName} ${args}`;
        }
        return sourceName;
      }

      // Leafless unknown tags are treated as plain identifiers
      // (e.g. lowercase names not present in the lexicon).
      if (elts.length === 0) {
        return node.tag;
      }

      // Fallback for genuinely unknown interior nodes.
      console.warn(`Unknown node tag: ${node.tag}`);
      return `/* ${node.tag} */`;
    }
  }
}
|
|
253
|
+
|
|
254
|
+
/**
 * Unparse an AST pool (as returned by the parser) to source code.
 *
 * @param {object} ast - The AST pool with a `root` property
 * @param {object} [dialectLexicon] - Dialect-specific lexicon (optional)
 * @returns {string} The unparsed source code ("" when there is no root)
 */
export function unparse(ast, dialectLexicon = {}) {
  // Nothing to do without a pool that names its root node.
  if (!ast || !ast.root) {
    return "";
  }

  // Dialect entries shadow the shared basis lexicon.
  const lexicon = Object.assign({}, basisLexicon, dialectLexicon);

  // The pool stores nodes by id; rebuild the tree from the root, then print.
  const tree = reconstructNode(ast, ast.root);
  return unparseNode(tree, lexicon);
}
|
|
274
|
+
|
|
275
|
+
/**
 * Reconstruct a tree-form node from the AST pool format.
 *
 * Pool entries reference children by numeric id; this resolves those
 * references recursively while leaving primitive leaf payloads in place.
 *
 * @param {object} pool - The AST pool (node id -> node)
 * @param {string|number} nodeId - The node id to reconstruct
 * @returns {object|null} The reconstructed node, or null for missing/zero ids
 */
function reconstructNode(pool, nodeId) {
  // Id 0 - as a number or the string "0" - and falsy ids mean "no node".
  if (!nodeId || nodeId === "0" || nodeId === 0) {
    return null;
  }

  const poolNode = pool[nodeId];
  if (!poolNode) {
    return null;
  }

  const rebuilt = { tag: poolNode.tag, elts: [] };

  switch (poolNode.tag) {
    case "NUM":
    case "STR":
    case "IDENT":
    case "BOOL":
      // Leaf tags keep their primitive payload as-is.
      rebuilt.elts = [poolNode.elts[0]];
      break;

    case "NULL":
      // NULL carries no payload; elts stays empty.
      break;

    default:
      // Interior nodes: any element that looks like a node id is a
      // reference into the pool; everything else is a primitive value.
      // NOTE(review): a primitive string that happens to be all digits
      // would be misread as an id here - confirm the pool format never
      // stores such values outside the leaf tags above.
      if (Array.isArray(poolNode.elts)) {
        rebuilt.elts = poolNode.elts.map((elt) =>
          (typeof elt === "number" || (typeof elt === "string" && /^\d+$/.test(elt)))
            ? reconstructNode(pool, elt)
            : elt
        );
      }
      break;
  }

  return rebuilt;
}
|
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
import { parser } from "./parser.js";
import { unparse } from "./unparse.js";

describe("unparse", () => {
  // Parse `source` with the default (0) language, then unparse the pool.
  async function testRoundTrip(source, lexicon = {}) {
    const ast = await parser.parse(0, source);
    return unparse(ast, lexicon);
  }

  // Round-trip fixtures grouped by suite: each case parses its source and
  // expects the unparsed output to reproduce the source exactly.
  const roundTripSuites = {
    literals: [
      ["should unparse string literals", "'hello, world'.."],
      ["should unparse string literals with escaped quotes", "'it\\'s working'.."],
      ["should unparse numeric literals", "42.."],
      ["should unparse negative numbers", "-42.."],
      ["should unparse decimal numbers", "3.14159.."],
      ["should unparse boolean true", "true.."],
      ["should unparse boolean false", "false.."],
      ["should unparse null", "null.."],
    ],
    "data structures": [
      ["should unparse empty list", "[].."],
      ["should unparse list with single element", "[42].."],
      ["should unparse list with multiple elements", "[1, 2, 3].."],
      ["should unparse nested lists", "[[1, 2], [3, 4]].."],
      ["should unparse empty record", "{}.."],
      ["should unparse record with single field", "{x: 10}.."],
      ["should unparse record with multiple fields", "{x: 10, y: 20}.."],
      ["should unparse nested records", "{a: {b: 1}, c: 2}.."],
    ],
    expressions: [
      ["should unparse parenthesized expression", "(42).."],
      ["should unparse addition", "1 + 2.."],
      ["should unparse subtraction", "10 - 5.."],
      ["should unparse multiplication", "3 * 4.."],
      ["should unparse division", "10 / 2.."],
      ["should unparse modulo", "10 % 3.."],
      ["should unparse power", "2 ^ 3.."],
      ["should unparse string concatenation", "'hello' ++ ' world'.."],
      ["should unparse complex arithmetic expression", "(1 + 2) * 3.."],
    ],
    "multiple expressions": [
      ["should unparse multiple expressions separated by periods", "1.2.3.."],
      ["should unparse mixed expressions", "'hello'.[1, 2].{x: 10}.."],
    ],
    "identifiers and function calls": [
      ["should unparse identifier", "foo.."],
      ["should unparse function application", "foo 42.."],
      ["should unparse function with multiple arguments", "foo [1, 2, 3].."],
      ["should unparse nested function applications", "foo (bar 42).."],
    ],
    "control flow": [
      ["should unparse if-then expression", "if true then 1.."],
      ["should unparse if-then-else expression", "if true then 1 else 2.."],
      ["should unparse nested if expressions", "if true then (if false then 1 else 2) else 3.."],
    ],
    "lambda expressions": [
      ["should unparse lambda with no parameters", "\\. 42.."],
      ["should unparse lambda with one parameter", "\\x . x + 1.."],
      ["should unparse lambda with multiple parameters", "\\x y . x + y.."],
      ["should unparse lambda application", "(\\x . x + 1) 5.."],
    ],
    "let bindings": [
      ["should unparse let with single binding", "let x = 10 in x + 1.."],
      ["should unparse let with multiple bindings", "let x = 10, y = 20 in x + y.."],
      ["should unparse nested let bindings", "let x = 10 in (let y = 20 in x + y).."],
    ],
  };

  for (const [suiteName, cases] of Object.entries(roundTripSuites)) {
    describe(suiteName, () => {
      for (const [title, source] of cases) {
        it(title, async () => {
          expect(await testRoundTrip(source)).toBe(source);
        });
      }
    });
  }

  describe("edge cases", () => {
    it("should handle empty program", async () => {
      expect(await testRoundTrip("..")).toBe("..");
    });

    it("should handle null AST", () => {
      expect(unparse(null)).toBe("");
    });

    it("should handle AST without root", () => {
      expect(unparse({})).toBe("");
    });
  });
});
|