conjure-js 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80) hide show
  1. package/conjure +0 -0
  2. package/dist/assets/codicon-ngg6Pgfi.ttf +0 -0
  3. package/dist/assets/editor.worker-CdQrwHl8.js +26 -0
  4. package/dist/assets/main-A7ZMId9A.css +1 -0
  5. package/dist/assets/main-CmI-7epE.js +3137 -0
  6. package/dist/index.html +195 -0
  7. package/dist/vite.svg +1 -0
  8. package/package.json +68 -0
  9. package/src/bin/__fixtures__/smoke/app/lib.clj +4 -0
  10. package/src/bin/__fixtures__/smoke/app/main.clj +4 -0
  11. package/src/bin/__fixtures__/smoke/repl-smoke.ts +12 -0
  12. package/src/bin/bencode.ts +205 -0
  13. package/src/bin/cli.ts +250 -0
  14. package/src/bin/nrepl-utils.ts +59 -0
  15. package/src/bin/nrepl.ts +393 -0
  16. package/src/bin/version.ts +4 -0
  17. package/src/clojure/core.clj +620 -0
  18. package/src/clojure/core.clj.d.ts +189 -0
  19. package/src/clojure/demo/math.clj +16 -0
  20. package/src/clojure/demo/math.clj.d.ts +4 -0
  21. package/src/clojure/demo.clj +42 -0
  22. package/src/clojure/demo.clj.d.ts +0 -0
  23. package/src/clojure/generated/builtin-namespace-registry.ts +14 -0
  24. package/src/clojure/generated/clojure-core-source.ts +623 -0
  25. package/src/clojure/generated/clojure-string-source.ts +196 -0
  26. package/src/clojure/string.clj +192 -0
  27. package/src/clojure/string.clj.d.ts +25 -0
  28. package/src/core/assertions.ts +134 -0
  29. package/src/core/conversions.ts +108 -0
  30. package/src/core/core-env.ts +58 -0
  31. package/src/core/env.ts +78 -0
  32. package/src/core/errors.ts +39 -0
  33. package/src/core/evaluator/apply.ts +114 -0
  34. package/src/core/evaluator/arity.ts +174 -0
  35. package/src/core/evaluator/collections.ts +25 -0
  36. package/src/core/evaluator/destructure.ts +247 -0
  37. package/src/core/evaluator/dispatch.ts +73 -0
  38. package/src/core/evaluator/evaluate.ts +100 -0
  39. package/src/core/evaluator/expand.ts +79 -0
  40. package/src/core/evaluator/index.ts +72 -0
  41. package/src/core/evaluator/quasiquote.ts +87 -0
  42. package/src/core/evaluator/recur-check.ts +109 -0
  43. package/src/core/evaluator/special-forms.ts +517 -0
  44. package/src/core/factories.ts +155 -0
  45. package/src/core/gensym.ts +9 -0
  46. package/src/core/index.ts +76 -0
  47. package/src/core/positions.ts +38 -0
  48. package/src/core/printer.ts +86 -0
  49. package/src/core/reader.ts +559 -0
  50. package/src/core/scanners.ts +93 -0
  51. package/src/core/session.ts +610 -0
  52. package/src/core/stdlib/arithmetic.ts +361 -0
  53. package/src/core/stdlib/atoms.ts +88 -0
  54. package/src/core/stdlib/collections.ts +784 -0
  55. package/src/core/stdlib/errors.ts +81 -0
  56. package/src/core/stdlib/hof.ts +307 -0
  57. package/src/core/stdlib/meta.ts +48 -0
  58. package/src/core/stdlib/predicates.ts +240 -0
  59. package/src/core/stdlib/regex.ts +238 -0
  60. package/src/core/stdlib/strings.ts +311 -0
  61. package/src/core/stdlib/transducers.ts +256 -0
  62. package/src/core/stdlib/utils.ts +287 -0
  63. package/src/core/tokenizer.ts +437 -0
  64. package/src/core/transformations.ts +75 -0
  65. package/src/core/types.ts +258 -0
  66. package/src/main.ts +1 -0
  67. package/src/monaco-esm.d.ts +7 -0
  68. package/src/playground/clojure-tokens.ts +67 -0
  69. package/src/playground/editor.worker.ts +5 -0
  70. package/src/playground/find-form.ts +138 -0
  71. package/src/playground/playground.ts +342 -0
  72. package/src/playground/samples/00-welcome.clj +385 -0
  73. package/src/playground/samples/01-collections.clj +191 -0
  74. package/src/playground/samples/02-higher-order-functions.clj +215 -0
  75. package/src/playground/samples/03-destructuring.clj +194 -0
  76. package/src/playground/samples/04-strings-and-regex.clj +202 -0
  77. package/src/playground/samples/05-error-handling.clj +212 -0
  78. package/src/repl/repl.ts +116 -0
  79. package/tsconfig.build.json +10 -0
  80. package/tsconfig.json +31 -0
@@ -0,0 +1,437 @@
1
+ import { TokenizerError } from './errors'
2
+ import { makeCharScanner, type CharScanner } from './scanners'
3
+ import { tokenKeywords, tokenSymbols, type Token } from './types'
4
+
5
/**
 * Result of a full tokenization pass.
 *
 * - `tokens`: every token produced, excluding whitespace (filtered out by
 *   `parseAllTokens`).
 * - `error`: set when tokenization aborted early with a `TokenizerError`;
 *   `tokens` then holds everything parsed before the failure.
 * - `scanner`: the scanner after the pass, so callers can inspect the final
 *   position (e.g. `tokenize` checks it consumed the whole input).
 */
export type TokensResult = {
  tokens: Token[]
  error?: TokenizerError
  scanner: CharScanner
}
10
+
11
+ const isNewline = (char: string) => char === '\n'
12
+ const isWhitespace = (char: string) =>
13
+ [' ', ',', '\n', '\r', '\t'].includes(char)
14
+ const isComment = (char: string) => char === ';'
15
+ const isLParen = (char: string) => char === '('
16
+ const isRParen = (char: string) => char === ')'
17
+ const isLBracket = (char: string) => char === '['
18
+ const isRBracket = (char: string) => char === ']'
19
+ const isLBrace = (char: string) => char === '{'
20
+ const isRBrace = (char: string) => char === '}'
21
+ const isDoubleQuote = (char: string) => char === '"'
22
+ const isSingleQuote = (char: string) => char === "'"
23
+ const isBacktick = (char: string) => char === '`'
24
+ const isTilde = (char: string) => char === '~'
25
+ const isAt = (char: string) => char === '@'
26
+ const isNumber = (char: string) => {
27
+ const parsed = parseInt(char)
28
+ if (isNaN(parsed)) {
29
+ return false
30
+ }
31
+ return parsed >= 0 && parsed <= 9
32
+ }
33
+ const isDot = (char: string) => char === '.'
34
+ const isKeywordStart = (char: string) => char === ':'
35
+ const isHash = (char: string) => char === '#'
36
+
37
+ const isDelimiter = (char: string) =>
38
+ isLParen(char) ||
39
+ isRParen(char) ||
40
+ isLBracket(char) ||
41
+ isRBracket(char) ||
42
+ isLBrace(char) ||
43
+ isRBrace(char) ||
44
+ isBacktick(char) ||
45
+ isSingleQuote(char) ||
46
+ isAt(char)
47
+
48
+ const parseWhitespace = (ctx: TokenizationContext): Token => {
49
+ const scanner = ctx.scanner
50
+ const start = scanner.position()
51
+ scanner.consumeWhile(isWhitespace)
52
+ return {
53
+ kind: tokenKeywords.Whitespace,
54
+ start,
55
+ end: scanner.position(),
56
+ }
57
+ }
58
+
59
+ const parseComment = (ctx: TokenizationContext): Token => {
60
+ const scanner = ctx.scanner
61
+ const start = scanner.position()
62
+ scanner.advance() // skip the `;`
63
+ const value = scanner.consumeWhile((char) => !isNewline(char))
64
+ if (!scanner.isAtEnd() && scanner.peek() === '\n') {
65
+ scanner.advance() // consume the trailing newline
66
+ }
67
+ return {
68
+ kind: tokenKeywords.Comment,
69
+ value,
70
+ start,
71
+ end: scanner.position(),
72
+ }
73
+ }
74
+
75
// Parses a double-quoted string literal into a String token whose value has
// escapes resolved: \" \\ \n \r \t map to their characters; any other \X
// collapses to X. Throws TokenizerError if input ends before a closing quote.
const parseString = (ctx: TokenizationContext): Token => {
  const scanner = ctx.scanner
  const start = scanner.position()
  scanner.advance() // consume opening "
  const buffer: string[] = []
  let foundClosingQuote = false
  while (!scanner.isAtEnd()) {
    const ch = scanner.peek()!
    if (ch === '\\') {
      scanner.advance()! // consume the backslash
      // NOTE(review): if the backslash is the very last input character,
      // peek() here yields no character despite the `!` assertion and that
      // value is pushed into the buffer; the unterminated-string error below
      // still fires, so nothing escapes, but confirm the scanner.peek contract.
      const nextChar = scanner.peek()!
      switch (nextChar) {
        case '"':
          buffer.push('"')
          break
        case '\\':
          buffer.push('\\')
          break
        case 'n':
          buffer.push('\n')
          break
        case 'r':
          buffer.push('\r')
          break
        case 't':
          buffer.push('\t')
          break
        default:
          // Unknown escapes pass the escaped character through unchanged.
          buffer.push(nextChar)
      }

      if (!scanner.isAtEnd()) {
        scanner.advance() // consume the escaped char
      }
      continue
    }
    if (ch === '"') {
      scanner.advance() // consume closing "
      foundClosingQuote = true
      break
    }
    buffer.push(scanner.advance()!)
  }
  if (!foundClosingQuote) {
    throw new TokenizerError(
      `Unterminated string detected at ${start.offset}`,
      scanner.position()
    )
  }
  return {
    kind: tokenKeywords.String,
    value: buffer.join(''),
    start,
    end: scanner.position(),
  }
}
131
+
132
+ const parseKeyword = (ctx: TokenizationContext): Token => {
133
+ const scanner = ctx.scanner
134
+ const start = scanner.position()
135
+ const value = scanner.consumeWhile(
136
+ (char) =>
137
+ isKeywordStart(char) ||
138
+ (!isWhitespace(char) && !isDelimiter(char) && !isComment(char))
139
+ )
140
+ return {
141
+ kind: tokenKeywords.Keyword,
142
+ value,
143
+ start,
144
+ end: scanner.position(),
145
+ }
146
+ }
147
+
148
+ function isNumberToken(char: string, ctx: TokenizationContext) {
149
+ const scanner = ctx.scanner
150
+ const next = scanner.peek(1)
151
+ return isNumber(char) || (char === '-' && next !== null && isNumber(next))
152
+ }
153
// Parses a numeric literal: optional leading '-', integer digits, optional
// fractional part ('.' only when followed by a digit), optional exponent
// ('e'/'E' with optional sign). The token value is the JS Number of the
// consumed text. Throws TokenizerError on an empty exponent or a stray
// trailing '.' (e.g. "1.2.3").
const parseNumber = (ctx: TokenizationContext): Token => {
  const scanner = ctx.scanner
  const start = scanner.position()
  let value = ''
  if (scanner.peek() === '-') {
    value += scanner.advance()
  }
  value += scanner.consumeWhile(isNumber)
  // Fractional part: only treat '.' as part of the number when a digit
  // follows, so "(1.foo)"-style member access never reaches here as a number.
  if (
    !scanner.isAtEnd() &&
    scanner.peek() === '.' &&
    scanner.peek(1) !== null &&
    isNumber(scanner.peek(1)!)
  ) {
    value += scanner.advance()! // consume '.'
    value += scanner.consumeWhile(isNumber)
  }
  // Exponent part: requires at least one digit after 'e'/'E' (and optional sign).
  if (!scanner.isAtEnd() && (scanner.peek() === 'e' || scanner.peek() === 'E')) {
    value += scanner.advance()! // consume 'e' or 'E'
    if (!scanner.isAtEnd() && (scanner.peek() === '+' || scanner.peek() === '-')) {
      value += scanner.advance()! // consume optional sign
    }
    const exponentDigits = scanner.consumeWhile(isNumber)
    if (exponentDigits.length === 0) {
      throw new TokenizerError(
        `Invalid number format at line ${start.line} column ${start.col}: "${value}"`,
        { start, end: scanner.position() }
      )
    }
    value += exponentDigits
  }
  // A leftover '.' means a malformed literal like "1.2.3"; consume through the
  // next delimiter/whitespace so the error message shows the whole bad text.
  if (!scanner.isAtEnd() && isDot(scanner.peek()!)) {
    throw new TokenizerError(
      `Invalid number format at line ${start.line} column ${start.col}: "${value}${scanner.consumeWhile((ch) => !isWhitespace(ch) && !isDelimiter(ch))}"`,
      { start, end: scanner.position() }
    )
  }
  return {
    kind: tokenKeywords.Number,
    value: Number(value),
    start,
    end: scanner.position(),
  }
}
197
+
198
+ const parseSymbol = (ctx: TokenizationContext): Token => {
199
+ const scanner = ctx.scanner
200
+ const start = scanner.position()
201
+ const value = scanner.consumeWhile(
202
+ (char) => !isWhitespace(char) && !isDelimiter(char) && !isComment(char)
203
+ )
204
+
205
+ return {
206
+ kind: tokenKeywords.Symbol,
207
+ value,
208
+ start,
209
+ end: scanner.position(),
210
+ }
211
+ }
212
+
213
// Emits a Deref token for a standalone '@' (reader deref, e.g. @my-atom).
const parseDerefToken = (ctx: TokenizationContext): Token => {
  const scanner = ctx.scanner
  const start = scanner.position()
  scanner.advance() // consume '@'
  // NOTE(review): this is the only parser using a string-literal kind
  // ('Deref') instead of a tokenKeywords constant — confirm tokenKeywords
  // has a Deref entry and use it here for consistency.
  return { kind: 'Deref', start, end: scanner.position() }
}
219
+
220
// Parses the body of a #"..." regex literal. The '#' was already consumed by
// parseDispatch; `start` is the position of that '#', so the token spans the
// whole literal. Unlike parseString, only \" is unescaped — every other \X is
// kept verbatim for the regex engine to interpret. Throws TokenizerError on
// EOF before the closing quote (including a dangling trailing backslash).
const parseRegexLiteral = (ctx: TokenizationContext, start: ReturnType<typeof ctx.scanner.position>): Token => {
  const scanner = ctx.scanner
  scanner.advance() // consume opening '"'
  const buffer: string[] = []
  let foundClosingQuote = false
  while (!scanner.isAtEnd()) {
    const ch = scanner.peek()!
    if (ch === '\\') {
      scanner.advance() // consume backslash
      const next = scanner.peek()
      if (next === null) {
        // Backslash was the last character: the literal can never close.
        throw new TokenizerError(
          `Unterminated regex literal at ${start.offset}`,
          scanner.position()
        )
      }
      if (next === '"') {
        // \" → " (only escape that terminates the literal meaning)
        buffer.push('"')
      } else {
        // All other \X sequences (\\, \d, \s, \n, etc.) are passed through as-is
        // for the regex engine to interpret
        buffer.push('\\')
        buffer.push(next)
      }
      scanner.advance() // consume the char after backslash
      continue
    }
    if (ch === '"') {
      scanner.advance() // consume closing '"'
      foundClosingQuote = true
      break
    }
    buffer.push(scanner.advance()!)
  }
  if (!foundClosingQuote) {
    throw new TokenizerError(
      `Unterminated regex literal at ${start.offset}`,
      scanner.position()
    )
  }
  return {
    kind: tokenKeywords.Regex,
    value: buffer.join(''),
    start,
    end: scanner.position(),
  }
}
268
+
269
+ // Single routing point for all # dispatch characters.
270
+ // Add new dispatch forms here as they are supported.
271
+ function parseDispatch(ctx: TokenizationContext): Token {
272
+ const scanner = ctx.scanner
273
+ const start = scanner.position()
274
+ scanner.advance() // consume '#'
275
+ const next = scanner.peek()
276
+ if (next === '(') {
277
+ scanner.advance() // consume '('
278
+ return { kind: tokenKeywords.AnonFnStart, start, end: scanner.position() }
279
+ }
280
+ if (next === '"') {
281
+ return parseRegexLiteral(ctx, start)
282
+ }
283
+ if (next === '{') {
284
+ // TODO: set literals — #{1 2 3}
285
+ throw new TokenizerError('Set literals are not yet supported', start)
286
+ }
287
+ throw new TokenizerError(
288
+ `Unknown dispatch character: #${next ?? 'EOF'}`,
289
+ start
290
+ )
291
+ }
292
+
293
+ function parseCharToken<K extends Token['kind']>(kind: K, value: string) {
294
+ return (ctx: TokenizationContext) => {
295
+ const scanner = ctx.scanner
296
+ const start = scanner.position()
297
+ scanner.advance()
298
+
299
+ return {
300
+ kind,
301
+ value,
302
+ start,
303
+ end: scanner.position(),
304
+ } as Token & { kind: K }
305
+ }
306
+ }
307
+
308
+ function parseTilde(ctx: TokenizationContext): Token {
309
+ const scanner = ctx.scanner
310
+ const start = scanner.position()
311
+ // consume the tilde
312
+ scanner.advance()
313
+ // check if the next character is an @
314
+ const nextChar = scanner.peek()
315
+ if (!nextChar) {
316
+ throw new TokenizerError(
317
+ `Unexpected end of input while parsing unquote at ${start.offset}`,
318
+ start
319
+ )
320
+ }
321
+ if (isAt(nextChar)) {
322
+ // consume the @
323
+ scanner.advance()
324
+ return {
325
+ kind: tokenKeywords.UnquoteSplicing,
326
+ value: tokenSymbols.UnquoteSplicing,
327
+ start,
328
+ end: scanner.position(),
329
+ }
330
+ }
331
+
332
+ return {
333
+ kind: tokenKeywords.Unquote,
334
+ value: tokenSymbols.Unquote,
335
+ start,
336
+ end: scanner.position(),
337
+ }
338
+ }
339
+
340
// A predicate over the current character (with the context available for
// lookahead, e.g. isNumberToken) paired with the parser to run on a match.
type TokenParseCheck = (char: string, ctx: TokenizationContext) => boolean
type TokenParseFn = (ctx: TokenizationContext) => Token
type TokenParseEntry = [TokenParseCheck, TokenParseFn]

// Dispatch table scanned in order by parseNextToken; the first matching
// predicate wins, so order matters — e.g. isNumberToken must run before the
// symbol fallback so "-1" parses as a number while bare "-" falls through to
// parseSymbol. Characters matching no entry are parsed as symbols.
const tokenParseEntries: TokenParseEntry[] = [
  [isWhitespace, parseWhitespace],
  [isComment, parseComment],
  [isLParen, parseCharToken(tokenKeywords.LParen, tokenSymbols.LParen)],
  [isRParen, parseCharToken(tokenKeywords.RParen, tokenSymbols.RParen)],
  [isLBracket, parseCharToken(tokenKeywords.LBracket, tokenSymbols.LBracket)],
  [isRBracket, parseCharToken(tokenKeywords.RBracket, tokenSymbols.RBracket)],
  [isLBrace, parseCharToken(tokenKeywords.LBrace, tokenSymbols.LBrace)],
  [isRBrace, parseCharToken(tokenKeywords.RBrace, tokenSymbols.RBrace)],
  [isDoubleQuote, parseString],
  [isKeywordStart, parseKeyword],
  [isNumberToken, parseNumber],
  [isSingleQuote, parseCharToken(tokenKeywords.Quote, tokenSymbols.Quote)],
  [
    isBacktick,
    parseCharToken(tokenKeywords.Quasiquote, tokenSymbols.Quasiquote),
  ],
  [isTilde, parseTilde],
  [isAt, parseDerefToken],
  [isHash, parseDispatch],
]
365
+
366
+ function parseNextToken(ctx: TokenizationContext): Token {
367
+ const scanner = ctx.scanner
368
+ const char = scanner.peek()!
369
+ const entry = tokenParseEntries.find(([check]) => check(char, ctx))
370
+ if (entry) {
371
+ const [, parse] = entry
372
+ return parse(ctx)
373
+ }
374
+ // catch-all symbol parsing
375
+ return parseSymbol(ctx)
376
+ }
377
+
378
+ export function parseAllTokens(ctx: TokenizationContext): TokensResult {
379
+ const tokens: Token[] = []
380
+ let error: TokenizerError | undefined = undefined
381
+
382
+ try {
383
+ while (!ctx.scanner.isAtEnd()) {
384
+ const result = parseNextToken(ctx)
385
+
386
+ if (!result) {
387
+ break
388
+ }
389
+
390
+ // Ignore whitespace tokens
391
+ if (result.kind === tokenKeywords.Whitespace) {
392
+ continue
393
+ }
394
+
395
+ tokens.push(result)
396
+ }
397
+ } catch (e) {
398
+ error = e as TokenizerError
399
+ }
400
+
401
+ const parsed: TokensResult = {
402
+ tokens,
403
+ scanner: ctx.scanner,
404
+ error,
405
+ }
406
+ return parsed
407
+ }
408
+
409
+ export function getTokenValue(token: Token): string | number {
410
+ if ('value' in token) {
411
+ return token.value
412
+ }
413
+ return ''
414
+ }
415
+
416
// State threaded through every parse function; currently just the character
// scanner, wrapped in an object so more fields can be added later.
type TokenizationContext = {
  scanner: CharScanner
}
419
+
420
+ export function tokenize(input: string): Token[] {
421
+ const inputLength = input.length
422
+ const scanner = makeCharScanner(input)
423
+ const tokenizationContext = {
424
+ scanner,
425
+ }
426
+ const tokensResult = parseAllTokens(tokenizationContext)
427
+ if (tokensResult.error) {
428
+ throw tokensResult.error
429
+ }
430
+ if (tokensResult.scanner.position().offset !== inputLength) {
431
+ throw new TokenizerError(
432
+ `Unexpected end of input, expected ${inputLength} characters, got ${tokensResult.scanner.position().offset}`,
433
+ tokensResult.scanner.position()
434
+ )
435
+ }
436
+ return tokensResult.tokens
437
+ }
@@ -0,0 +1,75 @@
1
+ import { isList, isMap, isVector } from './assertions'
2
+ import { EvaluationError } from './errors'
3
+ import { cljString, cljVector } from './factories'
4
+ import { printString } from './printer'
5
+ import { type CljValue, valueKeywords } from './types'
6
+
7
// Renders a CljValue as Clojure-style source text: strings unquoted, numbers
// via toString, keywords/symbols by name, collections recursively with
// space-separated elements, functions as (fn ...) forms, nil as "nil".
// Throws EvaluationError for an unhandled kind.
export function valueToString(value: CljValue): string {
  switch (value.kind) {
    case valueKeywords.string:
      // Raw string contents — no surrounding quotes (cf. a printer that would).
      return value.value
    case valueKeywords.number:
      return value.value.toString()
    case valueKeywords.boolean:
      return value.value ? 'true' : 'false'
    case valueKeywords.keyword:
      return value.name
    case valueKeywords.symbol:
      return value.name
    case valueKeywords.list:
      return `(${value.value.map(valueToString).join(' ')})`
    case valueKeywords.vector:
      return `[${value.value.map(valueToString).join(' ')}]`
    case valueKeywords.map:
      return `{${value.entries.map(([key, value]) => `${valueToString(key)} ${valueToString(value)}`).join(' ')}}`
    case valueKeywords.function: {
      // A rest parameter is rendered Clojure-style as `& rest` after the
      // fixed params.
      if (value.arities.length === 1) {
        // Single arity: (fn [params...] body...)
        const a = value.arities[0]
        const params = a.restParam
          ? [...a.params, { kind: 'symbol' as const, name: '&' }, a.restParam]
          : a.params
        return `(fn [${params.map(valueToString).join(' ')}] ${a.body.map(valueToString).join(' ')})`
      }
      // Multiple arities: (fn ([params...] body...) ([params...] body...))
      const clauses = value.arities.map((a) => {
        const params = a.restParam
          ? [...a.params, { kind: 'symbol' as const, name: '&' }, a.restParam]
          : a.params
        return `([${params.map(valueToString).join(' ')}] ${a.body.map(valueToString).join(' ')})`
      })
      return `(fn ${clauses.join(' ')})`
    }
    case valueKeywords.nativeFunction:
      return `(native-fn ${value.name})`
    case valueKeywords.nil:
      return 'nil'
    // Matches Clojure's Pattern.toString() behavior: returns the pattern string
    // prefixed with inline flags if present, e.g. (?i)hello
    case valueKeywords.regex: {
      const prefix = value.flags ? `(?${value.flags})` : ''
      return `${prefix}${value.pattern}`
    }
    default:
      throw new EvaluationError(`unhandled value type: ${value.kind}`, {
        value,
      })
  }
}
57
+
58
+ export const toSeq = (collection: CljValue): CljValue[] => {
59
+ if (isList(collection)) {
60
+ return collection.value
61
+ }
62
+ if (isVector(collection)) {
63
+ return collection.value
64
+ }
65
+ if (isMap(collection)) {
66
+ return collection.entries.map(([k, v]) => cljVector([k, v]))
67
+ }
68
+ if (collection.kind === 'string') {
69
+ return [...collection.value].map(cljString)
70
+ }
71
+ throw new EvaluationError(
72
+ `toSeq expects a collection or string, got ${printString(collection)}`,
73
+ { collection }
74
+ )
75
+ }