sommark 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -29,3 +29,38 @@
29
29
  - **Transpiler**: `AtBlock` content is now **escaped by default** in the transpiler to prevent XSS.
30
30
  - **Mapper**: Added `options` to `Mapper.create` (e.g., `{ escape: false }`) to allow specific blocks (like `Code`, `List`, `Table`) to opt-out of automatic escaping when they handle raw content safely or require it for parsing.
31
31
  - **Parser**: Removed manual escaping from Parser to support the new transpiler-based architecture.
32
+
33
+ ## 1.1.1 (2026-01-10)
34
+
35
+ ### Bug Fixes
36
+
37
+ - **CLI**: Fixed a bug where passing a Mapper object in `smark.config.js` (Custom Mode) caused a crash. The CLI now correctly handles both file path strings and imported Mapper objects.
38
+
39
+
40
+ ## 1.2.0 (2026-01-14)
41
+
42
+ ### Bug Fixes
43
+
44
+ * Fixed an issue where consecutive standalone blocks were not fully rendered when not separated by a blank line.
45
+
46
+ ```ini
47
+ [Block]
48
+ This is a test.
49
+ [end]
50
+ [Block]
51
+ This is another test.
52
+ [end]
53
+ ```
54
+
55
+ * Added support for inline block content while keeping the original multiline syntax fully compatible.
56
+
57
+ ```yaml
58
+ [Block]Hello World[end]
59
+ ```
60
+
61
+ ---
62
+
63
+ ### Code Improvements
64
+
65
+ * Removed unnecessary code
66
+ * Improved internal implementation
package/cli/cli.mjs CHANGED
@@ -113,10 +113,15 @@ async function loadConfig() {
113
113
  }
114
114
 
115
115
  async function transpile({ src, format, mappingFile = "" }) {
116
+ if (typeof mappingFile === "object" && mappingFile !== null) {
117
+ return transpiler({ ast: parser(lexer(src)), format, mapperFile: mappingFile });
118
+ }
116
119
  if ((await loadConfig()).mode === "default") {
117
120
  return transpiler({ ast: parser(lexer(src)), format, mapperFile: format === "html" ? html : format === "md" ? md : mdx });
118
- } else if (mappingFile && isExist(mappingFile)) {
119
- return transpiler({ ast: parser(lexer(src)), format, mappingFile });
121
+ } else if (typeof mappingFile === "string" && (await isExist(mappingFile))) {
122
+ const mappingFileURL = pathToFileURL(path.resolve(process.cwd(), mappingFile)).href;
123
+ const loadedMapper = await import(mappingFileURL);
124
+ return transpiler({ ast: parser(lexer(src)), format, mapperFile: loadedMapper.default });
120
125
  } else {
121
126
  cliError([`{line}<$red:File$> <$blue:'${mappingFile}'$> <$red: is not found$>{line}`]);
122
127
  }
package/core/parser.js CHANGED
@@ -111,9 +111,10 @@ function parseBlock(tokens, i) {
111
111
  block_stack.push(1);
112
112
  end_stack.pop();
113
113
  const blockNode = makeBlockNode();
114
+ // consume '['
115
+ i++;
114
116
  // Update Data
115
117
  updateData(tokens, i);
116
- i++;
117
118
  if (current_token(tokens, i).type === TOKEN_TYPES.IDENTIFIER) {
118
119
  const id = current_token(tokens, i).value.trim();
119
120
  validateId(id);
@@ -122,13 +123,15 @@ function parseBlock(tokens, i) {
122
123
  } else {
123
124
  parserError(errorMessage(tokens, i, block_id, "["));
124
125
  }
126
+ // consume Block identifier
127
+ i++;
125
128
  // Update Data
126
129
  updateData(tokens, i);
127
- i++;
128
130
  if (current_token(tokens, i) && current_token(tokens, i).type === TOKEN_TYPES.EQUAL) {
131
+ // consume '='
132
+ i++;
129
133
  // Update Data
130
134
  updateData(tokens, i);
131
- i++;
132
135
  if (current_token(tokens, i).type === TOKEN_TYPES.VALUE) {
133
136
  current_token(tokens, i)
134
137
  .value.split(",")
@@ -138,9 +141,10 @@ function parseBlock(tokens, i) {
138
141
  } else {
139
142
  parserError(errorMessage(tokens, i, block_value, "="));
140
143
  }
144
+ // consume Block value
145
+ i++;
141
146
  // Update Data
142
147
  updateData(tokens, i);
143
- i++;
144
148
  }
145
149
  if (current_token(tokens, i) && current_token(tokens, i).type !== TOKEN_TYPES.CLOSE_BRACKET) {
146
150
  if (peek(tokens, i, -1) && peek(tokens, i, -1).type === TOKEN_TYPES.VALUE) {
@@ -149,20 +153,22 @@ function parseBlock(tokens, i) {
149
153
  parserError(errorMessage(tokens, i, "]", block_id));
150
154
  }
151
155
  }
152
- // Update Data
153
- updateData(tokens, i);
156
+ // consume ']'
154
157
  i++;
155
- if (!current_token(tokens, i) || current_token(tokens, i).value !== "\n") {
156
- parserError(errorMessage(tokens, i, "\\n", "]"));
157
- }
158
158
  // Update Data
159
159
  updateData(tokens, i);
160
- i++;
160
+ if (current_token(tokens, i) && current_token(tokens, i).type == TOKEN_TYPES.NEWLINE) {
161
+ // consume '\n'
162
+ i++;
163
+ // Update Data
164
+ updateData(tokens, i);
165
+ }
161
166
  tokens_stack.length = 0;
162
167
  while (i < tokens.length) {
163
168
  if (current_token(tokens, i).value === "[" && peek(tokens, i, 1).value !== "end") {
164
169
  const [childNode, nextIndex] = parseBlock(tokens, i);
165
170
  blockNode.body.push(childNode);
171
+ // consume child node
166
172
  i = nextIndex;
167
173
  // Update Data
168
174
  updateData(tokens, i);
@@ -170,20 +176,17 @@ function parseBlock(tokens, i) {
170
176
  current_token(tokens, i).type === TOKEN_TYPES.OPEN_BRACKET &&
171
177
  peek(tokens, i, 1).type === TOKEN_TYPES.END_KEYWORD
172
178
  ) {
179
+ // consume end keyword
180
+ i++;
173
181
  // Update Data
174
182
  updateData(tokens, i);
175
- i++;
176
183
  if (current_token(tokens, i).type === TOKEN_TYPES.END_KEYWORD) {
177
- // Update Data
178
- updateData(tokens, i);
179
- if (peek(tokens, i, 1) && peek(tokens, i, 1).type === TOKEN_TYPES.CLOSE_BRACKET) {
180
- // Update Data
181
- updateData(tokens, i + 1);
182
- } else {
184
+ if (peek(tokens, i, 1) && peek(tokens, i, 1).type !== TOKEN_TYPES.CLOSE_BRACKET) {
183
185
  parserError(errorMessage(tokens, i, "]", end_keyword));
184
186
  }
185
187
  }
186
188
  block_stack.pop();
189
+ // consume end keyword and ']'
187
190
  i += 2;
188
191
  // Update Data
189
192
  updateData(tokens, i);
@@ -195,49 +198,50 @@ function parseBlock(tokens, i) {
195
198
  continue;
196
199
  }
197
200
  blockNode.body.push(childNode);
198
- if (blockNode.body[0].value === "\n") {
199
- blockNode.body.splice(0, 1);
200
- }
201
201
  i = nextIndex;
202
202
  }
203
203
  }
204
- i++;
205
204
  return [blockNode, i];
206
205
  }
207
206
 
208
207
  // Parse Inline Statements
209
208
  function parseInline(tokens, i) {
210
209
  const inlineNode = makeInlineNode();
210
+ // consume '('
211
+ i++;
211
212
  // Update Data
212
213
  updateData(tokens, i);
213
- i++;
214
214
  if (current_token(tokens, i).type === TOKEN_TYPES.VALUE) {
215
215
  inlineNode.value = current_token(tokens, i).value;
216
216
  inlineNode.depth = current_token(tokens, i).depth;
217
217
  } else {
218
218
  parserError(errorMessage(tokens, i, inline_value, "("));
219
219
  }
220
+ // consume Inline Value
221
+ i++;
220
222
  // Update Data
221
223
  updateData(tokens, i);
222
- i++;
223
224
  if (!current_token(tokens, i) || current_token(tokens, i).type !== TOKEN_TYPES.CLOSE_PAREN) {
224
225
  parserError(errorMessage(tokens, i, ")", inline_value));
225
226
  }
227
+ // consume ')'
228
+ i++;
226
229
  // Update Data
227
230
  updateData(tokens, i);
228
- i++;
229
231
  if (!current_token(tokens, i) || current_token(tokens, i).type !== TOKEN_TYPES.THIN_ARROW) {
230
232
  parserError(errorMessage(tokens, i, "->", ")"));
231
233
  }
234
+ // consume '->'
235
+ i++;
232
236
  // Update Data
233
237
  updateData(tokens, i);
234
- i++;
235
238
  if (!current_token(tokens, i) || current_token(tokens, i).type !== TOKEN_TYPES.OPEN_PAREN) {
236
239
  parserError(errorMessage(tokens, i, "(", "->"));
237
240
  }
241
+ // consume '('
242
+ i++;
238
243
  // Update Data
239
244
  updateData(tokens, i);
240
- i++;
241
245
  if (current_token(tokens, i) && current_token(tokens, i).type === TOKEN_TYPES.IDENTIFIER) {
242
246
  for (const id of PREDEFINED_IDS) {
243
247
  if (current_token(tokens, i).value.includes(`${id}:`)) {
@@ -260,16 +264,18 @@ function parseInline(tokens, i) {
260
264
  } else {
261
265
  parserError(errorMessage(tokens, i, inline_id, "("));
262
266
  }
267
+ // consume Inline Identifier
268
+ i++;
263
269
  // Update Data
264
270
  updateData(tokens, i);
265
- i++;
266
271
  if (!current_token(tokens, i) || current_token(tokens, i).type !== TOKEN_TYPES.CLOSE_PAREN) {
267
272
  parserError(errorMessage(tokens, i, ")", inline_id));
268
273
  }
274
+ // consume ')'
275
+ i++;
269
276
  // Update Data
270
277
  updateData(tokens, i);
271
278
  tokens_stack.length = 0;
272
- i++;
273
279
  return [inlineNode, i];
274
280
  }
275
281
 
@@ -280,18 +286,23 @@ function parseText(tokens, i) {
280
286
  textNode.text = current_token(tokens, i).value;
281
287
  textNode.depth = current_token(tokens, i).depth;
282
288
  }
289
+ // consume TEXT
290
+ i++;
283
291
  // Update Data
284
292
  updateData(tokens, i);
285
- i++;
286
293
  return [textNode, i];
287
294
  }
288
295
 
289
296
  // Parse At_Block
290
297
  function parseAtBlock(tokens, i) {
291
298
  const atBlockNode = makeAtBlockNode();
299
+ // consume '@_'
300
+ i++;
292
301
  // Update Data
293
302
  updateData(tokens, i);
294
- i++;
303
+ if(current_token(tokens, i) && current_token(tokens, i).type === TOKEN_TYPES.END_KEYWORD) {
304
+ parserError(errorMessage(tokens, i, at_id, "@_"));
305
+ }
295
306
  if (current_token(tokens, i) && current_token(tokens, i).type === TOKEN_TYPES.IDENTIFIER) {
296
307
  const id = current_token(tokens, i).value.trim();
297
308
  validateId(id);
@@ -300,28 +311,32 @@ function parseAtBlock(tokens, i) {
300
311
  } else {
301
312
  parserError(errorMessage(tokens, i, at_id, "@_"));
302
313
  }
314
+ // consume Atblock Identifier
315
+ i++;
303
316
  // Update Data
304
317
  updateData(tokens, i);
305
- i++;
306
318
  if (!current_token(tokens, i) || current_token(tokens, i).type !== TOKEN_TYPES.CLOSE_AT) {
307
319
  parserError(errorMessage(tokens, i, "_@", at_id));
308
320
  }
321
+ // consume '_@'
322
+ i++;
309
323
  // Update Data
310
324
  updateData(tokens, i);
311
- i++;
312
325
  if (current_token(tokens, i) && current_token(tokens, i).type === TOKEN_TYPES.COLON) {
326
+ // consume ':'
327
+ i++;
313
328
  // Update Data
314
329
  updateData(tokens, i);
315
- i++;
316
330
  if (current_token(tokens, i) && current_token(tokens, i).type === TOKEN_TYPES.VALUE) {
317
331
  current_token(tokens, i)
318
332
  .value.split(",")
319
333
  .forEach(value => {
320
334
  atBlockNode.args.push(value.trim());
321
335
  });
336
+ // consume Atblock Value
337
+ i++;
322
338
  // Update Data
323
339
  updateData(tokens, i);
324
- i++;
325
340
  } else {
326
341
  parserError(errorMessage(tokens, i, at_value, ":"));
327
342
  }
@@ -329,55 +344,65 @@ function parseAtBlock(tokens, i) {
329
344
  if (current_token(tokens, i) && current_token(tokens, i).type !== TOKEN_TYPES.NEWLINE) {
330
345
  parserError(errorMessage(tokens, i, "\\n", "_@"));
331
346
  }
347
+ // consume '\n'
348
+ i++;
332
349
  // Update Data
333
350
  updateData(tokens, i);
334
- i++;
335
351
  if (current_token(tokens, i) && current_token(tokens, i).type !== TOKEN_TYPES.TEXT) {
336
352
  parserError(errorMessage(tokens, i, "Text", "\\n"));
337
353
  }
338
354
  while (i < tokens.length) {
339
355
  if (current_token(tokens, i) && current_token(tokens, i).type === TOKEN_TYPES.TEXT) {
340
356
  atBlockNode.content.push(current_token(tokens, i).value);
357
+ // consume TEXT
358
+ i++;
341
359
  // Update Data
342
360
  updateData(tokens, i);
343
- i++;
344
361
  } else if (current_token(tokens, i) && current_token(tokens, i).type === TOKEN_TYPES.NEWLINE) {
362
+ // consume '\n'
363
+ i++;
345
364
  // Update Data
346
365
  updateData(tokens, i);
347
- i++;
348
366
  continue;
349
367
  } else {
350
368
  break;
351
369
  }
352
370
  }
353
371
  if (current_token(tokens, i) && current_token(tokens, i).type === TOKEN_TYPES.NEWLINE) {
372
+ // consume '\n'
373
+ i++;
354
374
  // Update Data
355
375
  updateData(tokens, i);
356
- i++;
357
376
  }
358
377
  if (!current_token(tokens, i) || current_token(tokens, i).type !== TOKEN_TYPES.OPEN_AT) {
359
378
  parserError(errorMessage(tokens, i, "@_", "\\n"));
360
379
  }
380
+ // consume '@_'
381
+ i++;
361
382
  // Update Data
362
383
  updateData(tokens, i);
363
- i++;
364
384
  if (!current_token(tokens, i) || current_token(tokens, i).type !== TOKEN_TYPES.END_KEYWORD) {
365
385
  parserError(errorMessage(tokens, i, end_keyword, "@_"));
366
386
  }
387
+ // consume end keyword
388
+ i++;
367
389
  // Update Data
368
390
  updateData(tokens, i);
369
- i++;
370
391
  if (!current_token(tokens, i) || current_token(tokens, i).type !== TOKEN_TYPES.CLOSE_AT) {
371
392
  parserError(errorMessage(tokens, i, "_@", end_keyword));
372
393
  }
394
+ // consume '_@'
395
+ i++;
373
396
  // Update Data
374
397
  updateData(tokens, i);
375
- i++;
376
398
  if (!current_token(tokens, i) || current_token(tokens, i).value !== "\n") {
377
399
  parserError(errorMessage(tokens, i, "\\n", "_@"));
378
400
  }
379
- tokens_stack.length = 0;
401
+ // consume '\n'
380
402
  i++;
403
+ // Update Data
404
+ updateData(tokens, i);
405
+ tokens_stack.length = 0;
381
406
  return [atBlockNode, i];
382
407
  }
383
408
 
@@ -388,9 +413,10 @@ function parseCommentNode(tokens, i) {
388
413
  commentNode.text = current_token(tokens, i).value;
389
414
  commentNode.depth = current_token(tokens, i).depth;
390
415
  }
416
+ // consume Comment
417
+ i++;
391
418
  // Update Data
392
419
  updateData(tokens, i);
393
- i++;
394
420
  return [commentNode, i];
395
421
  }
396
422
 
@@ -415,7 +441,7 @@ function parseNode(tokens, i) {
415
441
  return parseText(tokens, i);
416
442
  }
417
443
  // At_Block
418
- else if (current_token(tokens, i).value === "@_" && peek(tokens, i, 1).type !== TOKEN_TYPES.END_KEYWORD) {
444
+ else if (current_token(tokens, i).value === "@_") {
419
445
  return parseAtBlock(tokens, i);
420
446
  }
421
447
  // Newline
@@ -462,4 +488,4 @@ function parser(tokens) {
462
488
  return ast;
463
489
  }
464
490
 
465
- export default parser;
491
+ export default parser;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "sommark",
3
- "version": "1.1.0",
3
+ "version": "1.2.0",
4
4
  "description": "SomMark is a structural markup language for writing structured documents and converting them into HTML or Markdown or MDX(only ready components).",
5
5
  "main": "index.js",
6
6
  "directories": {