tex2typst 0.2.9 → 0.2.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/parser.ts CHANGED
@@ -1,5 +1,5 @@
1
1
  import { symbolMap } from "./map";
2
- import { TexNode, TexSupsubData, Token, TokenType } from "./types";
2
+ import { TexNode, TexSupsubData, TokenType } from "./types";
3
3
 
4
4
 
5
5
  const UNARY_COMMANDS = [
@@ -43,6 +43,21 @@ const BINARY_COMMANDS = [
43
43
  'tbinom',
44
44
  ]
45
45
 
46
+
47
+ export class Token {
48
+ type: TokenType;
49
+ value: string;
50
+
51
+ constructor(type: TokenType, value: string) {
52
+ this.type = type;
53
+ this.value = value;
54
+ }
55
+
56
+ public eq(token: Token): boolean {
57
+ return this.type === token.type && this.value === token.value;
58
+ }
59
+ }
60
+
46
61
  const EMPTY_NODE: TexNode = { type: 'empty', content: '' };
47
62
 
48
63
  function assert(condition: boolean, message: string = ''): void {
@@ -61,11 +76,11 @@ function get_command_param_num(command: string): number {
61
76
  }
62
77
  }
63
78
 
64
- const LEFT_CURLY_BRACKET: Token = {type: TokenType.CONTROL, value: '{'};
65
- const RIGHT_CURLY_BRACKET: Token = {type: TokenType.CONTROL, value: '}'};
79
+ const LEFT_CURLY_BRACKET: Token = new Token(TokenType.CONTROL, '{');
80
+ const RIGHT_CURLY_BRACKET: Token = new Token(TokenType.CONTROL, '}');
66
81
 
67
82
  function find_closing_curly_bracket(tokens: Token[], start: number): number {
68
- assert(token_eq(tokens[start], LEFT_CURLY_BRACKET));
83
+ assert(tokens[start].eq(LEFT_CURLY_BRACKET));
69
84
  let count = 1;
70
85
  let pos = start + 1;
71
86
 
@@ -73,9 +88,9 @@ function find_closing_curly_bracket(tokens: Token[], start: number): number {
73
88
  if (pos >= tokens.length) {
74
89
  throw new LatexParserError('Unmatched curly brackets');
75
90
  }
76
- if (token_eq(tokens[pos], LEFT_CURLY_BRACKET)) {
91
+ if (tokens[pos].eq(LEFT_CURLY_BRACKET)) {
77
92
  count += 1;
78
- } else if (token_eq(tokens[pos], RIGHT_CURLY_BRACKET)) {
93
+ } else if (tokens[pos].eq(RIGHT_CURLY_BRACKET)) {
79
94
  count -= 1;
80
95
  }
81
96
  pos += 1;
@@ -84,11 +99,11 @@ function find_closing_curly_bracket(tokens: Token[], start: number): number {
84
99
  return pos - 1;
85
100
  }
86
101
 
87
- const LEFT_SQUARE_BRACKET: Token = {type: TokenType.ELEMENT, value: '['};
88
- const RIGHT_SQUARE_BRACKET: Token = {type: TokenType.ELEMENT, value: ']'};
102
+ const LEFT_SQUARE_BRACKET: Token = new Token(TokenType.ELEMENT, '[');
103
+ const RIGHT_SQUARE_BRACKET: Token = new Token(TokenType.ELEMENT, ']');
89
104
 
90
105
  function find_closing_square_bracket(tokens: Token[], start: number): number {
91
- assert(token_eq(tokens[start], LEFT_SQUARE_BRACKET));
106
+ assert(tokens[start].eq(LEFT_SQUARE_BRACKET));
92
107
  let count = 1;
93
108
  let pos = start + 1;
94
109
 
@@ -96,9 +111,9 @@ function find_closing_square_bracket(tokens: Token[], start: number): number {
96
111
  if (pos >= tokens.length) {
97
112
  throw new LatexParserError('Unmatched square brackets');
98
113
  }
99
- if (token_eq(tokens[pos], LEFT_SQUARE_BRACKET)) {
114
+ if (tokens[pos].eq(LEFT_SQUARE_BRACKET)) {
100
115
  count += 1;
101
- } else if (token_eq(tokens[pos], RIGHT_SQUARE_BRACKET)) {
116
+ } else if (tokens[pos].eq(RIGHT_SQUARE_BRACKET)) {
102
117
  count -= 1;
103
118
  }
104
119
  pos += 1;
@@ -138,7 +153,7 @@ function eat_parenthesis(tokens: Token[], start: number): Token | null {
138
153
 
139
154
  function eat_primes(tokens: Token[], start: number): number {
140
155
  let pos = start;
141
- while (pos < tokens.length && token_eq(tokens[pos], { type: TokenType.ELEMENT, value: "'" })) {
156
+ while (pos < tokens.length && tokens[pos].eq(new Token(TokenType.ELEMENT, "'"))) {
142
157
  pos += 1;
143
158
  }
144
159
  return pos - start;
@@ -154,10 +169,8 @@ function eat_command_name(latex: string, start: number): string {
154
169
  }
155
170
 
156
171
 
157
-
158
-
159
- const LEFT_COMMAND: Token = { type: TokenType.COMMAND, value: '\\left' };
160
- const RIGHT_COMMAND: Token = { type: TokenType.COMMAND, value: '\\right' };
172
+ const LEFT_COMMAND: Token = new Token(TokenType.COMMAND, '\\left');
173
+ const RIGHT_COMMAND: Token = new Token(TokenType.COMMAND, '\\right');
161
174
 
162
175
  function find_closing_right_command(tokens: Token[], start: number): number {
163
176
  let count = 1;
@@ -167,9 +180,9 @@ function find_closing_right_command(tokens: Token[], start: number): number {
167
180
  if (pos >= tokens.length) {
168
181
  return -1;
169
182
  }
170
- if (token_eq(tokens[pos], LEFT_COMMAND)) {
183
+ if (tokens[pos].eq(LEFT_COMMAND)) {
171
184
  count += 1;
172
- } else if (token_eq(tokens[pos], RIGHT_COMMAND)) {
185
+ } else if (tokens[pos].eq(RIGHT_COMMAND)) {
173
186
  count -= 1;
174
187
  }
175
188
  pos += 1;
@@ -179,8 +192,8 @@ function find_closing_right_command(tokens: Token[], start: number): number {
179
192
  }
180
193
 
181
194
 
182
- const BEGIN_COMMAND: Token = { type: TokenType.COMMAND, value: '\\begin' };
183
- const END_COMMAND: Token = { type: TokenType.COMMAND, value: '\\end' };
195
+ const BEGIN_COMMAND: Token = new Token(TokenType.COMMAND, '\\begin');
196
+ const END_COMMAND: Token = new Token(TokenType.COMMAND, '\\end');
184
197
 
185
198
 
186
199
  function find_closing_end_command(tokens: Token[], start: number): number {
@@ -191,9 +204,9 @@ function find_closing_end_command(tokens: Token[], start: number): number {
191
204
  if (pos >= tokens.length) {
192
205
  return -1;
193
206
  }
194
- if (token_eq(tokens[pos], BEGIN_COMMAND)) {
207
+ if (tokens[pos].eq(BEGIN_COMMAND)) {
195
208
  count += 1;
196
- } else if (token_eq(tokens[pos], END_COMMAND)) {
209
+ } else if (tokens[pos].eq(END_COMMAND)) {
197
210
  count -= 1;
198
211
  }
199
212
  pos += 1;
@@ -240,7 +253,7 @@ export function tokenize(latex: string): Token[] {
240
253
  while (newPos < latex.length && latex[newPos] !== '\n') {
241
254
  newPos += 1;
242
255
  }
243
- token = { type: TokenType.COMMENT, value: latex.slice(pos + 1, newPos) };
256
+ token = new Token(TokenType.COMMENT, latex.slice(pos + 1, newPos));
244
257
  pos = newPos;
245
258
  break;
246
259
  }
@@ -249,19 +262,19 @@ export function tokenize(latex: string): Token[] {
249
262
  case '_':
250
263
  case '^':
251
264
  case '&':
252
- token = { type: TokenType.CONTROL, value: firstChar};
265
+ token = new Token(TokenType.CONTROL, firstChar);
253
266
  pos++;
254
267
  break;
255
268
  case '\n':
256
- token = { type: TokenType.NEWLINE, value: firstChar};
269
+ token = new Token(TokenType.NEWLINE, firstChar);
257
270
  pos++;
258
271
  break;
259
272
  case '\r': {
260
273
  if (pos + 1 < latex.length && latex[pos + 1] === '\n') {
261
- token = { type: TokenType.NEWLINE, value: '\n' };
274
+ token = new Token(TokenType.NEWLINE, '\n');
262
275
  pos += 2;
263
276
  } else {
264
- token = { type: TokenType.NEWLINE, value: '\n' };
277
+ token = new Token(TokenType.NEWLINE, '\n');
265
278
  pos ++;
266
279
  }
267
280
  break;
@@ -271,7 +284,7 @@ export function tokenize(latex: string): Token[] {
271
284
  while (newPos < latex.length && latex[newPos] === ' ') {
272
285
  newPos += 1;
273
286
  }
274
- token = {type: TokenType.WHITESPACE, value: latex.slice(pos, newPos)};
287
+ token = new Token(TokenType.WHITESPACE, latex.slice(pos, newPos));
275
288
  pos = newPos;
276
289
  break;
277
290
  }
@@ -281,12 +294,12 @@ export function tokenize(latex: string): Token[] {
281
294
  }
282
295
  const firstTwoChars = latex.slice(pos, pos + 2);
283
296
  if (['\\\\', '\\,'].includes(firstTwoChars)) {
284
- token = { type: TokenType.CONTROL, value: firstTwoChars };
297
+ token = new Token(TokenType.CONTROL, firstTwoChars);
285
298
  } else if (['\\{','\\}', '\\%', '\\$', '\\&', '\\#', '\\_'].includes(firstTwoChars)) {
286
- token = { type: TokenType.ELEMENT, value: firstTwoChars };
299
+ token = new Token(TokenType.ELEMENT, firstTwoChars);
287
300
  } else {
288
301
  const command = eat_command_name(latex, pos + 1);
289
- token = { type: TokenType.COMMAND, value: '\\' + command};
302
+ token = new Token(TokenType.COMMAND, '\\' + command);
290
303
  }
291
304
  pos += token.value.length;
292
305
  break;
@@ -297,13 +310,13 @@ export function tokenize(latex: string): Token[] {
297
310
  while (newPos < latex.length && isdigit(latex[newPos])) {
298
311
  newPos += 1;
299
312
  }
300
- token = { type: TokenType.ELEMENT, value: latex.slice(pos, newPos) }
313
+ token = new Token(TokenType.ELEMENT, latex.slice(pos, newPos));
301
314
  } else if (isalpha(firstChar)) {
302
- token = { type: TokenType.ELEMENT, value: firstChar };
315
+ token = new Token(TokenType.ELEMENT, firstChar);
303
316
  } else if ('+-*/=\'<>!.,;?()[]|'.includes(firstChar)) {
304
- token = { type: TokenType.ELEMENT, value: firstChar }
317
+ token = new Token(TokenType.ELEMENT, firstChar)
305
318
  } else {
306
- token = { type: TokenType.UNKNOWN, value: firstChar };
319
+ token = new Token(TokenType.UNKNOWN, firstChar);
307
320
  }
308
321
  pos += token.value.length;
309
322
  }
@@ -315,7 +328,7 @@ export function tokenize(latex: string): Token[] {
315
328
  if (pos >= latex.length || latex[pos] !== '{') {
316
329
  throw new LatexParserError(`No content for ${token.value} command`);
317
330
  }
318
- tokens.push({ type: TokenType.CONTROL, value: '{' });
331
+ tokens.push(new Token(TokenType.CONTROL, '{'));
319
332
  const posClosingBracket = find_closing_curly_bracket_char(latex, pos);
320
333
  pos++;
321
334
  let textInside = latex.slice(pos, posClosingBracket);
@@ -324,18 +337,14 @@ export function tokenize(latex: string): Token[] {
324
337
  for (const char of chars) {
325
338
  textInside = textInside.replaceAll('\\' + char, char);
326
339
  }
327
- tokens.push({ type: TokenType.TEXT, value: textInside });
328
- tokens.push({ type: TokenType.CONTROL, value: '}' });
340
+ tokens.push(new Token(TokenType.TEXT, textInside));
341
+ tokens.push(new Token(TokenType.CONTROL, '}'));
329
342
  pos = posClosingBracket + 1;
330
343
  }
331
344
  }
332
345
  return tokens;
333
346
  }
334
347
 
335
- function token_eq(token1: Token, token2: Token) {
336
- return token1.type == token2.type && token1.value == token2.value;
337
- }
338
-
339
348
 
340
349
  export class LatexParserError extends Error {
341
350
  constructor(message: string) {
@@ -347,8 +356,8 @@ export class LatexParserError extends Error {
347
356
 
348
357
  type ParseResult = [TexNode, number];
349
358
 
350
- const SUB_SYMBOL:Token = { type: TokenType.CONTROL, value: '_' };
351
- const SUP_SYMBOL:Token = { type: TokenType.CONTROL, value: '^' };
359
+ const SUB_SYMBOL:Token = new Token(TokenType.CONTROL, '_');
360
+ const SUP_SYMBOL:Token = new Token(TokenType.CONTROL, '^');
352
361
 
353
362
  export class LatexParser {
354
363
  space_sensitive: boolean;
@@ -408,22 +417,22 @@ export class LatexParser {
408
417
 
409
418
  num_prime += eat_primes(tokens, pos);
410
419
  pos += num_prime;
411
- if (pos < tokens.length && token_eq(tokens[pos], SUB_SYMBOL)) {
420
+ if (pos < tokens.length && tokens[pos].eq(SUB_SYMBOL)) {
412
421
  [sub, pos] = this.parseNextExprWithoutSupSub(tokens, pos + 1);
413
422
  num_prime += eat_primes(tokens, pos);
414
423
  pos += num_prime;
415
- if (pos < tokens.length && token_eq(tokens[pos], SUP_SYMBOL)) {
424
+ if (pos < tokens.length && tokens[pos].eq(SUP_SYMBOL)) {
416
425
  [sup, pos] = this.parseNextExprWithoutSupSub(tokens, pos + 1);
417
426
  if (eat_primes(tokens, pos) > 0) {
418
427
  throw new LatexParserError('Double superscript');
419
428
  }
420
429
  }
421
- } else if (pos < tokens.length && token_eq(tokens[pos], SUP_SYMBOL)) {
430
+ } else if (pos < tokens.length && tokens[pos].eq(SUP_SYMBOL)) {
422
431
  [sup, pos] = this.parseNextExprWithoutSupSub(tokens, pos + 1);
423
432
  if (eat_primes(tokens, pos) > 0) {
424
433
  throw new LatexParserError('Double superscript');
425
434
  }
426
- if (pos < tokens.length && token_eq(tokens[pos], SUB_SYMBOL)) {
435
+ if (pos < tokens.length && tokens[pos].eq(SUB_SYMBOL)) {
427
436
  [sub, pos] = this.parseNextExprWithoutSupSub(tokens, pos + 1);
428
437
  if (eat_primes(tokens, pos) > 0) {
429
438
  throw new LatexParserError('Double superscript');
@@ -471,9 +480,9 @@ export class LatexParser {
471
480
  case TokenType.NEWLINE:
472
481
  return [{ type: 'newline', content: firstToken.value }, start + 1];
473
482
  case TokenType.COMMAND:
474
- if (token_eq(firstToken, BEGIN_COMMAND)) {
483
+ if (firstToken.eq(BEGIN_COMMAND)) {
475
484
  return this.parseBeginEndExpr(tokens, start);
476
- } else if (token_eq(firstToken, LEFT_COMMAND)) {
485
+ } else if (firstToken.eq(LEFT_COMMAND)) {
477
486
  return this.parseLeftRightExpr(tokens, start);
478
487
  } else {
479
488
  return this.parseCommandExpr(tokens, start);
@@ -527,7 +536,7 @@ export class LatexParser {
527
536
  }
528
537
  return [{ type: 'symbol', content: command }, pos];
529
538
  case 1: {
530
- if (command === '\\sqrt' && pos < tokens.length && token_eq(tokens[pos], LEFT_SQUARE_BRACKET)) {
539
+ if (command === '\\sqrt' && pos < tokens.length && tokens[pos].eq(LEFT_SQUARE_BRACKET)) {
531
540
  const posLeftSquareBracket = pos;
532
541
  const posRightSquareBracket = find_closing_square_bracket(tokens, pos);
533
542
  const exprInside = tokens.slice(posLeftSquareBracket + 1, posRightSquareBracket);
@@ -538,9 +547,9 @@ export class LatexParser {
538
547
  if (pos + 2 >= tokens.length) {
539
548
  throw new LatexParserError('Expecting content for \\text command');
540
549
  }
541
- assert(token_eq(tokens[pos], LEFT_CURLY_BRACKET));
550
+ assert(tokens[pos].eq(LEFT_CURLY_BRACKET));
542
551
  assert(tokens[pos + 1].type === TokenType.TEXT);
543
- assert(token_eq(tokens[pos + 2], RIGHT_CURLY_BRACKET));
552
+ assert(tokens[pos + 2].eq(RIGHT_CURLY_BRACKET));
544
553
  const text = tokens[pos + 1].value;
545
554
  return [{ type: 'text', content: text }, pos + 3];
546
555
  }
@@ -558,7 +567,7 @@ export class LatexParser {
558
567
  }
559
568
 
560
569
  parseLeftRightExpr(tokens: Token[], start: number): ParseResult {
561
- assert(token_eq(tokens[start], LEFT_COMMAND));
570
+ assert(tokens[start].eq(LEFT_COMMAND));
562
571
 
563
572
  let pos = start + 1;
564
573
  pos += eat_whitespaces(tokens, pos).length;
@@ -603,12 +612,12 @@ export class LatexParser {
603
612
  }
604
613
 
605
614
  parseBeginEndExpr(tokens: Token[], start: number): ParseResult {
606
- assert(token_eq(tokens[start], BEGIN_COMMAND));
615
+ assert(tokens[start].eq(BEGIN_COMMAND));
607
616
 
608
617
  let pos = start + 1;
609
- assert(token_eq(tokens[pos], LEFT_CURLY_BRACKET));
618
+ assert(tokens[pos].eq(LEFT_CURLY_BRACKET));
610
619
  assert(tokens[pos + 1].type === TokenType.TEXT);
611
- assert(token_eq(tokens[pos + 2], RIGHT_CURLY_BRACKET));
620
+ assert(tokens[pos + 2].eq(RIGHT_CURLY_BRACKET));
612
621
  const envName = tokens[pos + 1].value;
613
622
  pos += 3;
614
623
 
@@ -623,9 +632,9 @@ export class LatexParser {
623
632
  const exprInsideEnd = endIdx;
624
633
  pos = endIdx + 1;
625
634
 
626
- assert(token_eq(tokens[pos], LEFT_CURLY_BRACKET));
635
+ assert(tokens[pos].eq(LEFT_CURLY_BRACKET));
627
636
  assert(tokens[pos + 1].type === TokenType.TEXT);
628
- assert(token_eq(tokens[pos + 2], RIGHT_CURLY_BRACKET));
637
+ assert(tokens[pos + 2].eq(RIGHT_CURLY_BRACKET));
629
638
  if (tokens[pos + 1].value !== envName) {
630
639
  throw new LatexParserError('Mismatched \\begin and \\end environments');
631
640
  }
@@ -674,7 +683,7 @@ export class LatexParser {
674
683
 
675
684
  // Remove all whitespace before or after _ or ^
676
685
  function passIgnoreWhitespaceBeforeScriptMark(tokens: Token[]): Token[] {
677
- const is_script_mark = (token: Token) => token_eq(token, SUB_SYMBOL) || token_eq(token, SUP_SYMBOL);
686
+ const is_script_mark = (token: Token) => token.eq(SUB_SYMBOL) || token.eq(SUP_SYMBOL);
678
687
  let out_tokens: Token[] = [];
679
688
  for (let i = 0; i < tokens.length; i++) {
680
689
  if (tokens[i].type === TokenType.WHITESPACE && i + 1 < tokens.length && is_script_mark(tokens[i + 1])) {
package/src/types.ts CHANGED
@@ -9,10 +9,6 @@ export enum TokenType {
9
9
  UNKNOWN,
10
10
  }
11
11
 
12
- export interface Token {
13
- type: TokenType;
14
- value: string;
15
- }
16
12
 
17
13
 
18
14
  export interface TexSupsubData {
package/src/writer.ts CHANGED
@@ -267,21 +267,30 @@ export class TypstWriter {
267
267
  public finalize(): string {
268
268
  this.flushQueue();
269
269
  const smartFloorPass = function (input: string): string {
270
- // Use regex to replace all " xxx " with "floor(xxx)"
271
- let res = input.replace(/⌊\s*(.*?)\s*⌋/g, "floor($1)");
270
+ // Use regex to replace all "floor.l xxx floor.r" with "floor(xxx)"
271
+ let res = input.replace(/floor\.l\s*(.*?)\s*floor\.r/g, "floor($1)");
272
272
  // Typst disallow "floor()" with empty argument, so add am empty string inside if it's empty.
273
273
  res = res.replace(/floor\(\)/g, 'floor("")');
274
274
  return res;
275
275
  };
276
276
  const smartCeilPass = function (input: string): string {
277
- // Use regex to replace all " xxx " with "ceil(xxx)"
278
- let res = input.replace(/⌈\s*(.*?)\s*⌉/g, "ceil($1)");
277
+ // Use regex to replace all "ceil.l xxx ceil.r" with "ceil(xxx)"
278
+ let res = input.replace(/ceil\.l\s*(.*?)\s*ceil\.r/g, "ceil($1)");
279
279
  // Typst disallows "ceil()" with empty argument, so add an empty string inside if it's empty.
280
280
  res = res.replace(/ceil\(\)/g, 'ceil("")');
281
281
  return res;
282
282
  }
283
- this.buffer = smartFloorPass(this.buffer);
284
- this.buffer = smartCeilPass(this.buffer);
283
+ const smartRoundPass = function (input: string): string {
284
+ // Use regex to replace all "floor.l xxx ceil.r" with "round(xxx)"
285
+ let res = input.replace(/floor\.l\s*(.*?)\s*ceil\.r/g, "round($1)");
286
+ // Typst disallow "round()" with empty argument, so add an empty string inside if it's empty.
287
+ res = res.replace(/round\(\)/g, 'round("")');
288
+ return res;
289
+ }
290
+ const all_passes = [smartFloorPass, smartCeilPass, smartRoundPass];
291
+ for (const pass of all_passes) {
292
+ this.buffer = pass(this.buffer);
293
+ }
285
294
  return this.buffer;
286
295
  }
287
296
  }
@@ -352,7 +361,12 @@ export function convertTree(node: TexNode): TypstNode {
352
361
  content: '',
353
362
  args: node.args!.map(convertTree),
354
363
  };
355
- if (["[]", "()", "\\{\\}", "\\lfloor\\rfloor", "\\lceil\\rceil"].includes(left.content + right.content)) {
364
+ if ([
365
+ "[]", "()", "\\{\\}",
366
+ "\\lfloor\\rfloor",
367
+ "\\lceil\\rceil",
368
+ "\\lfloor\\rceil",
369
+ ].includes(left.content + right.content)) {
356
370
  return group;
357
371
  }
358
372
  return {
@@ -0,0 +1,29 @@
1
+ import requests
2
+ from bs4 import BeautifulSoup
3
+
4
+
5
+ if __name__ == '__main__':
6
+ symbol_map = {}
7
+
8
+ url = "https://typst.app/docs/reference/symbols/sym/"
9
+ html_text = requests.get(url).text
10
+ soup = BeautifulSoup(html_text, 'html.parser')
11
+ # <ul class="symbol-grid">
12
+ ul = soup.find('ul', class_='symbol-grid')
13
+ li_list = ul.find_all('li')
14
+ for li in li_list:
15
+ # e.g. <li id="symbol-brace.r.double" data-latex-name="\rBrace" data-codepoint="10628"><button>...</button></li>
16
+ # ==> latex = rBrace
17
+ # ==> typst = brace.r.double
18
+ # ==> unicode = 10628 = \u2984
19
+ latex = li.get('data-latex-name', None)
20
+ typst = li['id'][7:]
21
+ unicode = int(li['data-codepoint'])
22
+ if latex is not None:
23
+ # some latex macro can be associated with multiple typst
24
+ # e.g. \equiv can be mapped to equal or equiv.triple
25
+ # We only keep the first one
26
+ if latex not in symbol_map:
27
+ symbol_map[latex] = typst
28
+ # print(f" ['{latex[1:]}', '{typst}'],")
29
+ print(f'{latex[1:]} = "{typst}"')