rapydscript-ns 0.8.4 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132)
  1. package/.agignore +1 -1
  2. package/.github/workflows/ci.yml +38 -38
  3. package/=template.pyj +5 -5
  4. package/CHANGELOG.md +18 -0
  5. package/HACKING.md +103 -103
  6. package/LICENSE +24 -24
  7. package/README.md +715 -169
  8. package/TODO.md +9 -2
  9. package/add-toc-to-readme +2 -2
  10. package/bin/export +75 -75
  11. package/bin/rapydscript +70 -70
  12. package/bin/web-repl-export +102 -102
  13. package/build +2 -2
  14. package/language-service/index.js +36 -27
  15. package/package.json +1 -1
  16. package/publish.py +37 -37
  17. package/release/baselib-plain-pretty.js +2358 -168
  18. package/release/baselib-plain-ugly.js +73 -3
  19. package/release/compiler.js +6282 -3092
  20. package/release/signatures.json +31 -30
  21. package/session.vim +4 -4
  22. package/setup.cfg +2 -2
  23. package/src/ast.pyj +1 -0
  24. package/src/baselib-builtins.pyj +340 -2
  25. package/src/baselib-bytes.pyj +664 -0
  26. package/src/baselib-errors.pyj +1 -1
  27. package/src/baselib-internal.pyj +267 -60
  28. package/src/baselib-itertools.pyj +110 -97
  29. package/src/baselib-str.pyj +22 -4
  30. package/src/compiler.pyj +36 -36
  31. package/src/errors.pyj +30 -30
  32. package/src/lib/abc.pyj +317 -0
  33. package/src/lib/aes.pyj +646 -646
  34. package/src/lib/copy.pyj +120 -120
  35. package/src/lib/dataclasses.pyj +532 -0
  36. package/src/lib/elementmaker.pyj +83 -83
  37. package/src/lib/encodings.pyj +126 -126
  38. package/src/lib/enum.pyj +125 -0
  39. package/src/lib/gettext.pyj +569 -569
  40. package/src/lib/itertools.pyj +580 -580
  41. package/src/lib/math.pyj +193 -193
  42. package/src/lib/operator.pyj +11 -11
  43. package/src/lib/pythonize.pyj +20 -20
  44. package/src/lib/random.pyj +118 -118
  45. package/src/lib/re.pyj +504 -470
  46. package/src/lib/react.pyj +74 -74
  47. package/src/lib/traceback.pyj +63 -63
  48. package/src/lib/typing.pyj +577 -0
  49. package/src/lib/uuid.pyj +77 -77
  50. package/src/monaco-language-service/builtins.js +14 -4
  51. package/src/monaco-language-service/diagnostics.js +19 -20
  52. package/src/monaco-language-service/dts.js +550 -550
  53. package/src/output/classes.pyj +62 -26
  54. package/src/output/comments.pyj +45 -45
  55. package/src/output/exceptions.pyj +201 -201
  56. package/src/output/functions.pyj +78 -5
  57. package/src/output/jsx.pyj +164 -164
  58. package/src/output/loops.pyj +5 -2
  59. package/src/output/operators.pyj +100 -34
  60. package/src/output/treeshake.pyj +182 -182
  61. package/src/output/utils.pyj +72 -72
  62. package/src/parse.pyj +80 -16
  63. package/src/string_interpolation.pyj +72 -72
  64. package/src/tokenizer.pyj +9 -4
  65. package/src/unicode_aliases.pyj +576 -576
  66. package/src/utils.pyj +192 -192
  67. package/test/_import_one.pyj +37 -37
  68. package/test/_import_two/__init__.pyj +11 -11
  69. package/test/_import_two/level2/deep.pyj +4 -4
  70. package/test/_import_two/other.pyj +6 -6
  71. package/test/_import_two/sub.pyj +13 -13
  72. package/test/abc.pyj +291 -0
  73. package/test/aes_vectors.pyj +421 -421
  74. package/test/annotations.pyj +80 -80
  75. package/test/arithmetic_nostrict.pyj +88 -0
  76. package/test/arithmetic_types.pyj +169 -0
  77. package/test/baselib.pyj +91 -0
  78. package/test/bytes.pyj +467 -0
  79. package/test/classes.pyj +1 -0
  80. package/test/comparison_ops.pyj +173 -0
  81. package/test/dataclasses.pyj +253 -0
  82. package/test/decorators.pyj +77 -77
  83. package/test/docstrings.pyj +39 -39
  84. package/test/elementmaker_test.pyj +45 -45
  85. package/test/enum.pyj +134 -0
  86. package/test/eval_exec.pyj +56 -0
  87. package/test/format.pyj +148 -0
  88. package/test/functions.pyj +151 -151
  89. package/test/generators.pyj +41 -41
  90. package/test/generic.pyj +370 -370
  91. package/test/imports.pyj +72 -72
  92. package/test/internationalization.pyj +73 -73
  93. package/test/lint.pyj +164 -164
  94. package/test/loops.pyj +85 -85
  95. package/test/numpy.pyj +734 -734
  96. package/test/object.pyj +64 -0
  97. package/test/omit_function_metadata.pyj +20 -20
  98. package/test/python_compat.pyj +17 -15
  99. package/test/python_features.pyj +70 -15
  100. package/test/regexp.pyj +83 -55
  101. package/test/repl.pyj +121 -121
  102. package/test/scoped_flags.pyj +76 -76
  103. package/test/tuples.pyj +96 -0
  104. package/test/typing.pyj +469 -0
  105. package/test/unit/index.js +116 -7
  106. package/test/unit/language-service-dts.js +543 -543
  107. package/test/unit/language-service-hover.js +455 -455
  108. package/test/unit/language-service.js +84 -0
  109. package/test/unit/web-repl.js +804 -1
  110. package/test/vars_locals_globals.pyj +94 -0
  111. package/tools/cli.js +558 -547
  112. package/tools/compile.js +224 -219
  113. package/tools/completer.js +131 -131
  114. package/tools/embedded_compiler.js +262 -251
  115. package/tools/gettext.js +185 -185
  116. package/tools/ini.js +65 -65
  117. package/tools/lint.js +16 -19
  118. package/tools/msgfmt.js +187 -187
  119. package/tools/repl.js +223 -223
  120. package/tools/test.js +118 -118
  121. package/tools/utils.js +128 -128
  122. package/tools/web_repl.js +95 -95
  123. package/try +41 -41
  124. package/web-repl/env.js +196 -196
  125. package/web-repl/index.html +163 -163
  126. package/web-repl/main.js +252 -252
  127. package/web-repl/prism.css +139 -139
  128. package/web-repl/prism.js +113 -113
  129. package/web-repl/rapydscript.js +224 -224
  130. package/web-repl/sha1.js +25 -25
  131. package/PYTHON_DIFFERENCES_REPORT.md +0 -291
  132. package/PYTHON_FEATURE_COVERAGE.md +0 -200
package/src/parse.pyj CHANGED
@@ -31,7 +31,7 @@ TreeWalker
31
31
  from tokenizer import tokenizer, is_token, RESERVED_WORDS
32
32
 
33
33
  COMPILER_VERSION = '__COMPILER_VERSION__'
34
- PYTHON_FLAGS = {'dict_literals':True, 'overload_getitem':True, 'bound_methods':True, 'hash_literals':True, 'overload_operators':True, 'truthiness':True, 'jsx':True}
34
+ PYTHON_FLAGS = {'dict_literals':True, 'overload_getitem':True, 'bound_methods':True, 'hash_literals':True, 'overload_operators':True, 'truthiness':True, 'jsx':True, 'strict_arithmetic':True}
35
35
 
36
36
 
37
37
  def get_compiler_version():
@@ -144,7 +144,7 @@ PRECEDENCE = (def(a, ret):
144
144
 
145
145
  STATEMENTS_WITH_LABELS = array_to_hash([ "for", "do", "while", "switch" ])
146
146
 
147
- ATOMIC_START_TOKEN = array_to_hash([ "atom", "num", "string", "regexp", "name", "js" ])
147
+ ATOMIC_START_TOKEN = array_to_hash([ "atom", "num", "imaginary", "string", "bytes_literal", "regexp", "name", "js" ])
148
148
 
149
149
  compile_time_decorators = ['staticmethod', 'classmethod', 'external', 'property']
150
150
 
@@ -512,7 +512,7 @@ def create_parser_ctx(S, import_dirs, module_id, baselib_items, imported_module_
512
512
  p = prev()
513
513
  if p and not S.token.nlb and ATOMIC_START_TOKEN[p.type] and not is_('punc', ':'):
514
514
  unexpected()
515
- if tmp_ is "string":
515
+ if tmp_ is "string" or tmp_ is "bytes_literal":
516
516
  return simple_statement()
517
517
  elif tmp_ is "shebang":
518
518
  tmp_ = S.token.value
@@ -542,7 +542,7 @@ def create_parser_ctx(S, import_dirs, module_id, baselib_items, imported_module_
542
542
  p = peek()
543
543
  # 'match' is a soft keyword: treat as match statement when followed by
544
544
  # a token that can start an expression subject (not an assignment/attr-access/call op)
545
- if p.type is 'name' or p.type is 'string' or p.type is 'num' or p.type is 'atom' or p.type is 'js' or (p.type is 'punc' and (p.value is '[' or p.value is '(')):
545
+ if p.type is 'name' or p.type is 'string' or p.type is 'bytes_literal' or p.type is 'num' or p.type is 'atom' or p.type is 'js' or (p.type is 'punc' and (p.value is '[' or p.value is '(')):
546
546
  next() # consume the 'match' name token
547
547
  return match_()
548
548
  if (is_token(peek(), 'punc', ':')):
@@ -1063,7 +1063,26 @@ def create_parser_ctx(S, import_dirs, module_id, baselib_items, imported_module_
1063
1063
  })
1064
1064
 
1065
1065
  def simple_statement(tmp):
1066
+ start = S.token
1066
1067
  tmp = expression(True)
1068
+ # Handle annotated assignment with a complex target (e.g., self.x: Type = value).
1069
+ # Simple-name annotated assignments are caught earlier (annotated_var_statement),
1070
+ # but attribute/subscript targets reach here as a completed expression followed by ':'.
1071
+ if is_("punc", ":"):
1072
+ next() # consume ':'
1073
+ annotation = maybe_conditional()
1074
+ value = None
1075
+ if is_("operator", "="):
1076
+ next() # consume '='
1077
+ value = expression(True)
1078
+ semicolon()
1079
+ return new AST_AnnotatedAssign({
1080
+ 'start': start,
1081
+ 'target': tmp,
1082
+ 'annotation': annotation,
1083
+ 'value': value,
1084
+ 'end': prev()
1085
+ })
1067
1086
  semicolon()
1068
1087
  return new AST_SimpleStatement({
1069
1088
  'body': tmp
@@ -1189,17 +1208,18 @@ def create_parser_ctx(S, import_dirs, module_id, baselib_items, imported_module_
1189
1208
  # to maintain a list of local variables for every AST_Scope and provide
1190
1209
  # an easy way to walk the ast tree upwards.
1191
1210
  if is_node_type(expr, AST_SymbolRef):
1192
- # check Native JS classes
1211
+ # traverse in reverse to check local / imported classes first
1212
+ # (user-defined classes take priority over native JS classes)
1213
+ for s in range(S.classes.length-1, -1, -1):
1214
+ if has_prop(S.classes[s], expr.name):
1215
+ return S.classes[s][expr.name]
1216
+
1217
+ # fallback to Native JS classes
1193
1218
  if has_prop(NATIVE_CLASSES, expr.name):
1194
1219
  return NATIVE_CLASSES[expr.name]
1195
1220
  if has_prop(ERROR_CLASSES, expr.name):
1196
1221
  return ERROR_CLASSES[expr.name]
1197
1222
 
1198
- # traverse in reverse to check local variables first
1199
- for s in range(S.classes.length-1, -1, -1):
1200
- if has_prop(S.classes[s], expr.name):
1201
- return S.classes[s][expr.name]
1202
-
1203
1223
  elif is_node_type(expr, AST_Dot):
1204
1224
  referenced_path = []
1205
1225
  # this one is for detecting classes inside modules and eventually nested classes
@@ -2134,6 +2154,26 @@ def create_parser_ctx(S, import_dirs, module_id, baselib_items, imported_module_
2134
2154
  'value': strings.join('')
2135
2155
  })
2136
2156
 
2157
+ def bytes_literal_():
2158
+ parts = v'[]'
2159
+ start = S.token
2160
+ while True:
2161
+ parts.push(S.token.value)
2162
+ if peek().type is not 'bytes_literal':
2163
+ break
2164
+ next()
2165
+ end_tok = S.token
2166
+ value = parts.join('')
2167
+ return new AST_Call({
2168
+ 'start': start,
2169
+ 'end': end_tok,
2170
+ 'expression': new AST_SymbolRef({'name': 'bytes', 'start': start, 'end': start}),
2171
+ 'args': [
2172
+ new AST_String({'start': start, 'end': end_tok, 'value': value}),
2173
+ new AST_String({'start': start, 'end': end_tok, 'value': 'latin-1'})
2174
+ ]
2175
+ })
2176
+
2137
2177
  def token_as_atom_node():
2138
2178
  tok = S.token
2139
2179
  tmp_ = tok.type
@@ -2145,8 +2185,20 @@ def create_parser_ctx(S, import_dirs, module_id, baselib_items, imported_module_
2145
2185
  'end': tok,
2146
2186
  'value': tok.value
2147
2187
  })
2188
+ elif tmp_ is "imaginary":
2189
+ return new AST_Call({
2190
+ 'start': tok,
2191
+ 'end': tok,
2192
+ 'expression': new AST_SymbolRef({'name': 'complex', 'start': tok, 'end': tok}),
2193
+ 'args': [
2194
+ new AST_Number({'start': tok, 'end': tok, 'value': 0}),
2195
+ new AST_Number({'start': tok, 'end': tok, 'value': tok.value})
2196
+ ]
2197
+ })
2148
2198
  elif tmp_ is "string":
2149
2199
  return string_()
2200
+ elif tmp_ is "bytes_literal":
2201
+ return bytes_literal_()
2150
2202
  elif tmp_ is "regexp":
2151
2203
  return new AST_RegExp({
2152
2204
  'start': tok,
@@ -2201,17 +2253,28 @@ def create_parser_ctx(S, import_dirs, module_id, baselib_items, imported_module_
2201
2253
  if is_('punc', ')'):
2202
2254
  next()
2203
2255
  return new AST_Array({'elements':[]})
2204
- ex = expression(True)
2256
+ ex = expression(False)
2205
2257
  if is_('keyword', 'for'):
2206
2258
  ret = read_comprehension(new AST_GeneratorComprehension({'statement': ex}), ')')
2207
2259
  S.in_parenthesized_expr = False
2208
2260
  return ret
2261
+ if is_('punc', ','):
2262
+ # Tuple literal: (a,) or (a, b, c, ...) — compile to JS array
2263
+ elements = [ex]
2264
+ while is_('punc', ','):
2265
+ next()
2266
+ if is_('punc', ')'):
2267
+ break # trailing comma is OK
2268
+ elements.push(expression(False))
2269
+ expect(')')
2270
+ arr = new AST_Array({'elements': elements, 'start': start, 'end': prev()})
2271
+ S.in_parenthesized_expr = False
2272
+ return subscripts(arr, allow_calls)
2209
2273
  ex.start = start
2210
2274
  ex.end = S.token
2211
2275
  if is_node_type(ex, AST_SymbolRef):
2212
2276
  ex.parens = True
2213
- if not is_node_type(ex, AST_GeneratorComprehension):
2214
- expect(")")
2277
+ expect(")")
2215
2278
  if is_node_type(ex, AST_UnaryPrefix):
2216
2279
  ex.parenthesized = True
2217
2280
  S.in_parenthesized_expr = False
@@ -3178,6 +3241,7 @@ def create_parser_ctx(S, import_dirs, module_id, baselib_items, imported_module_
3178
3241
  'right': right,
3179
3242
  'end': right.end,
3180
3243
  'overloaded': S.scoped_flags.get('overload_operators', False),
3244
+ 'strict_arith': S.scoped_flags.get('overload_operators', False) and S.scoped_flags.get('strict_arithmetic', True),
3181
3245
  'python_truthiness': S.scoped_flags.get('truthiness', False) and (op is '&&' or op is '||')
3182
3246
  })
3183
3247
  return expr_op(ret, min_prec, no_in)
@@ -3259,6 +3323,7 @@ def create_parser_ctx(S, import_dirs, module_id, baselib_items, imported_module_
3259
3323
  })
3260
3324
  if S.scoped_flags.get('overload_operators', False) and val is not '=':
3261
3325
  asgn.overloaded = True
3326
+ asgn.strict_arith = S.scoped_flags.get('strict_arithmetic', True)
3262
3327
  return asgn
3263
3328
  return left
3264
3329
 
@@ -3472,9 +3537,8 @@ def parse(text, options):
3472
3537
  'get': def (name, defval):
3473
3538
  for v'var i = this.stack.length - 1; i >= 0; i--':
3474
3539
  d = this.stack[i]
3475
- q = d[name]
3476
- if q:
3477
- return q
3540
+ if has_prop(d, name):
3541
+ return d[name]
3478
3542
  return defval
3479
3543
  ,
3480
3544
  'set': def (name, val): this.stack[-1][name] = val;,
@@ -1,72 +1,72 @@
1
- # vim:fileencoding=utf-8
2
- # License: BSD Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
3
- from __python__ import hash_literals
4
-
5
- def quoted_string(x):
6
- return '"' + x.replace(/\\/g, '\\\\').replace(/"/g, r'\"').replace(/\n/g, '\\n') + '"'
7
-
8
- def render_markup(markup):
9
- pos, key = 0, ''
10
- while pos < markup.length:
11
- ch = markup[pos]
12
- if ch is '!' or ch is ':':
13
- break
14
- key += ch
15
- pos += 1
16
- fmtspec = markup[pos:]
17
- prefix = ''
18
- if key.endsWith('='):
19
- prefix=key
20
- key = key[:-1]
21
- return 'ρσ_str.format("' + prefix + '{' + fmtspec + '}", ' + key + ')'
22
-
23
-
24
- def interpolate(template, raise_error):
25
- pos = in_brace = 0
26
- markup = ''
27
- ans = v'[""]'
28
- while pos < template.length:
29
- ch = template[pos]
30
- if in_brace:
31
- if ch is '{':
32
- in_brace += 1
33
- markup += '{'
34
- elif ch is '}':
35
- in_brace -= 1
36
- if in_brace > 0:
37
- markup += '}'
38
- else:
39
- ans.push(v'[markup]')
40
- ans.push('')
41
- else:
42
- markup += ch
43
- else:
44
- if ch is '{':
45
- if template[pos+1] is '{':
46
- pos += 1
47
- ans[-1] += '{'
48
- else:
49
- in_brace = 1
50
- markup = ''
51
- elif ch is '}':
52
- if template[pos+1] is '}':
53
- pos += 1
54
- ans[-1] += '}'
55
- else:
56
- raise_error("f-string: single '}' is not allowed")
57
- else:
58
- ans[-1] += ch
59
-
60
- pos += 1
61
-
62
- if in_brace:
63
- raise_error("expected '}' before end of string")
64
-
65
- if ans[-1] is '+':
66
- ans[-1] = ''
67
- for v'var i = 0; i < ans.length; i++':
68
- if jstype(ans[i]) is 'string':
69
- ans[i] = quoted_string(ans[i])
70
- else:
71
- ans[i] = '+' + render_markup.apply(this, ans[i]) + '+'
72
- return ans.join('')
1
+ # vim:fileencoding=utf-8
2
+ # License: BSD Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
3
+ from __python__ import hash_literals
4
+
5
+ def quoted_string(x):
6
+ return '"' + x.replace(/\\/g, '\\\\').replace(/"/g, r'\"').replace(/\n/g, '\\n') + '"'
7
+
8
+ def render_markup(markup):
9
+ pos, key = 0, ''
10
+ while pos < markup.length:
11
+ ch = markup[pos]
12
+ if ch is '!' or ch is ':':
13
+ break
14
+ key += ch
15
+ pos += 1
16
+ fmtspec = markup[pos:]
17
+ prefix = ''
18
+ if key.endsWith('='):
19
+ prefix=key
20
+ key = key[:-1]
21
+ return 'ρσ_str.format("' + prefix + '{' + fmtspec + '}", ' + key + ')'
22
+
23
+
24
+ def interpolate(template, raise_error):
25
+ pos = in_brace = 0
26
+ markup = ''
27
+ ans = v'[""]'
28
+ while pos < template.length:
29
+ ch = template[pos]
30
+ if in_brace:
31
+ if ch is '{':
32
+ in_brace += 1
33
+ markup += '{'
34
+ elif ch is '}':
35
+ in_brace -= 1
36
+ if in_brace > 0:
37
+ markup += '}'
38
+ else:
39
+ ans.push(v'[markup]')
40
+ ans.push('')
41
+ else:
42
+ markup += ch
43
+ else:
44
+ if ch is '{':
45
+ if template[pos+1] is '{':
46
+ pos += 1
47
+ ans[-1] += '{'
48
+ else:
49
+ in_brace = 1
50
+ markup = ''
51
+ elif ch is '}':
52
+ if template[pos+1] is '}':
53
+ pos += 1
54
+ ans[-1] += '}'
55
+ else:
56
+ raise_error("f-string: single '}' is not allowed")
57
+ else:
58
+ ans[-1] += ch
59
+
60
+ pos += 1
61
+
62
+ if in_brace:
63
+ raise_error("expected '}' before end of string")
64
+
65
+ if ans[-1] is '+':
66
+ ans[-1] = ''
67
+ for v'var i = 0; i < ans.length; i++':
68
+ if jstype(ans[i]) is 'string':
69
+ ans[i] = quoted_string(ans[i])
70
+ else:
71
+ ans[i] = '+' + render_markup.apply(this, ans[i]) + '+'
72
+ return ans.join('')
package/src/tokenizer.pyj CHANGED
@@ -90,7 +90,7 @@ KEYWORDS_ATOM = "False None True"
90
90
  # see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar
91
91
  RESERVED_WORDS = ("break case class catch const continue debugger default delete do else export extends"
92
92
  " finally for function if import in instanceof new return switch this throw try typeof var void"
93
- " while with yield enum implements static private package let public protected interface await null true false" )
93
+ " while with yield implements static private package let public protected interface await null true false" )
94
94
 
95
95
  KEYWORDS_BEFORE_EXPRESSION = "return yield new del raise elif else if"
96
96
 
@@ -104,7 +104,7 @@ IDENTIFIER_PAT = /^[a-z_$][_a-z0-9$]*$/i
104
104
 
105
105
  def is_string_modifier(val):
106
106
  for ch in val:
107
- if 'vrufVRUF'.indexOf(ch) is -1:
107
+ if 'vrufbVRUFB'.indexOf(ch) is -1:
108
108
  return False
109
109
  return True
110
110
 
@@ -363,6 +363,8 @@ def tokenizer(raw_text, filename):
363
363
  return False
364
364
  elif ch is '.':
365
365
  return (has_dot = True) if not has_dot and not has_x and not has_e else False
366
+ elif ch is 'j' or ch is 'J':
367
+ return False # imaginary suffix — stop here; handled after loop
366
368
  return is_alphanumeric_char(ch.charCodeAt(0))
367
369
  )
368
370
  if prefix:
@@ -370,6 +372,9 @@ def tokenizer(raw_text, filename):
370
372
 
371
373
  valid = parse_js_number(num)
372
374
  if not isNaN(valid):
375
+ if peek() is 'j' or peek() is 'J':
376
+ next()
377
+ return token("imaginary", valid)
373
378
  return token("num", valid)
374
379
  else:
375
380
  parse_error("Invalid syntax: " + num)
@@ -698,7 +703,7 @@ def tokenizer(raw_text, filename):
698
703
 
699
704
  if is_identifier_start(code):
700
705
  tok = read_word()
701
- if '\'"'.indexOf(peek()) is not -1 and is_string_modifier(tok.value):
706
+ if (peek() is "'" or peek() is '"') and is_string_modifier(tok.value):
702
707
  mods = tok.value.toLowerCase()
703
708
  start_pos_for_string = S.tokpos
704
709
  stok = read_string(mods.indexOf('r') is not -1, mods.indexOf('v') is not -1)
@@ -707,7 +712,7 @@ def tokenizer(raw_text, filename):
707
712
  tok.col += start_pos_for_string - tok.pos
708
713
  return handle_interpolated_string(stok.value, tok)
709
714
  tok.value = stok.value
710
- tok.type = stok.type
715
+ tok.type = 'bytes_literal' if mods.indexOf('b') is not -1 else stok.type
711
716
  return tok
712
717
 
713
718
  parse_error("Unexpected character «" + ch + "»")