terser 5.3.7 → 5.5.1

This diff represents the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the differences between package versions as they appear in their respective public registries.
package/lib/parse.js CHANGED
@@ -157,6 +157,9 @@ import {
157
157
  _PURE
158
158
  } from "./ast.js";
159
159
 
160
+ var LATEST_RAW = ""; // Only used for numbers and template strings
161
+ var LATEST_TEMPLATE_END = true;
162
+
160
163
  var KEYWORDS = "break case catch class const continue debugger default delete do else export extends finally for function if in instanceof let new return switch throw try typeof var void while with";
161
164
  var KEYWORDS_ATOM = "false null true";
162
165
  var RESERVED_WORDS = "enum implements import interface package private protected public static super this " + KEYWORDS_ATOM + " " + KEYWORDS;
@@ -212,6 +215,9 @@ var OPERATORS = makePredicate([
212
215
  "=",
213
216
  "+=",
214
217
  "-=",
218
+ "||=",
219
+ "&&=",
220
+ "??=",
215
221
  "/=",
216
222
  "*=",
217
223
  "**=",
@@ -309,12 +315,14 @@ function is_identifier_char(ch) {
309
315
  return UNICODE.ID_Continue.test(ch);
310
316
  }
311
317
 
318
+ const BASIC_IDENT = /^[a-z_$][a-z0-9_$]*$/i;
319
+
312
320
  function is_basic_identifier_string(str) {
313
- return /^[a-z_$][a-z0-9_$]*$/i.test(str);
321
+ return BASIC_IDENT.test(str);
314
322
  }
315
323
 
316
324
  function is_identifier_string(str, allow_surrogates) {
317
- if (/^[a-z_$][a-z0-9_$]*$/i.test(str)) {
325
+ if (BASIC_IDENT.test(str)) {
318
326
  return true;
319
327
  }
320
328
  if (!allow_surrogates && /[\ud800-\udfff]/.test(str)) {
@@ -472,29 +480,23 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
472
480
  } else if (!is_comment) {
473
481
  prev_was_dot = false;
474
482
  }
475
- var ret = {
476
- type : type,
477
- value : value,
478
- line : S.tokline,
479
- col : S.tokcol,
480
- pos : S.tokpos,
481
- endline : S.line,
482
- endcol : S.col,
483
- endpos : S.pos,
484
- nlb : S.newline_before,
485
- file : filename
486
- };
487
- if (/^(?:num|string|regexp)$/i.test(type)) {
488
- ret.raw = $TEXT.substring(ret.pos, ret.endpos);
489
- }
483
+ const line = S.tokline;
484
+ const col = S.tokcol;
485
+ const pos = S.tokpos;
486
+ const nlb = S.newline_before;
487
+ const file = filename;
488
+ let comments_before = [];
489
+ let comments_after = [];
490
+
490
491
  if (!is_comment) {
491
- ret.comments_before = S.comments_before;
492
- ret.comments_after = S.comments_before = [];
492
+ comments_before = S.comments_before;
493
+ comments_after = S.comments_before = [];
493
494
  }
494
495
  S.newline_before = false;
495
- ret = new AST_Token(ret);
496
- if (!is_comment) previous_token = ret;
497
- return ret;
496
+ const tok = new AST_Token(type, value, line, col, pos, nlb, comments_before, comments_after, file);
497
+
498
+ if (!is_comment) previous_token = tok;
499
+ return tok;
498
500
  }
499
501
 
500
502
  function skip_whitespace() {
@@ -546,6 +548,9 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
546
548
  return RE_NUM_LITERAL.test(ch);
547
549
  });
548
550
  if (prefix) num = prefix + num;
551
+
552
+ LATEST_RAW = num;
553
+
549
554
  if (RE_OCT_NUMBER.test(num) && next_token.has_directive("use strict")) {
550
555
  parse_error("Legacy octal literals are not allowed in strict mode");
551
556
  }
@@ -653,15 +658,17 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
653
658
  }
654
659
 
655
660
  var read_string = with_eof_error("Unterminated string constant", function() {
656
- var quote = next(), ret = "";
661
+ const start_pos = S.pos;
662
+ var quote = next(), ret = [];
657
663
  for (;;) {
658
664
  var ch = next(true, true);
659
665
  if (ch == "\\") ch = read_escaped_char(true, true);
660
666
  else if (ch == "\r" || ch == "\n") parse_error("Unterminated string constant");
661
667
  else if (ch == quote) break;
662
- ret += ch;
668
+ ret.push(ch);
663
669
  }
664
- var tok = token("string", ret);
670
+ var tok = token("string", ret.join(""));
671
+ LATEST_RAW = S.text.slice(start_pos, S.pos);
665
672
  tok.quote = quote;
666
673
  return tok;
667
674
  });
@@ -680,7 +687,8 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
680
687
  next(true, true);
681
688
  S.brace_counter++;
682
689
  tok = token(begin ? "template_head" : "template_substitution", content);
683
- tok.raw = raw;
690
+ LATEST_RAW = raw;
691
+ LATEST_TEMPLATE_END = false;
684
692
  return tok;
685
693
  }
686
694
 
@@ -696,8 +704,8 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
696
704
  }
697
705
  S.template_braces.pop();
698
706
  tok = token(begin ? "template_head" : "template_substitution", content);
699
- tok.raw = raw;
700
- tok.end = true;
707
+ LATEST_RAW = raw;
708
+ LATEST_TEMPLATE_END = true;
701
709
  return tok;
702
710
  });
703
711
 
@@ -730,7 +738,7 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
730
738
  });
731
739
 
732
740
  var read_name = with_eof_error("Unterminated identifier name", function() {
733
- var name, ch, escaped = false;
741
+ var name = [], ch, escaped = false;
734
742
  var read_escaped_identifier_char = function() {
735
743
  escaped = true;
736
744
  next();
@@ -741,17 +749,19 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
741
749
  };
742
750
 
743
751
  // Read first character (ID_Start)
744
- if ((name = peek()) === "\\") {
745
- name = read_escaped_identifier_char();
746
- if (!is_identifier_start(name)) {
752
+ if ((ch = peek()) === "\\") {
753
+ ch = read_escaped_identifier_char();
754
+ if (!is_identifier_start(ch)) {
747
755
  parse_error("First identifier char is an invalid identifier char");
748
756
  }
749
- } else if (is_identifier_start(name)) {
757
+ } else if (is_identifier_start(ch)) {
750
758
  next();
751
759
  } else {
752
760
  return "";
753
761
  }
754
762
 
763
+ name.push(ch);
764
+
755
765
  // Read ID_Continue
756
766
  while ((ch = peek()) != null) {
757
767
  if ((ch = peek()) === "\\") {
@@ -765,12 +775,13 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
765
775
  }
766
776
  next();
767
777
  }
768
- name += ch;
778
+ name.push(ch);
769
779
  }
770
- if (RESERVED_WORDS.has(name) && escaped) {
780
+ const name_str = name.join("");
781
+ if (RESERVED_WORDS.has(name_str) && escaped) {
771
782
  parse_error("Escaped characters are not allowed in keywords");
772
783
  }
773
- return name;
784
+ return name_str;
774
785
  });
775
786
 
776
787
  var read_regexp = with_eof_error("Unterminated regular expression", function(source) {
@@ -794,7 +805,7 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
794
805
  source += ch;
795
806
  }
796
807
  const flags = read_name();
797
- return token("regexp", { source, flags });
808
+ return token("regexp", "/" + source + "/" + flags);
798
809
  });
799
810
 
800
811
  function read_operator(prefix) {
@@ -987,7 +998,9 @@ var UNARY_PREFIX = makePredicate([
987
998
 
988
999
  var UNARY_POSTFIX = makePredicate([ "--", "++" ]);
989
1000
 
990
- var ASSIGNMENT = makePredicate([ "=", "+=", "-=", "/=", "*=", "**=", "%=", ">>=", "<<=", ">>>=", "|=", "^=", "&=" ]);
1001
+ var ASSIGNMENT = makePredicate([ "=", "+=", "-=", "??=", "&&=", "||=", "/=", "*=", "**=", "%=", ">>=", "<<=", ">>>=", "|=", "^=", "&=" ]);
1002
+
1003
+ var LOGICAL_ASSIGNMENT = makePredicate([ "??=", "&&=", "||=" ]);
991
1004
 
992
1005
  var PRECEDENCE = (function(a, ret) {
993
1006
  for (var i = 0; i < a.length; ++i) {
@@ -1025,7 +1038,7 @@ function parse($TEXT, options) {
1025
1038
  // Useful because comments_before property of call with parens outside
1026
1039
  // contains both comments inside and outside these parens. Used to find the
1027
1040
  // right #__PURE__ comments for an expression
1028
- const outer_comments_before_counts = new Map();
1041
+ const outer_comments_before_counts = new WeakMap();
1029
1042
 
1030
1043
  options = defaults(options, {
1031
1044
  bare_returns : false,
@@ -1159,7 +1172,7 @@ function parse($TEXT, options) {
1159
1172
  case "string":
1160
1173
  if (S.in_directives) {
1161
1174
  var token = peek();
1162
- if (!S.token.raw.includes("\\")
1175
+ if (!LATEST_RAW.includes("\\")
1163
1176
  && (is_token(token, "punc", ";")
1164
1177
  || is_token(token, "punc", "}")
1165
1178
  || has_newline_before(token)
@@ -2142,7 +2155,12 @@ function parse($TEXT, options) {
2142
2155
  ret = _make_symbol(AST_SymbolRef);
2143
2156
  break;
2144
2157
  case "num":
2145
- ret = new AST_Number({ start: tok, end: tok, value: tok.value });
2158
+ ret = new AST_Number({
2159
+ start: tok,
2160
+ end: tok,
2161
+ value: tok.value,
2162
+ raw: LATEST_RAW
2163
+ });
2146
2164
  break;
2147
2165
  case "big_int":
2148
2166
  ret = new AST_BigInt({ start: tok, end: tok, value: tok.value });
@@ -2156,7 +2174,9 @@ function parse($TEXT, options) {
2156
2174
  });
2157
2175
  break;
2158
2176
  case "regexp":
2159
- ret = new AST_RegExp({ start: tok, end: tok, value: tok.value });
2177
+ const [_, source, flags] = tok.value.match(/^\/(.*)\/(\w*)$/);
2178
+
2179
+ ret = new AST_RegExp({ start: tok, end: tok, value: { source, flags } });
2160
2180
  break;
2161
2181
  case "atom":
2162
2182
  switch (tok.value) {
@@ -2313,7 +2333,7 @@ function parse($TEXT, options) {
2313
2333
  return subscripts(cls, allow_calls);
2314
2334
  }
2315
2335
  if (is("template_head")) {
2316
- return subscripts(template_string(false), allow_calls);
2336
+ return subscripts(template_string(), allow_calls);
2317
2337
  }
2318
2338
  if (ATOMIC_START_TOKEN.has(S.token.type)) {
2319
2339
  return subscripts(as_atom_node(), allow_calls);
@@ -2326,22 +2346,19 @@ function parse($TEXT, options) {
2326
2346
 
2327
2347
  segments.push(new AST_TemplateSegment({
2328
2348
  start: S.token,
2329
- raw: S.token.raw,
2349
+ raw: LATEST_RAW,
2330
2350
  value: S.token.value,
2331
2351
  end: S.token
2332
2352
  }));
2333
- while (!S.token.end) {
2353
+
2354
+ while (!LATEST_TEMPLATE_END) {
2334
2355
  next();
2335
2356
  handle_regexp();
2336
2357
  segments.push(expression(true));
2337
2358
 
2338
- if (!is_token("template_substitution")) {
2339
- unexpected();
2340
- }
2341
-
2342
2359
  segments.push(new AST_TemplateSegment({
2343
2360
  start: S.token,
2344
- raw: S.token.raw,
2361
+ raw: LATEST_RAW,
2345
2362
  value: S.token.value,
2346
2363
  end: S.token
2347
2364
  }));
@@ -2435,6 +2452,7 @@ function parse($TEXT, options) {
2435
2452
  left: value,
2436
2453
  operator: "=",
2437
2454
  right: expression(false),
2455
+ logical: false,
2438
2456
  end: prev()
2439
2457
  });
2440
2458
  }
@@ -3017,7 +3035,7 @@ function parse($TEXT, options) {
3017
3035
  return subscripts(new AST_PrefixedTemplateString({
3018
3036
  start: start,
3019
3037
  prefix: expr,
3020
- template_string: template_string(true),
3038
+ template_string: template_string(),
3021
3039
  end: prev()
3022
3040
  }), allow_calls);
3023
3041
  }
@@ -3203,11 +3221,13 @@ function parse($TEXT, options) {
3203
3221
  if (is("operator") && ASSIGNMENT.has(val)) {
3204
3222
  if (is_assignable(left) || (left = to_destructuring(left)) instanceof AST_Destructuring) {
3205
3223
  next();
3224
+
3206
3225
  return new AST_Assign({
3207
3226
  start : start,
3208
3227
  left : left,
3209
3228
  operator : val,
3210
3229
  right : maybe_assign(no_in),
3230
+ logical : LOGICAL_ASSIGNMENT.has(val),
3211
3231
  end : prev()
3212
3232
  });
3213
3233
  }
package/package.json CHANGED
@@ -4,9 +4,9 @@
4
4
  "homepage": "https://terser.org",
5
5
  "author": "Mihai Bazon <mihai.bazon@gmail.com> (http://lisperator.net/)",
6
6
  "license": "BSD-2-Clause",
7
- "version": "5.3.7",
7
+ "version": "5.5.1",
8
8
  "engines": {
9
- "node": "^10.0.0 || ^11.0.0 || ^12.0.0 || >=14.0.0"
9
+ "node": ">=10"
10
10
  },
11
11
  "maintainers": [
12
12
  "Fábio Santos <fabiosantosart@gmail.com>"
@@ -16,16 +16,15 @@
16
16
  "type": "module",
17
17
  "module": "./main.js",
18
18
  "exports": {
19
- ".": {
20
- "import": "./main.js",
21
- "require": "./dist/bundle.min.js"
22
- },
23
- "./package": {
24
- "default": "./package.json"
25
- },
26
- "./package.json": {
27
- "default": "./package.json"
28
- }
19
+ ".": [
20
+ {
21
+ "import": "./main.js",
22
+ "require": "./dist/bundle.min.js"
23
+ },
24
+ "./dist/bundle.min.js"
25
+ ],
26
+ "./package": "./package.json",
27
+ "./package.json": "./package.json"
29
28
  },
30
29
  "types": "tools/terser.d.ts",
31
30
  "bin": {