pkgprn 0.3.1 → 0.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +51 -9
- package/index.d.ts +1 -0
- package/index.d.ts.map +1 -1
- package/index.js +15 -0
- package/package.json +4 -3
- package/prune.js +54 -2
- package/strip-comments.js +731 -0
package/README.md
CHANGED
|
@@ -58,18 +58,20 @@ Additional optional features can be enabled via flags:
|
|
|
58
58
|
|
|
59
59
|
- **Flatten** dist directories into the package root.
|
|
60
60
|
- **Remove sourcemaps** and their `//# sourceMappingURL=` references.
|
|
61
|
+
- **Strip comments** from JavaScript files, with automatic sourcemap line-mapping adjustment.
|
|
61
62
|
|
|
62
63
|
## Options
|
|
63
64
|
|
|
64
|
-
| Flag | Type | Default | Description
|
|
65
|
-
| --------------------- | ------------------- | --------- |
|
|
66
|
-
| `--profile` | `string` | `library` | Script-retention profile (`library` or `app`).
|
|
67
|
-
| `--flatten` | `string \| boolean` | `false` | Flatten dist directories to the package root. Pass without a value to auto-detect, or provide comma-separated directory names.
|
|
68
|
-
| `--remove-sourcemaps` | `boolean` | `false` | Delete `.map` files and strip `sourceMappingURL` comments from source files.
|
|
69
|
-
| `--
|
|
70
|
-
| `--
|
|
71
|
-
| `--
|
|
72
|
-
| `--
|
|
65
|
+
| Flag | Type | Default | Description |
|
|
66
|
+
| --------------------- | ------------------- | --------- | --------------------------------------------------------------------------------------------------------------------------------- |
|
|
67
|
+
| `--profile` | `string` | `library` | Script-retention profile (`library` or `app`). |
|
|
68
|
+
| `--flatten` | `string \| boolean` | `false` | Flatten dist directories to the package root. Pass without a value to auto-detect, or provide comma-separated directory names. |
|
|
69
|
+
| `--remove-sourcemaps` | `boolean` | `false` | Delete `.map` files and strip `sourceMappingURL` comments from source files. |
|
|
70
|
+
| `--strip-comments` | `string \| boolean` | `false` | Strip comments from JS files. Pass without a value to strip all, or provide comma-separated types: `jsdoc`, `license`, `regular`. |
|
|
71
|
+
| `--optimize-files` | `boolean` | `true` | Optimize the `files` array by collapsing entries. |
|
|
72
|
+
| `--cleanup-files` | `boolean` | `true` | Remove files not listed in the `files` array. |
|
|
73
|
+
| `--version` | | | Show version number. |
|
|
74
|
+
| `--help` | | | Show help message. |
|
|
73
75
|
|
|
74
76
|
## Profiles
|
|
75
77
|
|
|
@@ -124,6 +126,32 @@ pkgprn --flatten dist,lib
|
|
|
124
126
|
7. Updates the `files` array.
|
|
125
127
|
8. Cleans up any leftover export-map stub directories that only contain a `package.json`.
|
|
126
128
|
|
|
129
|
+
## Comment Stripping
|
|
130
|
+
|
|
131
|
+
The `--strip-comments` flag removes comments from `.js`, `.mjs`, and `.cjs` files. You can target specific comment types or strip them all at once.
|
|
132
|
+
|
|
133
|
+
### Usage
|
|
134
|
+
|
|
135
|
+
```sh
|
|
136
|
+
pkgprn --strip-comments # strip all comments
|
|
137
|
+
pkgprn --strip-comments jsdoc # strip only JSDoc comments
|
|
138
|
+
pkgprn --strip-comments license,regular # strip license and regular comments
|
|
139
|
+
```
|
|
140
|
+
|
|
141
|
+
### Comment types
|
|
142
|
+
|
|
143
|
+
| Type | Description |
|
|
144
|
+
| --------- | -------------------------------------------------- |
|
|
145
|
+
| `jsdoc` | `/** … */` documentation comments |
|
|
146
|
+
| `license` | Comments containing "license", "copyright", or "©" |
|
|
147
|
+
| `regular` | All other `//` and `/* … */` comments |
|
|
148
|
+
|
|
149
|
+
Passing `--strip-comments` without a value (or with `all`) strips every type.
|
|
150
|
+
|
|
151
|
+
### Sourcemap adjustment
|
|
152
|
+
|
|
153
|
+
When comments are stripped, line numbers in the affected files change. If any `.d.ts.map` files reference a stripped JS file in their `sources`, `pkgprn` automatically rewrites the sourcemap `mappings` so that line numbers stay correct. This ensures that declaration-map "Go to Definition" navigation continues to point to the right lines after comment removal.
|
|
154
|
+
|
|
127
155
|
## Examples
|
|
128
156
|
|
|
129
157
|
### Basic library
|
|
@@ -156,6 +184,19 @@ After packing, `build` and `test` are removed; `devDependencies` and `packageMan
|
|
|
156
184
|
|
|
157
185
|
After packing, `dist/index.js` becomes `index.js`, `main` points to `index.js`, and the `dist` directory is gone.
|
|
158
186
|
|
|
187
|
+
### Library with comment stripping
|
|
188
|
+
|
|
189
|
+
```json
|
|
190
|
+
{
|
|
191
|
+
"scripts": {
|
|
192
|
+
"build": "tsc",
|
|
193
|
+
"prepack": "pkgprn --strip-comments jsdoc"
|
|
194
|
+
}
|
|
195
|
+
}
|
|
196
|
+
```
|
|
197
|
+
|
|
198
|
+
After packing, all JSDoc comments are removed from JS files and any `.d.ts.map` sourcemaps are adjusted to reflect the new line numbers.
|
|
199
|
+
|
|
159
200
|
### Application with sourcemap removal
|
|
160
201
|
|
|
161
202
|
```sh
|
|
@@ -185,6 +226,7 @@ await prunePkg(
|
|
|
185
226
|
profile: "library",
|
|
186
227
|
flatten: false,
|
|
187
228
|
removeSourcemaps: false,
|
|
229
|
+
stripComments: false, // or "all", "jsdoc", "license,regular", etc.
|
|
188
230
|
optimizeFiles: true,
|
|
189
231
|
cleanupFiles: true,
|
|
190
232
|
},
|
package/index.d.ts
CHANGED
package/index.d.ts.map
CHANGED
package/index.js
CHANGED
|
@@ -43,6 +43,11 @@ try {
|
|
|
43
43
|
description: 'remove sourcemaps',
|
|
44
44
|
default: false,
|
|
45
45
|
},
|
|
46
|
+
stripComments: {
|
|
47
|
+
type: StripCommentsParam,
|
|
48
|
+
description: 'strip comments: all (default), jsdoc, license, regular (comma-separated)',
|
|
49
|
+
default: false,
|
|
50
|
+
},
|
|
46
51
|
optimizeFiles: {
|
|
47
52
|
type: Boolean,
|
|
48
53
|
description: 'optimize files array',
|
|
@@ -99,6 +104,16 @@ async function writePackage(pkg) {
|
|
|
99
104
|
await writeFile('./package.json', `${JSON.stringify(pkg, null, 2)}\n`);
|
|
100
105
|
}
|
|
101
106
|
|
|
107
|
+
/**
|
|
108
|
+
* @param {string | false} value
|
|
109
|
+
*/
|
|
110
|
+
function StripCommentsParam(value) {
|
|
111
|
+
if (value === '') {
|
|
112
|
+
return 'all';
|
|
113
|
+
}
|
|
114
|
+
return value;
|
|
115
|
+
}
|
|
116
|
+
|
|
102
117
|
/**
|
|
103
118
|
* @param {string | false} value
|
|
104
119
|
*/
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "pkgprn",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.4.1",
|
|
4
4
|
"license": "MIT",
|
|
5
5
|
"author": "Konstantin Shutkin",
|
|
6
6
|
"bin": "./index.js",
|
|
@@ -31,8 +31,9 @@
|
|
|
31
31
|
},
|
|
32
32
|
"main": "prune.js",
|
|
33
33
|
"dependencies": {
|
|
34
|
+
"@jridgewell/sourcemap-codec": "^1.5.5",
|
|
35
|
+
"@niceties/logger": "^1.1.13",
|
|
34
36
|
"cleye": "2.2.1",
|
|
35
|
-
"jsonata": "^2.1.0"
|
|
36
|
-
"@niceties/logger": "^1.1.13"
|
|
37
|
+
"jsonata": "^2.1.0"
|
|
37
38
|
}
|
|
38
39
|
}
|
package/prune.js
CHANGED
|
@@ -1,6 +1,8 @@
|
|
|
1
1
|
import { access, mkdir, readdir, readFile, rename, rm, stat, writeFile } from 'node:fs/promises';
|
|
2
2
|
import path from 'node:path';
|
|
3
3
|
|
|
4
|
+
import { adjustSourcemapLineMappings, isStrippableFile, parseCommentTypes, stripCommentsWithLineMap } from './strip-comments.js';
|
|
5
|
+
|
|
4
6
|
/**
|
|
5
7
|
* Files always included by npm regardless of the `files` array.
|
|
6
8
|
* README & LICENSE/LICENCE are matched case-insensitively by basename (without extension).
|
|
@@ -55,6 +57,7 @@ const hardIgnored = new Set(['.git', '.npmrc', 'node_modules', 'package-lock.jso
|
|
|
55
57
|
* @property {string} profile
|
|
56
58
|
* @property {string|boolean} flatten
|
|
57
59
|
* @property {boolean} removeSourcemaps
|
|
60
|
+
* @property {string|boolean} stripComments
|
|
58
61
|
* @property {boolean} optimizeFiles
|
|
59
62
|
* @property {boolean} cleanupFiles
|
|
60
63
|
*/
|
|
@@ -117,6 +120,54 @@ export async function prunePkg(pkg, options, logger) {
|
|
|
117
120
|
}
|
|
118
121
|
}
|
|
119
122
|
|
|
123
|
+
if (options.stripComments) {
|
|
124
|
+
const typesToStrip = parseCommentTypes(/** @type {string | true} */ (options.stripComments));
|
|
125
|
+
logger.update('stripping comments...');
|
|
126
|
+
const allFiles = await walkDir('.', ['node_modules']);
|
|
127
|
+
const jsFiles = allFiles.filter(isStrippableFile);
|
|
128
|
+
const dtsMapFiles = allFiles.filter(f => f.endsWith('.d.ts.map'));
|
|
129
|
+
|
|
130
|
+
// Strip comments from JS files and collect line maps keyed by file path.
|
|
131
|
+
/** @type {Map<string, Int32Array>} */
|
|
132
|
+
const lineMaps = new Map();
|
|
133
|
+
for (const file of jsFiles) {
|
|
134
|
+
const content = await readFile(file, 'utf8');
|
|
135
|
+
const { result: stripped, lineMap } = stripCommentsWithLineMap(content, typesToStrip);
|
|
136
|
+
if (lineMap) {
|
|
137
|
+
await writeFile(file, stripped, 'utf8');
|
|
138
|
+
lineMaps.set(path.normalize(file), lineMap);
|
|
139
|
+
}
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
// Adjust .d.ts.map files that reference any of the stripped JS files.
|
|
143
|
+
if (lineMaps.size > 0 && dtsMapFiles.length > 0) {
|
|
144
|
+
for (const mapFile of dtsMapFiles) {
|
|
145
|
+
const mapContent = await readFile(mapFile, 'utf8');
|
|
146
|
+
let map;
|
|
147
|
+
try {
|
|
148
|
+
map = JSON.parse(mapContent);
|
|
149
|
+
} catch {
|
|
150
|
+
continue;
|
|
151
|
+
}
|
|
152
|
+
if (map.version !== 3 || !Array.isArray(map.sources)) continue;
|
|
153
|
+
|
|
154
|
+
const mapDir = path.dirname(mapFile) || '.';
|
|
155
|
+
let adjusted = false;
|
|
156
|
+
for (let si = 0; si < map.sources.length; si++) {
|
|
157
|
+
const resolved = path.normalize(path.join(mapDir, map.sourceRoot || '', map.sources[si]));
|
|
158
|
+
const lineMap = lineMaps.get(resolved);
|
|
159
|
+
if (lineMap) {
|
|
160
|
+
adjustSourcemapLineMappings(map, si, lineMap);
|
|
161
|
+
adjusted = true;
|
|
162
|
+
}
|
|
163
|
+
}
|
|
164
|
+
if (adjusted) {
|
|
165
|
+
await writeFile(mapFile, `${JSON.stringify(map, null, '\t')}\n`, 'utf8');
|
|
166
|
+
}
|
|
167
|
+
}
|
|
168
|
+
}
|
|
169
|
+
}
|
|
170
|
+
|
|
120
171
|
if (pkg.files && Array.isArray(pkg.files) && options.optimizeFiles) {
|
|
121
172
|
const filterFiles = getAlwaysIncludedFiles(pkg);
|
|
122
173
|
|
|
@@ -532,7 +583,7 @@ async function adjustSourcemapPaths(newMapPath, oldMapPath, oldToNew) {
|
|
|
532
583
|
delete map.sourceRoot;
|
|
533
584
|
}
|
|
534
585
|
|
|
535
|
-
await writeFile(newMapPath, JSON.stringify(map, null, 2)
|
|
586
|
+
await writeFile(newMapPath, `${JSON.stringify(map, null, 2)}\n`, 'utf8');
|
|
536
587
|
}
|
|
537
588
|
|
|
538
589
|
/**
|
|
@@ -541,7 +592,8 @@ async function adjustSourcemapPaths(newMapPath, oldMapPath, oldToNew) {
|
|
|
541
592
|
* @returns {boolean}
|
|
542
593
|
*/
|
|
543
594
|
function isSubDirectory(parent, child) {
|
|
544
|
-
|
|
595
|
+
const rel = path.relative(parent, child);
|
|
596
|
+
return rel !== '' && !rel.startsWith('..');
|
|
545
597
|
}
|
|
546
598
|
|
|
547
599
|
/**
|
|
@@ -0,0 +1,731 @@
|
|
|
1
|
+
import { decode, encode } from '@jridgewell/sourcemap-codec';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* @typedef {'jsdoc' | 'license' | 'regular'} CommentType
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* @typedef {Object} CommentRange
|
|
9
|
+
* @property {number} start - Start index in source (inclusive)
|
|
10
|
+
* @property {number} end - End index in source (exclusive)
|
|
11
|
+
* @property {CommentType} type - Classification of the comment
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
/**
|
|
15
|
+
* @typedef {Object} StripResult
|
|
16
|
+
* @property {string} result - The stripped source text
|
|
17
|
+
* @property {Int32Array | null} lineMap - Maps 0-based original line → 0-based new line (-1 if removed). null when nothing changed.
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
const jsExtensions = ['.js', '.mjs', '.cjs'];
|
|
21
|
+
|
|
22
|
+
/**
|
|
23
|
+
* Check if a file path has a JS extension that may contain comments.
|
|
24
|
+
* @param {string} file
|
|
25
|
+
* @returns {boolean}
|
|
26
|
+
*/
|
|
27
|
+
export function isStrippableFile(file) {
|
|
28
|
+
return jsExtensions.some(ext => file.endsWith(ext));
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
/**
|
|
32
|
+
* Keywords after which a `/` token begins a regex literal rather than division.
|
|
33
|
+
*/
|
|
34
|
+
const regexPrecedingKeywords = new Set([
|
|
35
|
+
'return',
|
|
36
|
+
'throw',
|
|
37
|
+
'typeof',
|
|
38
|
+
'void',
|
|
39
|
+
'delete',
|
|
40
|
+
'new',
|
|
41
|
+
'in',
|
|
42
|
+
'instanceof',
|
|
43
|
+
'case',
|
|
44
|
+
'yield',
|
|
45
|
+
'await',
|
|
46
|
+
'of',
|
|
47
|
+
'export',
|
|
48
|
+
'import',
|
|
49
|
+
'default',
|
|
50
|
+
'extends',
|
|
51
|
+
'else',
|
|
52
|
+
]);
|
|
53
|
+
|
|
54
|
+
/**
|
|
55
|
+
* Classify a block comment based on its content.
|
|
56
|
+
* Priority: license > jsdoc > regular.
|
|
57
|
+
*
|
|
58
|
+
* @param {string} source - Full source text
|
|
59
|
+
* @param {number} start - Start index of the comment (at `/`)
|
|
60
|
+
* @param {number} end - End index of the comment (after `*/`)
|
|
61
|
+
* @returns {CommentType}
|
|
62
|
+
*/
|
|
63
|
+
function classifyBlockComment(source, start, end) {
|
|
64
|
+
// License: starts with /*! or contains @license or @preserve
|
|
65
|
+
if (source[start + 2] === '!') {
|
|
66
|
+
return 'license';
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
// Check for @license or @preserve inside the comment body
|
|
70
|
+
const body = source.slice(start + 2, end - 2);
|
|
71
|
+
if (body.includes('@license') || body.includes('@preserve')) {
|
|
72
|
+
return 'license';
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
// JSDoc: starts with /** (and is not the degenerate /**/ which is length 4)
|
|
76
|
+
if (source[start + 2] === '*' && end - start > 4) {
|
|
77
|
+
return 'jsdoc';
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
return 'regular';
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
/**
|
|
84
|
+
* Scan source code and return an array of comment ranges with their types.
|
|
85
|
+
* Correctly handles:
|
|
86
|
+
* - Single and double quoted strings (with escapes)
|
|
87
|
+
* - Template literals (with nested `${…}` expressions, arbitrarily deep)
|
|
88
|
+
* - Regular expression literals (with character classes `[…]`)
|
|
89
|
+
* - Hashbang lines (`#!/…`)
|
|
90
|
+
* - Single-line comments (`// …`)
|
|
91
|
+
* - Block comments (`/* … */`)
|
|
92
|
+
*
|
|
93
|
+
* @param {string} source
|
|
94
|
+
* @returns {CommentRange[]}
|
|
95
|
+
*/
|
|
96
|
+
export function scanComments(source) {
|
|
97
|
+
/** @type {CommentRange[]} */
|
|
98
|
+
const comments = [];
|
|
99
|
+
const len = source.length;
|
|
100
|
+
let i = 0;
|
|
101
|
+
|
|
102
|
+
// Stack for template literal nesting.
|
|
103
|
+
// Each entry holds the brace depth inside a `${…}` expression.
|
|
104
|
+
// When the stack is non-empty the main loop is inside a template expression.
|
|
105
|
+
/** @type {number[]} */
|
|
106
|
+
const templateStack = [];
|
|
107
|
+
|
|
108
|
+
// For regex-vs-division disambiguation we track whether the last
|
|
109
|
+
// *significant* (non-whitespace, non-comment) token could be the end
|
|
110
|
+
// of an expression. If it could, `/` is the division operator;
|
|
111
|
+
// otherwise `/` starts a regex literal.
|
|
112
|
+
let exprEnd = false;
|
|
113
|
+
|
|
114
|
+
// --- Hashbang ----------------------------------------------------------
|
|
115
|
+
if (len >= 2 && source[0] === '#' && source[1] === '!') {
|
|
116
|
+
// Skip the entire hashbang line — it is never a comment.
|
|
117
|
+
while (i < len && source[i] !== '\n') i++;
|
|
118
|
+
// exprEnd stays false (hashbang is like the start of the file)
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
while (i < len) {
|
|
122
|
+
const ch = source.charCodeAt(i);
|
|
123
|
+
|
|
124
|
+
// ---- whitespace (skip, preserve exprEnd) --------------------------
|
|
125
|
+
// space, tab, newline, carriage return, vertical tab, form feed,
|
|
126
|
+
// BOM / NBSP (0xFEFF, 0x00A0) – we keep it simple: anything ≤ 0x20
|
|
127
|
+
// plus the two common Unicode whitespace chars.
|
|
128
|
+
if (ch <= 0x20 || ch === 0xfeff || ch === 0xa0) {
|
|
129
|
+
i++;
|
|
130
|
+
continue;
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
// ---- single-line comment ------------------------------------------
|
|
134
|
+
if (ch === 0x2f /* / */ && i + 1 < len && source.charCodeAt(i + 1) === 0x2f /* / */) {
|
|
135
|
+
const start = i;
|
|
136
|
+
i += 2;
|
|
137
|
+
while (i < len && source.charCodeAt(i) !== 0x0a /* \n */) i++;
|
|
138
|
+
comments.push({ start, end: i, type: 'regular' });
|
|
139
|
+
// exprEnd unchanged (comments are transparent)
|
|
140
|
+
continue;
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
// ---- block comment ------------------------------------------------
|
|
144
|
+
if (ch === 0x2f /* / */ && i + 1 < len && source.charCodeAt(i + 1) === 0x2a /* * */) {
|
|
145
|
+
const start = i;
|
|
146
|
+
i += 2;
|
|
147
|
+
while (i < len && !((source.charCodeAt(i) === 0x2a /* * */ && i + 1 < len && source.charCodeAt(i + 1) === 0x2f) /* / */)) {
|
|
148
|
+
i++;
|
|
149
|
+
}
|
|
150
|
+
if (i < len) i += 2; // skip closing */
|
|
151
|
+
comments.push({ start, end: i, type: classifyBlockComment(source, start, i) });
|
|
152
|
+
// exprEnd unchanged
|
|
153
|
+
continue;
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
// ---- regex literal ------------------------------------------------
|
|
157
|
+
if (ch === 0x2f /* / */ && !exprEnd) {
|
|
158
|
+
i = skipRegex(source, i, len);
|
|
159
|
+
exprEnd = true; // a regex is a value
|
|
160
|
+
continue;
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
// ---- single-quoted string ----------------------------------------
|
|
164
|
+
if (ch === 0x27 /* ' */) {
|
|
165
|
+
i = skipSingleString(source, i, len);
|
|
166
|
+
exprEnd = true;
|
|
167
|
+
continue;
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
// ---- double-quoted string ----------------------------------------
|
|
171
|
+
if (ch === 0x22 /* " */) {
|
|
172
|
+
i = skipDoubleString(source, i, len);
|
|
173
|
+
exprEnd = true;
|
|
174
|
+
continue;
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
// ---- template literal --------------------------------------------
|
|
178
|
+
if (ch === 0x60 /* ` */) {
|
|
179
|
+
i = scanTemplateTail(source, i + 1, len, templateStack, comments);
|
|
180
|
+
exprEnd = true;
|
|
181
|
+
continue;
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
// ---- closing brace: may end a template expression ----------------
|
|
185
|
+
if (ch === 0x7d /* } */) {
|
|
186
|
+
if (templateStack.length > 0) {
|
|
187
|
+
const depth = templateStack[templateStack.length - 1];
|
|
188
|
+
if (depth === 0) {
|
|
189
|
+
// Returning from a template expression back to the template body.
|
|
190
|
+
templateStack.pop();
|
|
191
|
+
i = scanTemplateTail(source, i + 1, len, templateStack, comments);
|
|
192
|
+
exprEnd = true;
|
|
193
|
+
continue;
|
|
194
|
+
}
|
|
195
|
+
templateStack[templateStack.length - 1] = depth - 1;
|
|
196
|
+
}
|
|
197
|
+
i++;
|
|
198
|
+
// After `}` we conservatively assume regex can follow.
|
|
199
|
+
// This is correct for block statements, if/for/while bodies,
|
|
200
|
+
// class bodies, etc. For the rare `({}) / x` pattern it would
|
|
201
|
+
// misidentify division as regex, but that is harmless for
|
|
202
|
+
// comment detection (we just skip over the "regex" body).
|
|
203
|
+
exprEnd = false;
|
|
204
|
+
continue;
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
// ---- opening brace -----------------------------------------------
|
|
208
|
+
if (ch === 0x7b /* { */) {
|
|
209
|
+
if (templateStack.length > 0) {
|
|
210
|
+
templateStack[templateStack.length - 1]++;
|
|
211
|
+
}
|
|
212
|
+
i++;
|
|
213
|
+
exprEnd = false;
|
|
214
|
+
continue;
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
// ---- identifier / keyword / number --------------------------------
|
|
218
|
+
if (isIdentStart(ch) || isDigit(ch)) {
|
|
219
|
+
const wordStart = i;
|
|
220
|
+
i++;
|
|
221
|
+
while (i < len && isIdentPart(source.charCodeAt(i))) i++;
|
|
222
|
+
const word = source.slice(wordStart, i);
|
|
223
|
+
exprEnd = !regexPrecedingKeywords.has(word);
|
|
224
|
+
continue;
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
// ---- ++ and -- ----------------------------------------------------
|
|
228
|
+
if ((ch === 0x2b /* + */ || ch === 0x2d) /* - */ && i + 1 < len && source.charCodeAt(i + 1) === ch) {
|
|
229
|
+
i += 2;
|
|
230
|
+
exprEnd = true; // `x++` / `x--` end an expression
|
|
231
|
+
continue;
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
// ---- closing brackets ) ] ----------------------------------------
|
|
235
|
+
if (ch === 0x29 /* ) */ || ch === 0x5d /* ] */) {
|
|
236
|
+
i++;
|
|
237
|
+
exprEnd = true;
|
|
238
|
+
continue;
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
// ---- everything else: operators, punctuation ----------------------
|
|
242
|
+
i++;
|
|
243
|
+
exprEnd = false;
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
return comments;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
// ---------------------------------------------------------------------------
|
|
250
|
+
// Character classification helpers
|
|
251
|
+
// ---------------------------------------------------------------------------
|
|
252
|
+
|
|
253
|
+
/**
|
|
254
|
+
* @param {number} ch - char code
|
|
255
|
+
* @returns {boolean}
|
|
256
|
+
*/
|
|
257
|
+
function isDigit(ch) {
|
|
258
|
+
return ch >= 0x30 && ch <= 0x39; // 0-9
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
/**
|
|
262
|
+
* @param {number} ch - char code
|
|
263
|
+
* @returns {boolean}
|
|
264
|
+
*/
|
|
265
|
+
function isIdentStart(ch) {
|
|
266
|
+
return (
|
|
267
|
+
(ch >= 0x41 && ch <= 0x5a) || // A-Z
|
|
268
|
+
(ch >= 0x61 && ch <= 0x7a) || // a-z
|
|
269
|
+
ch === 0x5f || // _
|
|
270
|
+
ch === 0x24 || // $
|
|
271
|
+
ch === 0x5c || // \ (unicode escape in identifier)
|
|
272
|
+
ch > 0x7f // non-ASCII (simplified – covers all Unicode ID_Start)
|
|
273
|
+
);
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
/**
|
|
277
|
+
* @param {number} ch - char code
|
|
278
|
+
* @returns {boolean}
|
|
279
|
+
*/
|
|
280
|
+
function isIdentPart(ch) {
|
|
281
|
+
return isIdentStart(ch) || isDigit(ch);
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
// ---------------------------------------------------------------------------
|
|
285
|
+
// Skip helpers — each returns the new index *after* the construct.
|
|
286
|
+
// ---------------------------------------------------------------------------
|
|
287
|
+
|
|
288
|
+
/**
|
|
289
|
+
* Skip a single-quoted string starting at index `i` (which points at the
|
|
290
|
+
* opening `'`). Returns the index after the closing `'`.
|
|
291
|
+
* @param {string} s
|
|
292
|
+
* @param {number} i
|
|
293
|
+
* @param {number} len
|
|
294
|
+
* @returns {number}
|
|
295
|
+
*/
|
|
296
|
+
function skipSingleString(s, i, len) {
|
|
297
|
+
i++; // skip opening '
|
|
298
|
+
while (i < len) {
|
|
299
|
+
const ch = s.charCodeAt(i);
|
|
300
|
+
if (ch === 0x27 /* ' */) {
|
|
301
|
+
i++;
|
|
302
|
+
break;
|
|
303
|
+
}
|
|
304
|
+
if (ch === 0x5c /* \ */) {
|
|
305
|
+
i += 2;
|
|
306
|
+
continue;
|
|
307
|
+
} // escape
|
|
308
|
+
if (ch === 0x0a /* \n */ || ch === 0x0d /* \r */) break; // unterminated
|
|
309
|
+
i++;
|
|
310
|
+
}
|
|
311
|
+
return i;
|
|
312
|
+
}
|
|
313
|
+
|
|
314
|
+
/**
|
|
315
|
+
* Skip a double-quoted string starting at index `i` (which points at the
|
|
316
|
+
* opening `"`). Returns the index after the closing `"`.
|
|
317
|
+
* @param {string} s
|
|
318
|
+
* @param {number} i
|
|
319
|
+
* @param {number} len
|
|
320
|
+
* @returns {number}
|
|
321
|
+
*/
|
|
322
|
+
function skipDoubleString(s, i, len) {
|
|
323
|
+
i++; // skip opening "
|
|
324
|
+
while (i < len) {
|
|
325
|
+
const ch = s.charCodeAt(i);
|
|
326
|
+
if (ch === 0x22 /* " */) {
|
|
327
|
+
i++;
|
|
328
|
+
break;
|
|
329
|
+
}
|
|
330
|
+
if (ch === 0x5c /* \ */) {
|
|
331
|
+
i += 2;
|
|
332
|
+
continue;
|
|
333
|
+
}
|
|
334
|
+
if (ch === 0x0a || ch === 0x0d) break; // unterminated
|
|
335
|
+
i++;
|
|
336
|
+
}
|
|
337
|
+
return i;
|
|
338
|
+
}
|
|
339
|
+
|
|
340
|
+
/**
|
|
341
|
+
* Skip a regex literal starting at index `i` (which points at the opening `/`).
|
|
342
|
+
* Handles character classes `[…]` and escape sequences.
|
|
343
|
+
* Returns the index after the closing `/` and any flags.
|
|
344
|
+
* @param {string} s
|
|
345
|
+
* @param {number} i
|
|
346
|
+
* @param {number} len
|
|
347
|
+
* @returns {number}
|
|
348
|
+
*/
|
|
349
|
+
function skipRegex(s, i, len) {
|
|
350
|
+
i++; // skip opening /
|
|
351
|
+
while (i < len) {
|
|
352
|
+
const ch = s.charCodeAt(i);
|
|
353
|
+
if (ch === 0x5c /* \ */) {
|
|
354
|
+
i += 2; // skip escaped char
|
|
355
|
+
continue;
|
|
356
|
+
}
|
|
357
|
+
if (ch === 0x5b /* [ */) {
|
|
358
|
+
// character class — `]` inside does not end the regex
|
|
359
|
+
i++;
|
|
360
|
+
while (i < len) {
|
|
361
|
+
const cc = s.charCodeAt(i);
|
|
362
|
+
if (cc === 0x5c /* \ */) {
|
|
363
|
+
i += 2;
|
|
364
|
+
continue;
|
|
365
|
+
}
|
|
366
|
+
if (cc === 0x5d /* ] */) {
|
|
367
|
+
i++;
|
|
368
|
+
break;
|
|
369
|
+
}
|
|
370
|
+
if (cc === 0x0a || cc === 0x0d) break; // safety: unterminated
|
|
371
|
+
i++;
|
|
372
|
+
}
|
|
373
|
+
continue;
|
|
374
|
+
}
|
|
375
|
+
if (ch === 0x2f /* / */) {
|
|
376
|
+
i++; // skip closing /
|
|
377
|
+
// consume flags: [a-z] (dgimsvy…)
|
|
378
|
+
while (i < len && isRegexFlag(s.charCodeAt(i))) i++;
|
|
379
|
+
break;
|
|
380
|
+
}
|
|
381
|
+
if (ch === 0x0a || ch === 0x0d) break; // unterminated on this line
|
|
382
|
+
i++;
|
|
383
|
+
}
|
|
384
|
+
return i;
|
|
385
|
+
}
|
|
386
|
+
|
|
387
|
+
/**
|
|
388
|
+
* @param {number} ch
|
|
389
|
+
* @returns {boolean}
|
|
390
|
+
*/
|
|
391
|
+
function isRegexFlag(ch) {
|
|
392
|
+
return ch >= 0x61 && ch <= 0x7a; // a-z
|
|
393
|
+
}
|
|
394
|
+
|
|
395
|
+
/**
|
|
396
|
+
* Scan the body of a template literal starting *after* the opening `` ` ``
|
|
397
|
+
* (or after the `}` that closes a template expression).
|
|
398
|
+
*
|
|
399
|
+
* If we hit `${`, we push onto `templateStack` and return to the main loop
|
|
400
|
+
* so that the expression is parsed as normal code (which may contain
|
|
401
|
+
* comments, nested templates, etc.).
|
|
402
|
+
*
|
|
403
|
+
* If we hit the closing `` ` ``, we return and the template is done.
|
|
404
|
+
*
|
|
405
|
+
* @param {string} s
|
|
406
|
+
* @param {number} i - index right after the `` ` `` or `}`
|
|
407
|
+
* @param {number} len
|
|
408
|
+
* @param {number[]} templateStack
|
|
409
|
+
* @param {CommentRange[]} comments - passed through so inner comments are recorded
|
|
410
|
+
* @returns {number} new index
|
|
411
|
+
*/
|
|
412
|
+
function scanTemplateTail(s, i, len, templateStack, comments) {
|
|
413
|
+
void comments; // comments only found inside ${} which returns to main loop
|
|
414
|
+
while (i < len) {
|
|
415
|
+
const ch = s.charCodeAt(i);
|
|
416
|
+
if (ch === 0x5c /* \ */) {
|
|
417
|
+
i += 2; // skip escape sequence
|
|
418
|
+
continue;
|
|
419
|
+
}
|
|
420
|
+
if (ch === 0x60 /* ` */) {
|
|
421
|
+
i++; // closing backtick
|
|
422
|
+
return i;
|
|
423
|
+
}
|
|
424
|
+
if (ch === 0x24 /* $ */ && i + 1 < len && s.charCodeAt(i + 1) === 0x7b /* { */) {
|
|
425
|
+
i += 2; // skip ${
|
|
426
|
+
templateStack.push(0); // push new brace depth for this expression
|
|
427
|
+
return i; // return to main loop for expression parsing
|
|
428
|
+
}
|
|
429
|
+
i++;
|
|
430
|
+
}
|
|
431
|
+
return i;
|
|
432
|
+
}
|
|
433
|
+
|
|
434
|
+
// ---------------------------------------------------------------------------
|
|
435
|
+
// Public API
|
|
436
|
+
// ---------------------------------------------------------------------------
|
|
437
|
+
|
|
438
|
+
/**
|
|
439
|
+
* Parse the `--strip-comments` flag value into a `Set` of comment types.
|
|
440
|
+
*
|
|
441
|
+
* - `'all'` or `true` → `{'jsdoc', 'license', 'regular'}`
|
|
442
|
+
* - `'jsdoc,regular'` → `{'jsdoc', 'regular'}`
|
|
443
|
+
*
|
|
444
|
+
* @param {string | true} value
|
|
445
|
+
* @returns {Set<CommentType>}
|
|
446
|
+
*/
|
|
447
|
+
export function parseCommentTypes(value) {
|
|
448
|
+
if (value === true || value === 'all') {
|
|
449
|
+
return new Set(/** @type {CommentType[]} */ (['jsdoc', 'license', 'regular']));
|
|
450
|
+
}
|
|
451
|
+
|
|
452
|
+
const valid = /** @type {CommentType[]} */ (['jsdoc', 'license', 'regular']);
|
|
453
|
+
const parts = String(value)
|
|
454
|
+
.split(',')
|
|
455
|
+
.map(s => s.trim())
|
|
456
|
+
.filter(Boolean);
|
|
457
|
+
|
|
458
|
+
/** @type {Set<CommentType>} */
|
|
459
|
+
const result = new Set();
|
|
460
|
+
|
|
461
|
+
for (const part of parts) {
|
|
462
|
+
if (part === 'all') {
|
|
463
|
+
return new Set(valid);
|
|
464
|
+
}
|
|
465
|
+
if (!valid.includes(/** @type {CommentType} */ (part))) {
|
|
466
|
+
throw new Error(`unknown comment type "${part}" (expected: ${valid.join(', ')}, all)`);
|
|
467
|
+
}
|
|
468
|
+
result.add(/** @type {CommentType} */ (part));
|
|
469
|
+
}
|
|
470
|
+
|
|
471
|
+
if (result.size === 0) {
|
|
472
|
+
return new Set(valid); // fallback to all
|
|
473
|
+
}
|
|
474
|
+
|
|
475
|
+
return result;
|
|
476
|
+
}
|
|
477
|
+
|
|
478
|
+
/**
|
|
479
|
+
* Strip comments from `source` whose type is in `typesToStrip`.
|
|
480
|
+
*
|
|
481
|
+
* @param {string} source
|
|
482
|
+
* @param {Set<CommentType>} typesToStrip
|
|
483
|
+
* @returns {string}
|
|
484
|
+
*/
|
|
485
|
+
export function stripComments(source, typesToStrip) {
|
|
486
|
+
return stripCommentsWithLineMap(source, typesToStrip).result;
|
|
487
|
+
}
|
|
488
|
+
|
|
489
|
+
/**
|
|
490
|
+
* Strip comments and return both the stripped source and a line map that
|
|
491
|
+
* tracks where each original line ended up in the output.
|
|
492
|
+
*
|
|
493
|
+
* @param {string} source
|
|
494
|
+
* @param {Set<CommentType>} typesToStrip
|
|
495
|
+
* @returns {StripResult}
|
|
496
|
+
*/
|
|
497
|
+
export function stripCommentsWithLineMap(source, typesToStrip) {
  // Phase 0: locate every comment in the source (scanComments is defined
  // elsewhere in this file; each entry carries { start, end, type }).
  const comments = scanComments(source);

  // Nothing to do — lineMap: null signals "identity mapping" to callers.
  if (comments.length === 0) return { result: source, lineMap: null };

  // Filter to only the comments we want to remove.
  const toRemove = comments.filter(c => typesToStrip.has(c.type));

  if (toRemove.length === 0) return { result: source, lineMap: null };

  // Phase 1: build output by copying the non-removed ranges between
  // successive comments. `toRemove` is assumed sorted by `start` and
  // non-overlapping — TODO confirm scanComments guarantees this; both the
  // copy loop and translateOffset below rely on it.
  /** @type {string[]} */
  const parts = [];
  let pos = 0;

  for (const { start, end } of toRemove) {
    if (start > pos) {
      parts.push(source.slice(pos, start));
    }
    pos = end;
  }

  if (pos < source.length) {
    parts.push(source.slice(pos));
  }

  let intermediate = parts.join('');

  // --- Build original-line → intermediate-line mapping -------------------
  // For every original offset, compute how many chars were removed before it.
  // Then convert original line-start offsets to intermediate offsets and
  // derive intermediate line numbers.

  const origLines = source.split('\n');
  const origLineCount = origLines.length;

  // removedBefore(offset) = total chars removed in ranges fully before offset.
  // We also detect if an offset falls inside a removed range.

  /**
   * Translate an original offset to an intermediate offset.
   * Returns -1 if the offset is inside a removed range.
   * NOTE(review): linear scan over `toRemove` on every call; combined with
   * the per-character loop below this is O(lines × comments) worst case.
   * Fine for typical files, but a prefix-sum + binary search would make it
   * O(log comments) per lookup if this ever shows up in profiles.
   * @param {number} offset
   * @returns {number}
   */
  function translateOffset(offset) {
    let removed = 0;
    for (const { start, end } of toRemove) {
      if (offset < start) break; // ranges sorted: nothing later can contain offset
      if (offset < end) return -1; // inside removed range
      removed += end - start;
    }
    return offset - removed;
  }

  // For each original line, figure out which intermediate line it maps to.
  // An original line maps to -1 if its entire non-whitespace content was
  // inside removed ranges (i.e. the line becomes blank/whitespace-only).
  // Note this also means originally-blank lines map to -1 — any sourcemap
  // segments on them will later be dropped by adjustSourcemapLineMappings.
  const intermediateText = intermediate;
  const intermediateLineStarts = buildLineStarts(intermediateText);

  /** @type {Int32Array} */
  const origToIntermediate = new Int32Array(origLineCount).fill(-1);
  let origOffset = 0;
  for (let oi = 0; oi < origLineCount; oi++) {
    const lineLen = origLines[oi].length;
    // Check if any content on this line survives.
    // We try the line-start offset; if it's inside a removed range
    // the whole beginning is gone, but content may survive later.
    // The most reliable way: translate the offset of each non-WS char.
    let survived = false;
    for (let ci = 0; ci < lineLen; ci++) {
      const ch = source.charCodeAt(origOffset + ci);
      // skip whitespace chars (space, tab, CR) — they don't count as
      // surviving content
      if (ch === 0x20 || ch === 0x09 || ch === 0x0d) continue;
      const mapped = translateOffset(origOffset + ci);
      if (mapped !== -1) {
        survived = true;
        // Convert intermediate offset to intermediate line number.
        origToIntermediate[oi] = offsetToLine(intermediateLineStarts, mapped);
        break;
      }
    }
    if (!survived) {
      origToIntermediate[oi] = -1; // redundant with fill(-1), kept for clarity
    }
    origOffset += lineLen + 1; // +1 for the '\n' (split removed it)
  }

  // --- Apply cleanup (same logic as the plain stripping path) ------------

  // Trim trailing whitespace from every line.
  intermediate = intermediate.replace(/[ \t]+$/gm, '');

  // Collapse 3+ consecutive newlines into 2 newlines.
  intermediate = intermediate.replace(/\n{3,}/g, '\n\n');

  // Remove leading blank lines (preserve hashbang).
  if (intermediate.startsWith('#!')) {
    const hashbangEnd = intermediate.indexOf('\n');
    if (hashbangEnd !== -1) {
      const before = intermediate.slice(0, hashbangEnd + 1);
      const after = intermediate.slice(hashbangEnd + 1).replace(/^\n+/, '');
      intermediate = before + after;
    }
  } else {
    intermediate = intermediate.replace(/^\n+/, '');
  }

  // Ensure the file ends with exactly one newline (if it originally did).
  if (source.endsWith('\n') && intermediate.length > 0) {
    intermediate = intermediate.replace(/\n*$/, '\n');
  }

  const result = intermediate;

  // --- Build intermediate-line → final-line mapping ----------------------
  // The cleanup may have removed/collapsed lines from the intermediateText.
  // We line up intermediateText lines with final lines by content matching:
  // a greedy two-pointer walk that advances the final pointer only on an
  // exact match. Cleanup only deletes whole blank lines (after trailing-WS
  // trim), so surviving lines appear in both texts in the same order.
  const finalLines = result.split('\n');
  const intLines = intermediateText.split('\n');

  // Trim trailing WS from intermediate lines to match what cleanup did.
  const intLinesTrimmed = intLines.map(l => l.replace(/[ \t]+$/, ''));

  /** @type {Int32Array} */
  const intermediateToFinal = new Int32Array(intLines.length).fill(-1);
  let fi = 0;
  for (let ii = 0; ii < intLinesTrimmed.length && fi < finalLines.length; ii++) {
    if (intLinesTrimmed[ii] === finalLines[fi]) {
      intermediateToFinal[ii] = fi;
      fi++;
    }
    // else: this intermediate line was removed by cleanup → stays -1
  }

  // --- Compose: original → intermediate → final --------------------------
  /** @type {Int32Array} */
  const lineMap = new Int32Array(origLineCount).fill(-1);
  for (let oi = 0; oi < origLineCount; oi++) {
    const il = origToIntermediate[oi];
    if (il >= 0 && il < intermediateToFinal.length) {
      lineMap[oi] = intermediateToFinal[il];
    }
  }

  return { result, lineMap };
}
|
|
646
|
+
|
|
647
|
+
/**
 * Compute the character offset at which each line of `text` begins.
 * Lines are 0-based and line 0 always starts at offset 0, so the result
 * is never empty.
 * @param {string} text
 * @returns {number[]} `result[i]` is the offset of the first char of line `i`
 */
function buildLineStarts(text) {
  /** @type {number[]} */
  const lineStarts = [0];
  // Jump from newline to newline instead of testing every character.
  let nl = text.indexOf('\n');
  while (nl !== -1) {
    lineStarts.push(nl + 1);
    nl = text.indexOf('\n', nl + 1);
  }
  return lineStarts;
}
|
|
663
|
+
|
|
664
|
+
/**
 * Given sorted line-start offsets, find which line a char offset falls on.
 * @param {number[]} lineStarts
 * @param {number} offset
 * @returns {number} 0-based line number
 */
function offsetToLine(lineStarts, offset) {
  // Upper-bound binary search: locate the first line start strictly greater
  // than `offset`; the line containing `offset` is the one just before it.
  let low = 1;
  let high = lineStarts.length;
  while (low < high) {
    const mid = (low + high) >>> 1;
    if (lineStarts[mid] <= offset) {
      low = mid + 1;
    } else {
      high = mid;
    }
  }
  return low - 1;
}
|
|
684
|
+
|
|
685
|
+
/**
 * Adjust a parsed sourcemap (v3) whose `sources` reference a file that had
 * comments stripped: rewrite the original-line field of every segment that
 * points at `sourceIndex` using `lineMap`.
 *
 * Segments whose original line maps to -1 (the line was removed) or falls
 * outside `lineMap`'s range are dropped from the output.
 *
 * @param {{ version: number, mappings: string, sources?: string[], names?: string[], [k: string]: unknown }} map - Parsed sourcemap object (mutated in place).
 * @param {number} sourceIndex - Index in `map.sources` of the stripped file.
 * @param {Int32Array} lineMap - 0-based original line → 0-based new line (-1 if removed).
 */
export function adjustSourcemapLineMappings(map, sourceIndex, lineMap) {
  // Only v3 maps with an encoded `mappings` string are supported.
  if (map.version !== 3 || typeof map.mappings !== 'string') return;

  const decoded = decode(map.mappings);

  for (let li = 0; li < decoded.length; li++) {
    // Rebuild each generated line, keeping only the surviving segments
    // (the original order of segments is preserved).
    const kept = [];
    for (const seg of decoded[li]) {
      // 1-field segments carry no source reference; segments for other
      // sources are untouched — keep both as-is.
      if (seg.length < 4 || seg[1] !== sourceIndex) {
        kept.push(seg);
        continue;
      }
      const seg4 = /** @type {[number, number, number, number, ...number[]]} */ (seg);
      const origLine = seg4[2]; // 0-based
      // Drop segments pointing outside the known original line range…
      if (origLine < 0 || origLine >= lineMap.length) continue;
      const newLine = lineMap[origLine];
      // …or at a line that the comment stripping removed entirely.
      if (newLine === -1) continue;
      seg4[2] = newLine;
      kept.push(seg4);
    }
    decoded[li] = kept;
  }

  map.mappings = encode(decoded);
}
|