@percy/core 1.31.13 → 1.31.14-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/dist/api.js +2 -2
- package/dist/browser.js +29 -3
- package/dist/cache/byte-lru.js +352 -0
- package/dist/config.js +7 -0
- package/dist/discovery.js +170 -29
- package/dist/lock.js +215 -0
- package/dist/network.js +53 -10
- package/dist/percy.js +128 -10
- package/dist/server.js +66 -3
- package/dist/utils.js +7 -3
- package/package.json +10 -10
- package/test/helpers/index.js +10 -0
package/README.md
CHANGED
|
@@ -53,6 +53,7 @@ The following options can also be defined within a Percy config file
|
|
|
53
53
|
- `requestHeaders` — Request headers used when discovering snapshot assets
|
|
54
54
|
- `authorization` — Basic auth `username` and `password` for protected snapshot assets
|
|
55
55
|
- `disableCache` — Disable asset caching (**default** `false`)
|
|
56
|
+
- `maxCacheRam` — Cap the asset-discovery cache at this many MB (**default** unset/unbounded). When set, least-recently-used resources are evicted to stay within the cap. MB is decimal (1 MB = 1,000,000 bytes), not binary MiB (1,048,576). The cap measures cache body bytes only; process RSS is typically 1.5–2× the cap due to Node's Buffer slab allocator. Values below 25 MB are clamped to 25 MB with a warn log (the per-resource ceiling is 25 MB, so smaller caps would reject every resource). Also settable via the `--max-cache-ram <MB>` CLI flag or the `PERCY_MAX_CACHE_RAM` env var
|
|
56
57
|
- `userAgent` — Custom user-agent string used when requesting assets
|
|
57
58
|
- `cookies` — Browser cookies to use when requesting assets
|
|
58
59
|
- `networkIdleTimeout` — Milliseconds to wait for the network to idle (**default** `100`)
|
package/dist/api.js
CHANGED
|
@@ -259,7 +259,7 @@ export function createPercyServer(percy, port) {
|
|
|
259
259
|
if (cmd === 'reset') {
|
|
260
260
|
// the reset command will reset testing mode and clear any logs
|
|
261
261
|
percy.testing = {};
|
|
262
|
-
logger.instance.
|
|
262
|
+
logger.instance.reset();
|
|
263
263
|
} else if (cmd === 'version') {
|
|
264
264
|
// the version command will update the api version header for testing
|
|
265
265
|
percy.testing.version = body;
|
|
@@ -304,7 +304,7 @@ export function createPercyServer(percy, port) {
|
|
|
304
304
|
}))
|
|
305
305
|
// returns an array of raw logs from the logger
|
|
306
306
|
.route('get', '/test/logs', (req, res) => res.json(200, {
|
|
307
|
-
logs:
|
|
307
|
+
logs: logger.instance.query(() => true)
|
|
308
308
|
}))
|
|
309
309
|
// serves a very basic html page for testing snapshots
|
|
310
310
|
.route('get', '/test/snapshot', (req, res) => {
|
package/dist/browser.js
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import fs from 'fs';
|
|
2
2
|
import os from 'os';
|
|
3
3
|
import path from 'path';
|
|
4
|
+
import { execFileSync } from 'child_process';
|
|
4
5
|
import spawn from 'cross-spawn';
|
|
5
6
|
import EventEmitter from 'events';
|
|
6
7
|
import WebSocket from 'ws';
|
|
@@ -200,11 +201,36 @@ export class Browser extends EventEmitter {
|
|
|
200
201
|
/* istanbul ignore next:
|
|
201
202
|
* difficult to test failure here without mocking private properties */
|
|
202
203
|
if ((_this$process = this.process) !== null && _this$process !== void 0 && _this$process.pid && !this.process.killed) {
|
|
203
|
-
//
|
|
204
|
+
// Force-close the entire browser process tree, not just the lead
|
|
205
|
+
// pid. Chromium spawns
|
|
206
|
+
// renderer/utility/zygote children; targeting only the lead pid
|
|
207
|
+
// (the previous behavior) leaked them on every kill.
|
|
208
|
+
//
|
|
209
|
+
// Convention matches Puppeteer / Playwright: shell out to
|
|
210
|
+
// `taskkill /T /F` on Windows; on POSIX the spawn at line ~266
|
|
211
|
+
// sets `detached: true` so child.pid === pgid and a negative
|
|
212
|
+
// pid signals the entire process group.
|
|
204
213
|
try {
|
|
205
|
-
|
|
214
|
+
if (process.platform === 'win32') {
|
|
215
|
+
// Use execFileSync (no shell) so the pid argument is passed
|
|
216
|
+
// directly without interpolation — defense-in-depth against
|
|
217
|
+
// any future drift where this.process.pid isn't a clean int.
|
|
218
|
+
execFileSync('taskkill', ['/pid', String(this.process.pid), '/T', '/F'], {
|
|
219
|
+
stdio: 'ignore'
|
|
220
|
+
});
|
|
221
|
+
} else {
|
|
222
|
+
process.kill(-this.process.pid, 'SIGKILL');
|
|
223
|
+
}
|
|
206
224
|
} catch (error) {
|
|
207
|
-
|
|
225
|
+
// taskkill returns 128 if the process is already gone; the
|
|
226
|
+
// POSIX branch may also throw ESRCH for the same reason. Fall
|
|
227
|
+
// back to the lead-pid kill so a missing process doesn't
|
|
228
|
+
// wedge `_closed`.
|
|
229
|
+
try {
|
|
230
|
+
this.process.kill('SIGKILL');
|
|
231
|
+
} catch (fallbackErr) {
|
|
232
|
+
throw new Error(`Unable to close the browser: ${error.stack}`);
|
|
233
|
+
}
|
|
208
234
|
}
|
|
209
235
|
}
|
|
210
236
|
|
|
@@ -0,0 +1,352 @@
|
|
|
1
|
+
// Babel-runtime equivalents backing the WeakMap/WeakSet-based transpiled
// private fields and methods used by DiskSpillStore below.

// Registers `instance` against the WeakSet backing a private method,
// refusing double initialization.
function _classPrivateMethodInitSpec(instance, brand) {
  _checkPrivateRedeclaration(instance, brand);
  brand.add(instance);
}

// Registers `instance` in the WeakMap backing a private field, seeding it
// with `value`.
function _classPrivateFieldInitSpec(instance, storage, value) {
  _checkPrivateRedeclaration(instance, storage);
  storage.set(instance, value);
}

// Throws if this instance was already initialized against the given
// private store.
function _checkPrivateRedeclaration(instance, store) {
  if (store.has(instance)) {
    throw new TypeError("Cannot initialize the same private elements twice on an object");
  }
}

// Reads a private field after verifying the receiver carries the brand.
function _classPrivateFieldGet(storage, receiver) {
  return storage.get(_assertClassBrand(storage, receiver));
}

// Writes a private field after verifying the brand; returns the new value.
function _classPrivateFieldSet(storage, receiver, value) {
  storage.set(_assertClassBrand(storage, receiver), value);
  return value;
}

// Verifies `receiver` carries the private brand (a WeakMap/WeakSet, or the
// class constructor itself for statics). Returns `receiver`, or the third
// argument when one is supplied; throws TypeError otherwise.
function _assertClassBrand(brand, receiver, ...substitute) {
  const branded = typeof brand === "function" ? brand === receiver : brand.has(receiver);
  if (!branded) throw new TypeError("Private element is not present on this object");
  return substitute.length === 0 ? receiver : substitute[0];
}
|
7
|
+
// Two-tier cache used by asset discovery:
|
|
8
|
+
// ByteLRU — byte-budget in-memory LRU; Map insertion order = LRU order.
|
|
9
|
+
// DiskSpillStore — on-disk overflow tier. RAM evictions spill here; lookups
|
|
10
|
+
// fall back to disk before refetching from origin.
|
|
11
|
+
// All operations are synchronous; callers (network intercept, ByteLRU.set)
|
|
12
|
+
// cannot yield to the event loop mid-op. Per-entry size is capped at 25MB
|
|
13
|
+
// upstream so disk I/O latency is bounded.
|
|
14
|
+
|
|
15
|
+
import fs from 'fs';
|
|
16
|
+
import os from 'os';
|
|
17
|
+
import path from 'path';
|
|
18
|
+
import crypto from 'crypto';
|
|
19
|
+
// Flat per-entry bookkeeping charge (bytes) added on top of raw content
// size when estimating an entry's cache cost; see entrySize below.
const DEFAULT_PER_ENTRY_OVERHEAD = 512;

/**
 * Byte-budget in-memory LRU cache. A Map's insertion order doubles as the
 * recency order: reads re-insert the record, so the first key is always the
 * least recently used. An undefined `maxBytes` means unbounded.
 */
export class ByteLRU {
  // key -> { value, size }; insertion order == LRU order
  #entries = new Map();
  #totalBytes = 0;
  #maxBytes;
  #stats = {
    hits: 0,
    misses: 0,
    evictions: 0,
    peakBytes: 0
  };
  // optional (key, reason, value) callback fired on 'lru' and 'too-big'
  onEvict;

  constructor(maxBytes, { onEvict } = {}) {
    this.#maxBytes = maxBytes;
    this.onEvict = onEvict;
  }

  /** Returns the cached value (refreshing its recency), or undefined on a miss. */
  get(key) {
    const record = this.#entries.get(key);
    if (!record) {
      this.#stats.misses++;
      return undefined;
    }
    // re-insert to move the key to the most-recently-used position
    this.#entries.delete(key);
    this.#entries.set(key, record);
    this.#stats.hits++;
    return record.value;
  }

  /**
   * Stores `value` charged at `size` bytes. Returns false (without caching)
   * for invalid or over-budget sizes; otherwise true, evicting LRU records
   * as needed to stay within the byte budget.
   */
  set(key, value, size) {
    if (!Number.isFinite(size) || size < 0) return false;

    // Reject oversize BEFORE touching any existing entry — a failed set on
    // an existing key must not evict the prior (valid) entry.
    if (this.#maxBytes !== undefined && size > this.#maxBytes) {
      this.onEvict?.(key, 'too-big', value);
      return false;
    }
    const prior = this.#entries.get(key);
    if (prior) {
      this.#totalBytes -= prior.size;
      this.#entries.delete(key);
    }
    this.#entries.set(key, { value, size });
    this.#totalBytes += size;
    // peak is sampled before eviction so it reflects the true high-water mark
    if (this.#totalBytes > this.#stats.peakBytes) {
      this.#stats.peakBytes = this.#totalBytes;
    }
    this.#evictUntilWithinBudget();
    return true;
  }

  // Pops least-recently-used records until the byte budget is satisfied.
  #evictUntilWithinBudget() {
    while (this.#maxBytes !== undefined && this.#totalBytes > this.#maxBytes) {
      const [oldestKey, record] = this.#entries.entries().next().value;
      this.#totalBytes -= record.size;
      this.#entries.delete(oldestKey);
      this.#stats.evictions++;
      this.onEvict?.(oldestKey, 'lru', record.value);
    }
  }

  /** True when the key is cached; does not affect recency or hit/miss stats. */
  has(key) {
    return this.#entries.has(key);
  }

  /** Removes one entry, reclaiming its byte charge. Returns false if absent. */
  delete(key) {
    const record = this.#entries.get(key);
    if (!record) return false;
    this.#totalBytes -= record.size;
    return this.#entries.delete(key);
  }

  /** Empties the cache and byte counter; accumulated stats are kept. */
  clear() {
    this.#entries.clear();
    this.#totalBytes = 0;
  }

  /** Number of cached entries. */
  get size() {
    return this.#entries.size;
  }

  /** Current total of charged bytes. */
  get calculatedSize() {
    return this.#totalBytes;
  }

  /** Snapshot of hit/miss/eviction counters plus the live byte total. */
  get stats() {
    return {
      ...this.#stats,
      currentBytes: this.#totalBytes
    };
  }
}
|
|
102
|
+
|
|
103
|
+
// Byte length of a resource's content. Strings are measured with
// Buffer.byteLength so multi-byte UTF-8 (CJK, emoji) is counted in bytes
// rather than UTF-16 string units — otherwise the cache budget can drift
// past its cap. Buffer/array-like content reports its own length; missing
// content counts as zero.
function contentBytes(content) {
  if (content == null) return 0;
  if (Buffer.isBuffer(content)) return content.length;
  if (typeof content === 'string') return Buffer.byteLength(content);
  return content.length ?? 0;
}

// Estimated cache cost of a Percy cache entry, covering both entry shapes:
// a single resource, or an array of roots captured at multiple widths (see
// discovery.js parseDomResources). `overhead` is a flat per-resource
// bookkeeping charge added on top of each resource's content bytes.
export function entrySize(resource, overhead = DEFAULT_PER_ENTRY_OVERHEAD) {
  if (!Array.isArray(resource)) {
    return contentBytes(resource?.content) + overhead;
  }
  let total = 0;
  for (const item of resource) {
    total += contentBytes(item?.content) + overhead;
  }
  return total;
}
|
|
121
|
+
|
|
122
|
+
// Multi-width root arrays carry per-element binary content. Buffers are
// wrapped as `{ __buf: <base64> }` inside JSON so the whole array survives
// a disk roundtrip; null content passes through as null, and anything else
// is stringified.
function encodeArrayElement(element) {
  if (!element) return element;
  const { content, ...rest } = element;
  if (content == null) {
    return { ...rest, content: null };
  }
  if (Buffer.isBuffer(content)) {
    return { ...rest, content: { __buf: content.toString('base64') } };
  }
  return { ...rest, content: String(content) };
}

// Inverse of encodeArrayElement: rehydrates `{ __buf }` wrappers back into
// Buffers; every other content value is passed through unchanged.
function decodeArrayElement(element) {
  if (!element) return element;
  const { content, ...rest } = element;
  const isWrappedBuffer = content && typeof content === 'object' && '__buf' in content;
  return isWrappedBuffer
    ? { ...rest, content: Buffer.from(content.__buf, 'base64') }
    : { ...rest, content };
}
|
|
163
|
+
/**
 * On-disk overflow tier for the asset-discovery cache. Records evicted from
 * the in-memory ByteLRU spill here; lookups fall back to disk before
 * refetching from origin. All operations are synchronous, so callers never
 * yield to the event loop mid-op; per-entry size is capped at 25MB upstream,
 * keeping disk I/O latency bounded.
 *
 * (Native #private fields, matching ByteLRU above.)
 */
export class DiskSpillStore {
  // url -> { path, size, isArray, meta } for every live spilled entry
  #index = new Map();
  #bytes = 0;
  #peakBytes = 0;
  #stats = {
    spilled: 0,
    restored: 0,
    spillFailures: 0,
    readFailures: 0
  };
  // monotonically increasing spill-file name counter
  #counter = 0;
  // false until the spill directory exists; every write no-ops while false
  #ready = false;

  constructor(dir, { log } = {}) {
    this.dir = dir;
    this.log = log;
    try {
      // mode 0o700: spilled bytes are origin-fetchable so the threat model is
      // small, but on shared-tenant CI hosts other users on the same box
      // shouldn't be able to read them.
      fs.mkdirSync(dir, { recursive: true, mode: 0o700 });
      this.#ready = true;
    } catch (err) {
      this.log?.debug?.(`disk-spill init failed for ${dir}: ${err.message}`);
    }
  }

  // Returns true on success; false on any failure so caller falls back to drop.
  // Overwrites prior spill for the same URL — a fresh discovery write wins.
  // Two resource shapes are supported: a single resource with a binary
  // .content, and a multi-width root array (see entrySize for the array
  // shape). Arrays are JSON-encoded with base64 buffers so the whole array
  // survives the disk roundtrip.
  set(url, resource) {
    if (!this.#ready) return false;
    let bytes;
    let meta;
    const isArray = Array.isArray(resource);
    if (isArray) {
      try {
        bytes = Buffer.from(JSON.stringify(resource.map(encodeArrayElement)));
      } catch {
        return false;
      }
    } else {
      let content = resource?.content;
      if (content == null) return false;
      if (!Buffer.isBuffer(content)) {
        try {
          content = Buffer.from(content);
        } catch {
          return false;
        }
      }
      bytes = content;
      // everything except content rides along as metadata for restore
      const { content: _omitted, ...rest } = resource;
      meta = rest;
    }

    // Counter-based filename keeps URL-derived data out of path.join —
    // avoids any path-traversal surface even though sha256 would be safe.
    const filepath = path.join(this.dir, String(++this.#counter));
    try {
      fs.writeFileSync(filepath, bytes);
    } catch (err) {
      this.#stats.spillFailures++;
      this.log?.debug?.(`disk-spill write failed for ${url}: ${err.message}`);
      return false;
    }
    const prev = this.#index.get(url);
    if (prev) {
      this.#bytes -= prev.size;
      try {
        fs.unlinkSync(prev.path);
      } catch {/* best-effort */}
    }
    this.#index.set(url, {
      path: filepath,
      size: bytes.length,
      isArray,
      meta
    });
    this.#bytes += bytes.length;
    if (this.#bytes > this.#peakBytes) this.#peakBytes = this.#bytes;
    this.#stats.spilled++;
    return true;
  }

  // Reads a spilled entry back into its original shape. Returns undefined —
  // dropping the broken entry — on any read or decode failure, so the
  // caller refetches from origin instead.
  get(url) {
    const entry = this.#index.get(url);
    if (!entry) return undefined;
    let raw;
    try {
      raw = fs.readFileSync(entry.path);
    } catch (err) {
      this.#stats.readFailures++;
      this.log?.debug?.(`disk-spill read failed for ${url}: ${err.message}`);
      this.#removeEntry(url, entry);
      return undefined;
    }
    if (!entry.isArray) {
      this.#stats.restored++;
      return { ...entry.meta, content: raw };
    }
    try {
      const decoded = JSON.parse(raw.toString('utf8')).map(decodeArrayElement);
      this.#stats.restored++;
      return decoded;
    } catch (err) {
      this.#stats.readFailures++;
      this.log?.debug?.(`disk-spill array-decode failed for ${url}: ${err.message}`);
      this.#removeEntry(url, entry);
      return undefined;
    }
  }

  /** True when a spilled entry exists for the URL. */
  has(url) {
    return this.#index.has(url);
  }

  /** Removes one spilled entry (index + file). Returns false if absent. */
  delete(url) {
    const entry = this.#index.get(url);
    if (!entry) return false;
    this.#removeEntry(url, entry);
    return true;
  }

  /** Best-effort removal of the whole spill directory; resets all bookkeeping. */
  destroy() {
    try {
      if (this.#ready) {
        fs.rmSync(this.dir, { recursive: true, force: true });
      }
    } catch (err) {
      this.log?.debug?.(`disk-spill cleanup failed for ${this.dir}: ${err.message}`);
    }
    this.#index.clear();
    this.#bytes = 0;
    this.#ready = false;
  }

  /** Number of spilled entries. */
  get size() {
    return this.#index.size;
  }

  /** Current on-disk byte total across all entries. */
  get bytes() {
    return this.#bytes;
  }

  /** Whether the spill directory was created and the store is usable. */
  get ready() {
    return this.#ready;
  }

  /** Snapshot of spill/restore/failure counters plus byte totals. */
  get stats() {
    return {
      ...this.#stats,
      currentBytes: this.#bytes,
      peakBytes: this.#peakBytes,
      entries: this.#index.size
    };
  }

  // Drops one entry's bookkeeping and best-effort unlinks its backing file.
  #removeEntry(url, entry) {
    this.#bytes -= entry.size;
    this.#index.delete(url);
    try {
      fs.unlinkSync(entry.path);
    } catch {/* best-effort */}
  }
}
|
|
349
|
+
// Builds a unique per-process temp-directory path for the disk-spill store.
// The pid plus four random bytes keeps concurrent Percy processes (and
// rapid restarts) from colliding on the same directory.
export function createSpillDir() {
  const token = crypto.randomBytes(4).toString('hex');
  return path.join(os.tmpdir(), `percy-cache-${process.pid}-${token}`);
}
|
package/dist/config.js
CHANGED
|
@@ -422,6 +422,13 @@ export const configSchema = {
|
|
|
422
422
|
disableCache: {
|
|
423
423
|
type: 'boolean'
|
|
424
424
|
},
|
|
425
|
+
maxCacheRam: {
|
|
426
|
+
// 0 has no meaningful semantics — it's neither "unbounded" (use null)
|
|
427
|
+
// nor "disabled" (use --disable-cache). Reject it at schema time so the
|
|
428
|
+
// discovery clamp doesn't silently bump it to 25MB.
|
|
429
|
+
type: ['integer', 'null'],
|
|
430
|
+
minimum: 1
|
|
431
|
+
},
|
|
425
432
|
captureMockedServiceWorker: {
|
|
426
433
|
type: 'boolean',
|
|
427
434
|
default: false
|