@percy/core 1.31.14-beta.0 → 1.31.14-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/dist/api.js +2 -2
- package/dist/cache/byte-lru.js +352 -0
- package/dist/config.js +7 -0
- package/dist/discovery.js +170 -29
- package/dist/percy.js +68 -8
- package/package.json +9 -9
package/README.md
CHANGED
|
@@ -53,6 +53,7 @@ The following options can also be defined within a Percy config file
|
|
|
53
53
|
- `requestHeaders` — Request headers used when discovering snapshot assets
|
|
54
54
|
- `authorization` — Basic auth `username` and `password` for protected snapshot assets
|
|
55
55
|
- `disableCache` — Disable asset caching (**default** `false`)
|
|
56
|
+
- `maxCacheRam` — Cap the asset-discovery cache at this many MB (**default** unset/unbounded). When set, least-recently-used resources are evicted to stay within the cap. MB is decimal (1 MB = 1,000,000 bytes), not binary MiB (1,048,576). The cap measures cache body bytes only; process RSS is typically 1.5–2× the cap due to Node's Buffer slab allocator. Values below 25 MB are clamped to 25 MB with a warn log (the per-resource ceiling is 25 MB, so smaller caps would reject every resource). Also settable via the `--max-cache-ram <MB>` CLI flag or the `PERCY_MAX_CACHE_RAM` env var
|
|
56
57
|
- `userAgent` — Custom user-agent string used when requesting assets
|
|
57
58
|
- `cookies` — Browser cookies to use when requesting assets
|
|
58
59
|
- `networkIdleTimeout` — Milliseconds to wait for the network to idle (**default** `100`)
|
package/dist/api.js
CHANGED
|
@@ -259,7 +259,7 @@ export function createPercyServer(percy, port) {
|
|
|
259
259
|
if (cmd === 'reset') {
|
|
260
260
|
// the reset command will reset testing mode and clear any logs
|
|
261
261
|
percy.testing = {};
|
|
262
|
-
logger.instance.
|
|
262
|
+
logger.instance.reset();
|
|
263
263
|
} else if (cmd === 'version') {
|
|
264
264
|
// the version command will update the api version header for testing
|
|
265
265
|
percy.testing.version = body;
|
|
@@ -304,7 +304,7 @@ export function createPercyServer(percy, port) {
|
|
|
304
304
|
}))
|
|
305
305
|
// returns an array of raw logs from the logger
|
|
306
306
|
.route('get', '/test/logs', (req, res) => res.json(200, {
|
|
307
|
-
logs:
|
|
307
|
+
logs: logger.instance.query(() => true)
|
|
308
308
|
}))
|
|
309
309
|
// serves a very basic html page for testing snapshots
|
|
310
310
|
.route('get', '/test/snapshot', (req, res) => {
|
|
@@ -0,0 +1,352 @@
|
|
|
1
|
+
// Babel-generated runtime helpers backing the compiled private class members
// below. Semantics (and error strings) match the upstream @babel/runtime
// versions exactly; only the formatting differs.

// Registers a private-method brand (WeakSet) on a new instance.
function _classPrivateMethodInitSpec(e, a) {
  _checkPrivateRedeclaration(e, a);
  a.add(e);
}

// Registers a private field (WeakMap slot) on a new instance with its value.
function _classPrivateFieldInitSpec(e, t, a) {
  _checkPrivateRedeclaration(e, t);
  t.set(e, a);
}

// Guards against initializing the same private member twice on one object.
function _checkPrivateRedeclaration(e, t) {
  if (t.has(e)) {
    throw new TypeError("Cannot initialize the same private elements twice on an object");
  }
}

// Reads a private field after verifying the receiver carries the brand.
function _classPrivateFieldGet(s, a) {
  return s.get(_assertClassBrand(s, a));
}

// Writes a private field after verifying the brand; returns the new value.
function _classPrivateFieldSet(s, a, r) {
  s.set(_assertClassBrand(s, a), r);
  return r;
}

// Verifies that `t` carries the private brand `e` (either a class
// constructor or a WeakMap/WeakSet). Two-arg form returns `t`; the
// three-arg form returns `n` instead (used for field reads).
function _assertClassBrand(e, t, n) {
  if (typeof e === "function" ? e === t : e.has(t)) {
    return arguments.length < 3 ? t : n;
  }
  throw new TypeError("Private element is not present on this object");
}
|
|
7
|
+
// Two-tier cache used by asset discovery:
|
|
8
|
+
// ByteLRU — byte-budget in-memory LRU; Map insertion order = LRU order.
|
|
9
|
+
// DiskSpillStore — on-disk overflow tier. RAM evictions spill here; lookups
|
|
10
|
+
// fall back to disk before refetching from origin.
|
|
11
|
+
// All operations are synchronous; callers (network intercept, ByteLRU.set)
|
|
12
|
+
// cannot yield to the event loop mid-op. Per-entry size is capped at 25MB
|
|
13
|
+
// upstream so disk I/O latency is bounded.
|
|
14
|
+
|
|
15
|
+
import fs from 'fs';
|
|
16
|
+
import os from 'os';
|
|
17
|
+
import path from 'path';
|
|
18
|
+
import crypto from 'crypto';
|
|
19
|
+
const DEFAULT_PER_ENTRY_OVERHEAD = 512;
|
|
20
|
+
// Byte-budget in-memory LRU. Map insertion order doubles as recency order:
// the first key in #map is always the least-recently-used entry.
export class ByteLRU {
  #map = new Map();   // key -> { value, size }
  #bytes = 0;         // running sum of all stored entry sizes
  #max;               // byte budget; undefined means unbounded
  #stats = { hits: 0, misses: 0, evictions: 0, peakBytes: 0 };
  onEvict;            // optional (key, reason, value) callback

  constructor(maxBytes, { onEvict } = {}) {
    this.#max = maxBytes;
    this.onEvict = onEvict;
  }

  // Returns the cached value and marks it most-recently-used, or undefined.
  get(key) {
    const record = this.#map.get(key);
    if (record === undefined) {
      this.#stats.misses++;
      return undefined;
    }
    // Delete + re-insert moves the key to the MRU end of the Map.
    this.#map.delete(key);
    this.#map.set(key, record);
    this.#stats.hits++;
    return record.value;
  }

  // Stores an entry, evicting LRU entries as needed to stay within budget.
  // Returns false (storing nothing) for invalid sizes or entries larger
  // than the entire budget.
  set(key, value, size) {
    if (!Number.isFinite(size) || size < 0) return false;

    // Reject oversize BEFORE touching any existing entry — a failed set on an
    // existing key must not evict the prior (valid) entry.
    if (this.#max !== undefined && size > this.#max) {
      if (this.onEvict) this.onEvict(key, 'too-big', value);
      return false;
    }

    const existing = this.#map.get(key);
    if (existing !== undefined) {
      this.#bytes -= existing.size;
      this.#map.delete(key);
    }
    this.#map.set(key, { value, size });
    this.#bytes += size;
    this.#stats.peakBytes = Math.max(this.#stats.peakBytes, this.#bytes);

    // Pop from the LRU end until we are back under budget.
    while (this.#max !== undefined && this.#bytes > this.#max) {
      const [oldestKey, oldest] = this.#map.entries().next().value;
      this.#bytes -= oldest.size;
      this.#map.delete(oldestKey);
      this.#stats.evictions++;
      if (this.onEvict) this.onEvict(oldestKey, 'lru', oldest.value);
    }
    return true;
  }

  // Membership check; does not touch recency order or hit/miss counters.
  has(key) {
    return this.#map.has(key);
  }

  // Removes one entry; returns whether it existed.
  delete(key) {
    const record = this.#map.get(key);
    if (record === undefined) return false;
    this.#bytes -= record.size;
    return this.#map.delete(key);
  }

  // Drops every entry and resets the byte counter (stats are preserved).
  clear() {
    this.#map.clear();
    this.#bytes = 0;
  }

  get size() {
    return this.#map.size;
  }

  get calculatedSize() {
    return this.#bytes;
  }

  get stats() {
    return { ...this.#stats, currentBytes: this.#bytes };
  }
}
|
|
102
|
+
|
|
103
|
+
// Returns the byte length of a resource's content. Buffer.byteLength is used
|
|
104
|
+
// for strings so that multi-byte UTF-8 (CJK, emoji) is counted in bytes, not
|
|
105
|
+
// JS string units, otherwise the cache budget can drift past its cap.
|
|
106
|
+
function contentBytes(content) {
|
|
107
|
+
if (content == null) return 0;
|
|
108
|
+
if (Buffer.isBuffer(content)) return content.length;
|
|
109
|
+
if (typeof content === 'string') return Buffer.byteLength(content);
|
|
110
|
+
return content.length ?? 0;
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
// Handles the two Percy cache-entry shapes: single resource, or array of
|
|
114
|
+
// roots captured at multiple widths (see discovery.js parseDomResources).
|
|
115
|
+
export function entrySize(resource, overhead = DEFAULT_PER_ENTRY_OVERHEAD) {
|
|
116
|
+
if (Array.isArray(resource)) {
|
|
117
|
+
return resource.reduce((n, r) => n + contentBytes(r === null || r === void 0 ? void 0 : r.content) + overhead, 0);
|
|
118
|
+
}
|
|
119
|
+
return contentBytes(resource === null || resource === void 0 ? void 0 : resource.content) + overhead;
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
// Multi-width root arrays carry per-element binary content. Encode buffers as
// base64 inside JSON so the whole array survives a disk roundtrip; null and
// string content pass through as themselves.
function encodeArrayElement(r) {
  if (!r) return r;
  const { content, ...rest } = r;
  // Nullish content is normalized to an explicit null for JSON.
  if (content == null) return { ...rest, content: null };
  // Buffers become a tagged base64 wrapper that decodeArrayElement reverses.
  if (Buffer.isBuffer(content)) {
    return { ...rest, content: { __buf: content.toString('base64') } };
  }
  // Everything else is persisted as its string form.
  return { ...rest, content: String(content) };
}

// Inverse of encodeArrayElement: rebuilds Buffers from the tagged base64
// wrapper; all other content shapes pass through unchanged.
function decodeArrayElement(r) {
  if (!r) return r;
  const { content, ...rest } = r;
  const isTaggedBuffer = content && typeof content === 'object' && '__buf' in content;
  if (isTaggedBuffer) {
    return { ...rest, content: Buffer.from(content.__buf, 'base64') };
  }
  return { ...rest, content };
}
|
|
163
|
+
// On-disk overflow tier for the asset-discovery cache. RAM evictions from
// ByteLRU spill here; lookups fall back to disk before refetching from
// origin. (Originally Babel-compiled output with WeakMap-backed private
// state; rewritten with native #private members — observable behavior,
// return values, and log strings are unchanged.)
export class DiskSpillStore {
  #index = new Map();   // url -> { path, size, isArray, meta }
  #bytes = 0;           // sum of on-disk payload sizes currently indexed
  #peakBytes = 0;
  #stats = { spilled: 0, restored: 0, spillFailures: 0, readFailures: 0 };
  #counter = 0;         // monotonic filename source
  #ready = false;       // false when the spill dir could not be created

  constructor(dir, { log } = {}) {
    this.dir = dir;
    this.log = log;
    try {
      // mode 0o700: spilled bytes are origin-fetchable so the threat model is
      // small, but on shared-tenant CI hosts other users on the same box
      // shouldn't be able to read them.
      fs.mkdirSync(dir, { recursive: true, mode: 0o700 });
      this.#ready = true;
    } catch (err) {
      this.log?.debug?.(`disk-spill init failed for ${dir}: ${err.message}`);
    }
  }

  // Returns true on success; false on any failure so caller falls back to drop.
  // Overwrites prior spill for the same URL — a fresh discovery write wins.
  // Two resource shapes are supported: a single resource with a binary
  // .content, and a multi-width root array (see entrySize for the array
  // shape). Arrays are JSON-encoded with base64 buffers so the whole array
  // survives the disk roundtrip.
  set(url, resource) {
    if (!this.#ready) return false;
    let payload;
    let meta;
    let isArray = false;

    if (Array.isArray(resource)) {
      isArray = true;
      try {
        payload = Buffer.from(JSON.stringify(resource.map(encodeArrayElement)));
      } catch {
        return false;
      }
    } else {
      let content = resource?.content;
      if (content == null) return false;
      if (!Buffer.isBuffer(content)) {
        try {
          content = Buffer.from(content);
        } catch {
          return false;
        }
      }
      payload = content;
      meta = { ...resource };
      delete meta.content;
    }

    // Counter-based filename keeps URL-derived data out of path.join —
    // avoids any path-traversal surface even though sha256 would be safe.
    const filepath = path.join(this.dir, String(++this.#counter));
    try {
      fs.writeFileSync(filepath, payload);
    } catch (err) {
      this.#stats.spillFailures++;
      this.log?.debug?.(`disk-spill write failed for ${url}: ${err.message}`);
      return false;
    }

    // Only after a successful write do we drop the superseded spill.
    const prev = this.#index.get(url);
    if (prev) {
      this.#bytes -= prev.size;
      try {
        fs.unlinkSync(prev.path);
      } catch { /* best-effort */ }
    }
    this.#index.set(url, { path: filepath, size: payload.length, isArray, meta });
    this.#bytes += payload.length;
    if (this.#bytes > this.#peakBytes) this.#peakBytes = this.#bytes;
    this.#stats.spilled++;
    return true;
  }

  // Reads an entry back. Undefined on a miss or any read/decode failure; the
  // broken entry is dropped so the browser simply refetches from origin.
  get(url) {
    const entry = this.#index.get(url);
    if (!entry) return undefined;
    let raw;
    try {
      raw = fs.readFileSync(entry.path);
    } catch (err) {
      this.#stats.readFailures++;
      this.log?.debug?.(`disk-spill read failed for ${url}: ${err.message}`);
      this.#removeEntry(url, entry);
      return undefined;
    }
    if (entry.isArray) {
      let decoded;
      try {
        decoded = JSON.parse(raw.toString('utf8')).map(decodeArrayElement);
      } catch (err) {
        this.#stats.readFailures++;
        this.log?.debug?.(`disk-spill array-decode failed for ${url}: ${err.message}`);
        this.#removeEntry(url, entry);
        return undefined;
      }
      this.#stats.restored++;
      return decoded;
    }
    this.#stats.restored++;
    return { ...entry.meta, content: raw };
  }

  has(url) {
    return this.#index.has(url);
  }

  delete(url) {
    const entry = this.#index.get(url);
    if (!entry) return false;
    this.#removeEntry(url, entry);
    return true;
  }

  // Removes the whole spill directory and resets all in-memory state.
  destroy() {
    try {
      if (this.#ready) {
        fs.rmSync(this.dir, { recursive: true, force: true });
      }
    } catch (err) {
      this.log?.debug?.(`disk-spill cleanup failed for ${this.dir}: ${err.message}`);
    }
    this.#index.clear();
    this.#bytes = 0;
    this.#ready = false;
  }

  get size() {
    return this.#index.size;
  }

  get bytes() {
    return this.#bytes;
  }

  get ready() {
    return this.#ready;
  }

  get stats() {
    return {
      ...this.#stats,
      currentBytes: this.#bytes,
      peakBytes: this.#peakBytes,
      entries: this.#index.size
    };
  }

  // Drops one indexed entry and its backing file (best-effort unlink).
  #removeEntry(url, entry) {
    this.#bytes -= entry.size;
    this.#index.delete(url);
    try {
      fs.unlinkSync(entry.path);
    } catch { /* best-effort */ }
  }
}
|
|
349
|
+
// Builds a unique per-run temp directory path for the disk-spill tier.
// Combining the pid with random hex keeps concurrent Percy processes on the
// same machine from colliding. (Path only — the directory itself is created
// by DiskSpillStore's constructor.)
export function createSpillDir() {
  const token = crypto.randomBytes(4).toString('hex');
  return path.join(os.tmpdir(), `percy-cache-${process.pid}-${token}`);
}
|
package/dist/config.js
CHANGED
|
@@ -422,6 +422,13 @@ export const configSchema = {
|
|
|
422
422
|
disableCache: {
|
|
423
423
|
type: 'boolean'
|
|
424
424
|
},
|
|
425
|
+
maxCacheRam: {
|
|
426
|
+
// 0 has no meaningful semantics — it's neither "unbounded" (use null)
|
|
427
|
+
// nor "disabled" (use --disable-cache). Reject it at schema time so the
|
|
428
|
+
// discovery clamp doesn't silently bump it to 25MB.
|
|
429
|
+
type: ['integer', 'null'],
|
|
430
|
+
minimum: 1
|
|
431
|
+
},
|
|
425
432
|
captureMockedServiceWorker: {
|
|
426
433
|
type: 'boolean',
|
|
427
434
|
default: false
|
package/dist/discovery.js
CHANGED
|
@@ -2,6 +2,7 @@ import logger from '@percy/logger';
|
|
|
2
2
|
import Queue from './queue.js';
|
|
3
3
|
import Page from './page.js';
|
|
4
4
|
import { normalizeURL, hostnameMatches, createResource, createRootResource, createPercyCSSResource, createLogResource, yieldAll, snapshotLogName, waitForTimeout, withRetries, waitForSelectorInsideBrowser, isGzipped, maybeScrollToBottom } from './utils.js';
|
|
5
|
+
import { ByteLRU, entrySize, DiskSpillStore, createSpillDir } from './cache/byte-lru.js';
|
|
5
6
|
import { sha256hash } from '@percy/client/utils';
|
|
6
7
|
import Pako from 'pako';
|
|
7
8
|
|
|
@@ -223,10 +224,8 @@ function processSnapshotResources({
|
|
|
223
224
|
resources = resources.flat();
|
|
224
225
|
|
|
225
226
|
// include associated snapshot logs matched by meta information
|
|
226
|
-
resources.push(createLogResource(logger.
|
|
227
|
-
|
|
228
|
-
return ((_log$meta$snapshot = log.meta.snapshot) === null || _log$meta$snapshot === void 0 ? void 0 : _log$meta$snapshot.testCase) === snapshot.meta.snapshot.testCase && ((_log$meta$snapshot2 = log.meta.snapshot) === null || _log$meta$snapshot2 === void 0 ? void 0 : _log$meta$snapshot2.name) === snapshot.meta.snapshot.name;
|
|
229
|
-
})));
|
|
227
|
+
resources.push(createLogResource(logger.snapshotLogs(snapshot.meta.snapshot)));
|
|
228
|
+
logger.evictSnapshot(snapshot.meta.snapshot);
|
|
230
229
|
if (process.env.PERCY_GZIP) {
|
|
231
230
|
for (let index = 0; index < resources.length; index++) {
|
|
232
231
|
const alreadyZipped = isGzipped(resources[index].content);
|
|
@@ -434,9 +433,74 @@ export async function* discoverSnapshotResources(queue, options, callback) {
|
|
|
434
433
|
return all;
|
|
435
434
|
}, []));
|
|
436
435
|
}
|
|
437
|
-
|
|
438
|
-
// Used to cache resources across core instances
|
|
439
436
|
// Used to cache resources across core instances.
export const RESOURCE_CACHE_KEY = Symbol('resource-cache');
// Per-run cache telemetry counters live on the Percy instance under this key.
export const CACHE_STATS_KEY = Symbol('resource-cache-stats');
// Present on the Percy instance only while the ByteLRU tier is active.
export const DISK_SPILL_KEY = Symbol('resource-cache-disk-spill');

// --max-cache-ram is documented in decimal megabytes (1 MB = 1,000,000 bytes).
const BYTES_PER_MB = 1_000_000;
// MAX_RESOURCE_SIZE in network.js is 25MB; caps below that would skip every
// resource, so we clamp. MIN_REASONABLE_CAP_MB warns on near-useless caps.
const MAX_RESOURCE_SIZE_MB = 25;
const MIN_REASONABLE_CAP_MB = 50;
const DEFAULT_WARN_THRESHOLD_BYTES = 500 * BYTES_PER_MB;

// Builds a fresh per-run stats object (stored under CACHE_STATS_KEY).
function makeCacheStats() {
  return {
    effectiveMaxCacheRamMB: null,
    oversizeSkipped: 0,
    firstEvictionEventFired: false,
    warningFired: false,
    unsetModeBytes: 0
  };
}

// Resolves the unset-mode memory-warning threshold. The
// PERCY_CACHE_WARN_THRESHOLD_BYTES env var overrides the 500MB default;
// non-numeric or non-positive values fall back to the default.
function readWarnThresholdBytes() {
  const override = Number(process.env.PERCY_CACHE_WARN_THRESHOLD_BYTES);
  if (Number.isFinite(override) && override > 0) return override;
  return DEFAULT_WARN_THRESHOLD_BYTES;
}
|
|
458
|
+
|
|
459
|
+
// Cache lookup shared by the network intercept path. RAM miss falls through
// to the disk tier; read failures return undefined so the browser refetches.
// Also resolves the array-valued root-resource shape used for multi-width
// DOM snapshots, regardless of which tier returned it.
//
// Disk hits are promoted back to RAM so a hot URL that was evicted once does
// not pay the readFileSync cost on every subsequent access — the typical
// two-tier-cache promotion pattern. ByteLRU's own eviction will then re-spill
// the actual coldest entry if needed. DISK_SPILL_KEY is only set when the
// ByteLRU tier is active (see createDiscoveryQueue 'start' handler), so the
// cache here is guaranteed to be a ByteLRU when we enter this branch.
export function lookupCacheResource(percy, snapshotResources, cache, url, width) {
  let resource = snapshotResources.get(url) || cache.get(url);
  const disk = percy[DISK_SPILL_KEY];
  if (!resource && disk) {
    resource = disk.get(url);
    if (resource) {
      percy.log.debug(`cache disk-hit: ${url} (disk=${disk.size}/` + `${Math.round(disk.bytes / BYTES_PER_MB)}MB)`);
      // Promote back to RAM and drop the disk copy. cache.set may itself
      // evict the LRU entry (which spills back to disk) — that's the
      // intended LRU dance, not a bug.
      cache.set(url, resource, entrySize(resource));
      disk.delete(url);
    }
  }
  // resource[0]?.root: guards against an empty cached array — previously an
  // empty array threw a TypeError here instead of falling through.
  if (resource && Array.isArray(resource) && resource[0]?.root) {
    // Prefer the root captured at the requested width; fall back to the first.
    const rootResource = resource.find(r => r.widths?.includes(width));
    resource = rootResource || resource[0];
  }
  return resource;
}
|
|
493
|
+
|
|
494
|
+
// Fire-and-forget wrapper around the shared telemetry egress on Percy.
// onEvict callbacks are sync; the microtask hop keeps even sendCacheTelemetry's
// pre-await synchronous work (header construction, payload serialization) off
// the eviction-loop hot path.
function fireCacheEventSafe(percy, message, extra) {
  const deferred = Promise.resolve().then(() => {
    return percy.sendCacheTelemetry(message, extra);
  });
  // sendCacheTelemetry already swallows pager errors. The trailing catch is
  // belt-and-suspenders against Node 14's unhandled-rejection-as-fatal mode
  // if the catch arm itself ever throws (e.g. log.debug stub explodes).
  deferred.catch(() => {});
}
|
|
440
504
|
|
|
441
505
|
// Creates an asset discovery queue that uses the percy browser instance to create a page for each
|
|
442
506
|
// snapshot which is used to intercept and capture snapshot resource requests.
|
|
@@ -446,21 +510,84 @@ export function createDiscoveryQueue(percy) {
|
|
|
446
510
|
} = percy.config.discovery;
|
|
447
511
|
let queue = new Queue('discovery');
|
|
448
512
|
let cache;
|
|
513
|
+
let capBytes = null;
|
|
514
|
+
// Read once: saveResource consults this on every call.
|
|
515
|
+
const warnThreshold = readWarnThresholdBytes();
|
|
449
516
|
return queue.set({
|
|
450
517
|
concurrency
|
|
451
|
-
})
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
518
|
+
}).handle('start', async () => {
|
|
519
|
+
const configuredMaxCacheRamMB = percy.config.discovery.maxCacheRam;
|
|
520
|
+
let effectiveMaxCacheRamMB = configuredMaxCacheRamMB;
|
|
521
|
+
|
|
522
|
+
// User's config is not mutated; the post-clamp value lives on stats.
|
|
523
|
+
if (configuredMaxCacheRamMB != null) {
|
|
524
|
+
if (configuredMaxCacheRamMB < MAX_RESOURCE_SIZE_MB) {
|
|
525
|
+
percy.log.warn(`--max-cache-ram=${configuredMaxCacheRamMB}MB is below the ${MAX_RESOURCE_SIZE_MB}MB minimum ` + '(individual resources up to 25MB would otherwise be dropped). ' + `Continuing with the minimum: ${MAX_RESOURCE_SIZE_MB}MB.`);
|
|
526
|
+
effectiveMaxCacheRamMB = MAX_RESOURCE_SIZE_MB;
|
|
527
|
+
} else if (configuredMaxCacheRamMB < MIN_REASONABLE_CAP_MB) {
|
|
528
|
+
percy.log.warn(`--max-cache-ram=${configuredMaxCacheRamMB}MB is very small; ` + 'most resources will not fit and hit rate will be near zero.');
|
|
529
|
+
}
|
|
530
|
+
if (percy.config.discovery.disableCache) {
|
|
531
|
+
percy.log.info('--max-cache-ram is ignored because --disable-cache is set.');
|
|
532
|
+
}
|
|
533
|
+
capBytes = effectiveMaxCacheRamMB * BYTES_PER_MB;
|
|
534
|
+
}
|
|
535
|
+
if (warnThreshold !== DEFAULT_WARN_THRESHOLD_BYTES) {
|
|
536
|
+
percy.log.debug(`PERCY_CACHE_WARN_THRESHOLD_BYTES override active: ${warnThreshold} bytes ` + `(default ${DEFAULT_WARN_THRESHOLD_BYTES}).`);
|
|
537
|
+
}
|
|
538
|
+
percy[CACHE_STATS_KEY] = makeCacheStats();
|
|
539
|
+
percy[CACHE_STATS_KEY].effectiveMaxCacheRamMB = capBytes != null ? effectiveMaxCacheRamMB : null;
|
|
540
|
+
if (capBytes != null) {
|
|
541
|
+
// Overflow tier: RAM evictions spill here. diskStore.set returns
|
|
542
|
+
// false on any I/O failure → caller falls back to drop automatically.
|
|
543
|
+
const diskStore = new DiskSpillStore(createSpillDir(), {
|
|
544
|
+
log: percy.log
|
|
545
|
+
});
|
|
546
|
+
percy[DISK_SPILL_KEY] = diskStore;
|
|
547
|
+
cache = percy[RESOURCE_CACHE_KEY] = new ByteLRU(capBytes, {
|
|
548
|
+
onEvict: (key, reason, value) => {
|
|
549
|
+
if (reason === 'too-big') {
|
|
550
|
+
percy[CACHE_STATS_KEY].oversizeSkipped++;
|
|
551
|
+
percy.log.debug(`cache skip (oversize): ${key}`);
|
|
552
|
+
return;
|
|
553
|
+
}
|
|
554
|
+
const spilled = diskStore.set(key, value);
|
|
555
|
+
percy.log.debug(`cache ${spilled ? 'spill' : 'evict'}: ${key} ` + `(cache ${Math.round(cache.calculatedSize / BYTES_PER_MB)}` + `/${effectiveMaxCacheRamMB}MB, entries=${cache.size}, ` + `disk=${diskStore.size}/${Math.round(diskStore.bytes / BYTES_PER_MB)}MB)`);
|
|
556
|
+
const stats = percy[CACHE_STATS_KEY];
|
|
557
|
+
if (stats && !stats.firstEvictionEventFired) {
|
|
558
|
+
stats.firstEvictionEventFired = true;
|
|
559
|
+
percy.log.info('Cache eviction active — cap reached, oldest entries spilling to disk.');
|
|
560
|
+
fireCacheEventSafe(percy, 'cache_eviction_started', {
|
|
561
|
+
cache_budget_ram_mb: effectiveMaxCacheRamMB,
|
|
562
|
+
cache_peak_bytes_seen: cache.stats.peakBytes,
|
|
563
|
+
eviction_count: cache.stats.evictions,
|
|
564
|
+
disk_spill_enabled: diskStore.ready
|
|
565
|
+
});
|
|
566
|
+
}
|
|
567
|
+
}
|
|
568
|
+
});
|
|
569
|
+
} else {
|
|
570
|
+
cache = percy[RESOURCE_CACHE_KEY] = new Map();
|
|
571
|
+
}
|
|
458
572
|
await percy.browser.launch();
|
|
459
573
|
queue.run();
|
|
460
|
-
})
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
574
|
+
}).handle('end', async () => {
|
|
575
|
+
// Disk-spill cleanup must run even if browser.close() throws — otherwise
|
|
576
|
+
// the per-run temp dir under os.tmpdir() leaks. CACHE_STATS_KEY is set
|
|
577
|
+
// alongside DISK_SPILL_KEY in 'start', so the snapshot is always safe.
|
|
578
|
+
try {
|
|
579
|
+
await percy.browser.close();
|
|
580
|
+
} finally {
|
|
581
|
+
const diskStore = percy[DISK_SPILL_KEY];
|
|
582
|
+
if (diskStore) {
|
|
583
|
+
percy[CACHE_STATS_KEY].finalDiskStats = {
|
|
584
|
+
...diskStore.stats,
|
|
585
|
+
ready: diskStore.ready
|
|
586
|
+
};
|
|
587
|
+
diskStore.destroy();
|
|
588
|
+
delete percy[DISK_SPILL_KEY];
|
|
589
|
+
}
|
|
590
|
+
}
|
|
464
591
|
})
|
|
465
592
|
// snapshots are unique by name and testCase; when deferred also by widths
|
|
466
593
|
.handle('find', ({
|
|
@@ -509,18 +636,9 @@ export function createDiscoveryQueue(percy) {
|
|
|
509
636
|
disableCache: snapshot.discovery.disableCache,
|
|
510
637
|
allowedHostnames: snapshot.discovery.allowedHostnames,
|
|
511
638
|
disallowedHostnames: snapshot.discovery.disallowedHostnames,
|
|
512
|
-
getResource: (u, width = null) =>
|
|
513
|
-
let resource = snapshot.resources.get(u) || cache.get(u);
|
|
514
|
-
if (resource && Array.isArray(resource) && resource[0].root) {
|
|
515
|
-
const rootResource = resource.find(r => {
|
|
516
|
-
var _r$widths;
|
|
517
|
-
return (_r$widths = r.widths) === null || _r$widths === void 0 ? void 0 : _r$widths.includes(width);
|
|
518
|
-
});
|
|
519
|
-
resource = rootResource || resource[0];
|
|
520
|
-
}
|
|
521
|
-
return resource;
|
|
522
|
-
},
|
|
639
|
+
getResource: (u, width = null) => lookupCacheResource(percy, snapshot.resources, cache, u, width),
|
|
523
640
|
saveResource: r => {
|
|
641
|
+
var _percy$DISK_SPILL_KEY;
|
|
524
642
|
const limitResources = process.env.LIMIT_SNAPSHOT_RESOURCES || false;
|
|
525
643
|
const MAX_RESOURCES = Number(process.env.MAX_SNAPSHOT_RESOURCES) || 749;
|
|
526
644
|
if (limitResources && snapshot.resources.size >= MAX_RESOURCES) {
|
|
@@ -528,8 +646,31 @@ export function createDiscoveryQueue(percy) {
|
|
|
528
646
|
return;
|
|
529
647
|
}
|
|
530
648
|
snapshot.resources.set(r.url, r);
|
|
531
|
-
if (
|
|
649
|
+
if (snapshot.discovery.disableCache) return;
|
|
650
|
+
|
|
651
|
+
// Fresh write supersedes any prior spill — prevents races
|
|
652
|
+
// where getResource could serve a stale disk copy.
|
|
653
|
+
if ((_percy$DISK_SPILL_KEY = percy[DISK_SPILL_KEY]) !== null && _percy$DISK_SPILL_KEY !== void 0 && _percy$DISK_SPILL_KEY.has(r.url)) {
|
|
654
|
+
percy[DISK_SPILL_KEY].delete(r.url);
|
|
655
|
+
}
|
|
656
|
+
if (capBytes != null) {
|
|
657
|
+
// ByteLRU fires onEvict('too-big') for oversize entries;
|
|
658
|
+
// the oversize_skipped stat + debug log live there.
|
|
659
|
+
cache.set(r.url, r, entrySize(r));
|
|
660
|
+
} else {
|
|
661
|
+
// Subtract the prior entry's footprint before overwriting so
|
|
662
|
+
// the byte counter tracks current cache contents rather than
|
|
663
|
+
// cumulative writes. Without this, the same shared CSS saved
|
|
664
|
+
// across N snapshots would inflate unsetModeBytes by N×.
|
|
665
|
+
const stats = percy[CACHE_STATS_KEY];
|
|
666
|
+
const prior = cache.get(r.url);
|
|
667
|
+
if (prior) stats.unsetModeBytes -= entrySize(prior);
|
|
532
668
|
cache.set(r.url, r);
|
|
669
|
+
stats.unsetModeBytes += entrySize(r);
|
|
670
|
+
if (!stats.warningFired && stats.unsetModeBytes >= warnThreshold) {
|
|
671
|
+
stats.warningFired = true;
|
|
672
|
+
percy.log.warn(`Percy cache is using ${(stats.unsetModeBytes / BYTES_PER_MB).toFixed(1)}MB. ` + 'If your CI is memory-constrained, set --max-cache-ram. ' + 'See https://www.browserstack.com/docs/percy/cli/managing-cache-memory');
|
|
673
|
+
}
|
|
533
674
|
}
|
|
534
675
|
}
|
|
535
676
|
}
|
package/dist/percy.js
CHANGED
|
@@ -17,7 +17,7 @@ import Pako from 'pako';
|
|
|
17
17
|
import { base64encode, generatePromise, yieldAll, yieldTo, redactSecrets, detectSystemProxyAndLog, checkSDKVersion, processCorsIframes } from './utils.js';
|
|
18
18
|
import { createPercyServer, createStaticServer } from './api.js';
|
|
19
19
|
import { gatherSnapshots, createSnapshotsQueue, validateSnapshotOptions } from './snapshot.js';
|
|
20
|
-
import { discoverSnapshotResources, createDiscoveryQueue } from './discovery.js';
|
|
20
|
+
import { discoverSnapshotResources, createDiscoveryQueue, RESOURCE_CACHE_KEY, CACHE_STATS_KEY, DISK_SPILL_KEY } from './discovery.js';
|
|
21
21
|
import Monitoring from '@percy/monitoring';
|
|
22
22
|
import { WaitForJob } from './wait-for-job.js';
|
|
23
23
|
const MAX_SUGGESTION_CALLS = 10;
|
|
@@ -429,7 +429,67 @@ export class Percy {
|
|
|
429
429
|
// This issue doesn't comes under regular error logs,
|
|
430
430
|
// it's detected if we just and stop percy server
|
|
431
431
|
await this.checkForNoSnapshotCommandError();
|
|
432
|
+
// sendBuildLogs goes first — it's the primary egress. cache_summary is
|
|
433
|
+
// analytics, ordered after so a slow pager hop cannot delay the logs.
|
|
432
434
|
await this.sendBuildLogs();
|
|
435
|
+
await this.sendCacheSummary();
|
|
436
|
+
}
|
|
437
|
+
}
|
|
438
|
+
|
|
439
|
+
// Single egress point for cache-tier telemetry. Used by sendCacheSummary
|
|
440
|
+
// (awaited at stop) and discovery's fire-and-forget eviction event. Returns
|
|
441
|
+
// early if no build is associated, swallows pager rejections — telemetry
|
|
442
|
+
// loss must never fail a build.
|
|
443
|
+
async sendCacheTelemetry(message, extra) {
|
|
444
|
+
var _this$build2;
|
|
445
|
+
if (!((_this$build2 = this.build) !== null && _this$build2 !== void 0 && _this$build2.id)) return;
|
|
446
|
+
try {
|
|
447
|
+
await this.client.sendBuildEvents(this.build.id, {
|
|
448
|
+
message,
|
|
449
|
+
cliVersion: this.client.cliVersion,
|
|
450
|
+
clientInfo: this.clientInfo,
|
|
451
|
+
extra
|
|
452
|
+
});
|
|
453
|
+
} catch (err) {
|
|
454
|
+
this.log.debug(`${message} telemetry failed`, err);
|
|
455
|
+
}
|
|
456
|
+
}
|
|
457
|
+
|
|
458
|
+
// Cache-usage summary fired at stop. The whole method is wrapped — the
|
|
459
|
+
// contract is "telemetry must never fail percy.stop()", which covers the
|
|
460
|
+
// payload-construction block as well as the egress.
|
|
461
|
+
async sendCacheSummary() {
|
|
462
|
+
try {
|
|
463
|
+
var _stats$finalDiskStats;
|
|
464
|
+
const cache = this[RESOURCE_CACHE_KEY];
|
|
465
|
+
const stats = this[CACHE_STATS_KEY];
|
|
466
|
+
if (!cache || !stats) return;
|
|
467
|
+
const cacheStats = typeof cache.stats === 'object' ? cache.stats : null;
|
|
468
|
+
// diskStore is destroyed by discovery 'end' before this runs, so fall
|
|
469
|
+
// back to the snapshot captured in stats.finalDiskStats.
|
|
470
|
+
const diskStore = this[DISK_SPILL_KEY];
|
|
471
|
+
const diskSnap = (diskStore === null || diskStore === void 0 ? void 0 : diskStore.stats) ?? stats.finalDiskStats;
|
|
472
|
+
const diskReady = diskStore ? diskStore.ready : !!((_stats$finalDiskStats = stats.finalDiskStats) !== null && _stats$finalDiskStats !== void 0 && _stats$finalDiskStats.ready);
|
|
473
|
+
await this.sendCacheTelemetry('cache_summary', {
|
|
474
|
+
cache_budget_ram_mb: stats.effectiveMaxCacheRamMB,
|
|
475
|
+
hits: (cacheStats === null || cacheStats === void 0 ? void 0 : cacheStats.hits) ?? 0,
|
|
476
|
+
misses: (cacheStats === null || cacheStats === void 0 ? void 0 : cacheStats.misses) ?? 0,
|
|
477
|
+
evictions: (cacheStats === null || cacheStats === void 0 ? void 0 : cacheStats.evictions) ?? 0,
|
|
478
|
+
peak_bytes: (cacheStats === null || cacheStats === void 0 ? void 0 : cacheStats.peakBytes) ?? stats.unsetModeBytes,
|
|
479
|
+
final_bytes: cache.calculatedSize ?? stats.unsetModeBytes,
|
|
480
|
+
entry_count: cache.size ?? 0,
|
|
481
|
+
oversize_skipped: stats.oversizeSkipped,
|
|
482
|
+
disk_spill_enabled: diskReady,
|
|
483
|
+
disk_spilled_count: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.spilled) ?? 0,
|
|
484
|
+
disk_restored_count: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.restored) ?? 0,
|
|
485
|
+
disk_spill_failures: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.spillFailures) ?? 0,
|
|
486
|
+
disk_read_failures: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.readFailures) ?? 0,
|
|
487
|
+
disk_peak_bytes: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.peakBytes) ?? 0,
|
|
488
|
+
disk_final_bytes: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.currentBytes) ?? 0,
|
|
489
|
+
disk_final_entries: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.entries) ?? 0
|
|
490
|
+
});
|
|
491
|
+
} catch (err) {
|
|
492
|
+
this.log.debug('cache_summary build failed', err);
|
|
433
493
|
}
|
|
434
494
|
}
|
|
435
495
|
checkAndUpdateConcurrency() {
|
|
@@ -487,10 +547,10 @@ export class Percy {
|
|
|
487
547
|
// snapshots. Once asset discovery has completed for the provided snapshots, the queued task will
|
|
488
548
|
// resolve and an upload task will be queued separately.
|
|
489
549
|
snapshot(options, snapshotPromise = {}) {
|
|
490
|
-
var _this$
|
|
550
|
+
var _this$build3;
|
|
491
551
|
if (this.readyState !== 1) {
|
|
492
552
|
throw new Error('Not running');
|
|
493
|
-
} else if ((_this$
|
|
553
|
+
} else if ((_this$build3 = this.build) !== null && _this$build3 !== void 0 && _this$build3.error) {
|
|
494
554
|
throw new Error(this.build.error);
|
|
495
555
|
} else if (Array.isArray(options)) {
|
|
496
556
|
return yieldAll(options.map(o => this.yield.snapshot(o, snapshotPromise)));
|
|
@@ -720,7 +780,7 @@ export class Percy {
|
|
|
720
780
|
async sendBuildLogs() {
|
|
721
781
|
if (!process.env.PERCY_TOKEN) return;
|
|
722
782
|
try {
|
|
723
|
-
var _this$
|
|
783
|
+
var _this$build4, _this$build5, _this$build6, _this$build7;
|
|
724
784
|
const logsObject = {
|
|
725
785
|
clilogs: logger.query(log => !['ci'].includes(log.debug))
|
|
726
786
|
};
|
|
@@ -732,10 +792,10 @@ export class Percy {
|
|
|
732
792
|
logsObject.cilogs = redactedContent;
|
|
733
793
|
}
|
|
734
794
|
const content = base64encode(Pako.gzip(JSON.stringify(logsObject)));
|
|
735
|
-
const referenceId = (_this$
|
|
795
|
+
const referenceId = (_this$build4 = this.build) !== null && _this$build4 !== void 0 && _this$build4.id ? `build_${(_this$build5 = this.build) === null || _this$build5 === void 0 ? void 0 : _this$build5.id}` : (_this$build6 = this.build) === null || _this$build6 === void 0 ? void 0 : _this$build6.id;
|
|
736
796
|
const eventObject = {
|
|
737
797
|
content: content,
|
|
738
|
-
build_id: (_this$
|
|
798
|
+
build_id: (_this$build7 = this.build) === null || _this$build7 === void 0 ? void 0 : _this$build7.id,
|
|
739
799
|
reference_id: referenceId,
|
|
740
800
|
service_name: 'cli',
|
|
741
801
|
base64encoded: true
|
|
@@ -808,9 +868,9 @@ export class Percy {
|
|
|
808
868
|
const newAllowedDomains = Array.from(processedHosts).filter(domain => !autoConfiguredHosts.has(domain));
|
|
809
869
|
const hasNewDomains = newAllowedDomains.length > 0 || newErrorHosts.size > 0;
|
|
810
870
|
try {
|
|
811
|
-
var _this$
|
|
871
|
+
var _this$build8;
|
|
812
872
|
await this.client.updateProjectDomainConfig({
|
|
813
|
-
buildId: (_this$
|
|
873
|
+
buildId: (_this$build8 = this.build) === null || _this$build8 === void 0 ? void 0 : _this$build8.id,
|
|
814
874
|
allowedDomains: Array.from(processedHosts),
|
|
815
875
|
errorDomains: Array.from(newErrorHosts)
|
|
816
876
|
});
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@percy/core",
|
|
3
|
-
"version": "1.31.14-beta.
|
|
3
|
+
"version": "1.31.14-beta.1",
|
|
4
4
|
"license": "MIT",
|
|
5
5
|
"repository": {
|
|
6
6
|
"type": "git",
|
|
@@ -43,12 +43,12 @@
|
|
|
43
43
|
"test:types": "tsd"
|
|
44
44
|
},
|
|
45
45
|
"dependencies": {
|
|
46
|
-
"@percy/client": "1.31.14-beta.
|
|
47
|
-
"@percy/config": "1.31.14-beta.
|
|
48
|
-
"@percy/dom": "1.31.14-beta.
|
|
49
|
-
"@percy/logger": "1.31.14-beta.
|
|
50
|
-
"@percy/monitoring": "1.31.14-beta.
|
|
51
|
-
"@percy/webdriver-utils": "1.31.14-beta.
|
|
46
|
+
"@percy/client": "1.31.14-beta.1",
|
|
47
|
+
"@percy/config": "1.31.14-beta.1",
|
|
48
|
+
"@percy/dom": "1.31.14-beta.1",
|
|
49
|
+
"@percy/logger": "1.31.14-beta.1",
|
|
50
|
+
"@percy/monitoring": "1.31.14-beta.1",
|
|
51
|
+
"@percy/webdriver-utils": "1.31.14-beta.1",
|
|
52
52
|
"content-disposition": "^0.5.4",
|
|
53
53
|
"cross-spawn": "^7.0.3",
|
|
54
54
|
"extract-zip": "^2.0.1",
|
|
@@ -62,7 +62,7 @@
|
|
|
62
62
|
"yaml": "^2.4.1"
|
|
63
63
|
},
|
|
64
64
|
"optionalDependencies": {
|
|
65
|
-
"@percy/cli-doctor": "1.31.14-beta.
|
|
65
|
+
"@percy/cli-doctor": "1.31.14-beta.1"
|
|
66
66
|
},
|
|
67
|
-
"gitHead": "
|
|
67
|
+
"gitHead": "dd6957822cc94d460fd9c043a44cc4c9c5fcba23"
|
|
68
68
|
}
|