@percy/core 1.31.14-beta.0 → 1.31.14-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/dist/api.js +2 -2
- package/dist/archive.js +117 -0
- package/dist/cache/byte-lru.js +352 -0
- package/dist/config.js +10 -0
- package/dist/discovery.js +170 -29
- package/dist/percy.js +117 -9
- package/package.json +10 -9
package/README.md
CHANGED
|
@@ -53,6 +53,7 @@ The following options can also be defined within a Percy config file
|
|
|
53
53
|
- `requestHeaders` — Request headers used when discovering snapshot assets
|
|
54
54
|
- `authorization` — Basic auth `username` and `password` for protected snapshot assets
|
|
55
55
|
- `disableCache` — Disable asset caching (**default** `false`)
|
|
56
|
+
- `maxCacheRam` — Cap the asset-discovery cache at this many MB (**default** unset/unbounded). When set, least-recently-used resources are evicted to stay within the cap. MB is decimal (1 MB = 1,000,000 bytes), not binary MiB (1,048,576). The cap measures cache body bytes only; process RSS is typically 1.5–2× the cap due to Node's Buffer slab allocator. Values below 25 MB are clamped to 25 MB with a warn log (the per-resource ceiling is 25 MB, so smaller caps would reject every resource). Also settable via the `--max-cache-ram <MB>` CLI flag or the `PERCY_MAX_CACHE_RAM` env var
|
|
56
57
|
- `userAgent` — Custom user-agent string used when requesting assets
|
|
57
58
|
- `cookies` — Browser cookies to use when requesting assets
|
|
58
59
|
- `networkIdleTimeout` — Milliseconds to wait for the network to idle (**default** `100`)
|
package/dist/api.js
CHANGED
|
@@ -259,7 +259,7 @@ export function createPercyServer(percy, port) {
|
|
|
259
259
|
if (cmd === 'reset') {
|
|
260
260
|
// the reset command will reset testing mode and clear any logs
|
|
261
261
|
percy.testing = {};
|
|
262
|
-
logger.instance.
|
|
262
|
+
logger.instance.reset();
|
|
263
263
|
} else if (cmd === 'version') {
|
|
264
264
|
// the version command will update the api version header for testing
|
|
265
265
|
percy.testing.version = body;
|
|
@@ -304,7 +304,7 @@ export function createPercyServer(percy, port) {
|
|
|
304
304
|
}))
|
|
305
305
|
// returns an array of raw logs from the logger
|
|
306
306
|
.route('get', '/test/logs', (req, res) => res.json(200, {
|
|
307
|
-
logs:
|
|
307
|
+
logs: logger.instance.query(() => true)
|
|
308
308
|
}))
|
|
309
309
|
// serves a very basic html page for testing snapshots
|
|
310
310
|
.route('get', '/test/snapshot', (req, res) => {
|
package/dist/archive.js
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
import fs from 'fs';
|
|
2
|
+
import path from 'path';
|
|
3
|
+
import crypto from 'crypto';
|
|
4
|
+
const ARCHIVE_VERSION = 1;
|
|
5
|
+
const MAX_FILENAME_LENGTH = 200;
|
|
6
|
+
const UNSAFE_CHARS = /[/\\:*?"<>|]/g;
|
|
7
|
+
|
|
8
|
+
// Validates the archive dir to prevent path traversal attacks.
// Returns the resolved absolute path.
//
// NOTE(review): `path.resolve` already collapses `..` segments, so the
// `includes('..')` check below can likely never fire — the traversal guard
// may be dead code. If rejecting `..` in the *user-supplied* string is the
// intent, the raw `archiveDir` should be inspected before resolving.
// TODO confirm intended semantics before changing behavior.
export function validateArchiveDir(archiveDir) {
  let resolved = path.resolve(archiveDir);
  let normalized = path.normalize(resolved);

  // Reject if the normalized path still contains '..' segments
  if (normalized.split(path.sep).includes('..')) {
    throw new Error(`Invalid archive dir: path traversal detected in "${archiveDir}"`);
  }
  return resolved;
}
|
|
20
|
+
|
|
21
|
+
// Sanitizes a snapshot name into a safe filename.
// Unsafe path characters become '_', the stem is truncated to
// MAX_FILENAME_LENGTH, and a short sha256 digest of the *original* name is
// appended so two names that sanitize identically still get distinct files.
export function sanitizeFilename(name) {
  const digest = crypto
    .createHash('sha256')
    .update(name)
    .digest('hex')
    .slice(0, 8);

  let stem = name.replace(UNSAFE_CHARS, '_');
  if (stem.length > MAX_FILENAME_LENGTH) stem = stem.slice(0, MAX_FILENAME_LENGTH);

  return `${stem}-${digest}`;
}
|
|
33
|
+
|
|
34
|
+
// Serializes a snapshot into a JSON-safe object for archiving.
// Resources have their binary content base64-encoded; null/undefined content
// is preserved as null. The returned object carries ARCHIVE_VERSION so
// deserializeSnapshot can reject incompatible formats.
export function serializeSnapshot(snapshot) {
  const { resources, ...snapshotData } = snapshot;

  // `|| []` (not a destructuring default) so a null resources list is
  // tolerated, matching the read side.
  const encoded = (resources || []).map(resource => ({
    ...resource,
    content: resource.content ? Buffer.from(resource.content).toString('base64') : null
  }));

  return {
    version: ARCHIVE_VERSION,
    snapshot: snapshotData,
    resources: encoded
  };
}
|
|
50
|
+
|
|
51
|
+
// Validates and deserializes an archived snapshot from parsed JSON.
// Decodes base64 resource content back to Buffers.
// Throws on: non-object input, version mismatch, missing/empty snapshot
// name, and a missing or empty resources array.
export function deserializeSnapshot(data) {
  if (!data || typeof data !== 'object') {
    throw new Error('Invalid archive: expected an object');
  }
  if (data.version !== ARCHIVE_VERSION) {
    throw new Error(`Unsupported archive version: ${data.version} (expected ${ARCHIVE_VERSION})`);
  }

  const { snapshot, resources } = data;
  if (!snapshot || typeof snapshot.name !== 'string' || !snapshot.name) {
    throw new Error('Invalid archive: missing snapshot name');
  }
  if (!Array.isArray(resources) || resources.length === 0) {
    throw new Error('Invalid archive: missing or empty resources');
  }

  const decoded = resources.map(resource => ({
    ...resource,
    content: resource.content ? Buffer.from(resource.content, 'base64') : null
  }));

  return { ...snapshot, resources: decoded };
}
|
|
74
|
+
|
|
75
|
+
// Archives a single snapshot to the archive directory.
// Creates the directory if it doesn't exist.
//
// FIX: the filepath was previously built from the literal template
// `$(unknown).json` — `$( )` is not JS interpolation, so the sanitized
// `filename` was computed and then ignored, and *every* snapshot was written
// to the same "$(unknown).json" file, each write clobbering the last. Use
// `${filename}.json` so each snapshot gets its own file.
export function archiveSnapshot(archiveDir, snapshot) {
  fs.mkdirSync(archiveDir, {
    recursive: true
  });
  let filename = sanitizeFilename(snapshot.name);
  let filepath = path.join(archiveDir, `${filename}.json`);
  let serialized = serializeSnapshot(snapshot);
  fs.writeFileSync(filepath, JSON.stringify(serialized));
}
|
|
86
|
+
|
|
87
|
+
// Reads all archived snapshots from the given directory.
// Skips symlinks (security) and invalid files, emitting a warn log for each
// via the optional `log` argument. Returns the successfully parsed snapshots.
export function readArchivedSnapshots(archiveDir, log) {
  const resolved = validateArchiveDir(archiveDir);
  if (!fs.existsSync(resolved) || !fs.lstatSync(resolved).isDirectory()) {
    throw new Error(`Archive directory not found: ${archiveDir}`);
  }

  const snapshots = [];
  for (const entry of fs.readdirSync(resolved)) {
    if (!entry.endsWith('.json')) continue;
    const filepath = path.join(resolved, entry);
    const stat = fs.lstatSync(filepath);

    // Symlinks could point outside the archive dir — refuse to follow them.
    if (stat.isSymbolicLink()) {
      log?.warn(`Skipping symlink: ${entry}`);
      continue;
    }
    if (!stat.isFile()) continue;

    try {
      const parsed = JSON.parse(fs.readFileSync(filepath, 'utf-8'));
      snapshots.push(deserializeSnapshot(parsed));
    } catch (error) {
      // One corrupt file must not abort the whole restore.
      log?.warn(`Skipping invalid archive file "${entry}": ${error.message}`);
    }
  }
  return snapshots;
}
|
|
@@ -0,0 +1,352 @@
|
|
|
1
|
+
// Babel-generated runtime helpers that emulate native #private class members
// for the transpiled DiskSpillStore below: private fields live in module-level
// WeakMaps keyed by instance, and _assertClassBrand enforces the "brand"
// check (the receiver really is a DiskSpillStore) before access. Generated
// code — do not call or modify by hand.
function _classPrivateMethodInitSpec(e, a) { _checkPrivateRedeclaration(e, a), a.add(e); }
function _classPrivateFieldInitSpec(e, t, a) { _checkPrivateRedeclaration(e, t), t.set(e, a); }
function _checkPrivateRedeclaration(e, t) { if (t.has(e)) throw new TypeError("Cannot initialize the same private elements twice on an object"); }
function _classPrivateFieldGet(s, a) { return s.get(_assertClassBrand(s, a)); }
function _classPrivateFieldSet(s, a, r) { return s.set(_assertClassBrand(s, a), r), r; }
function _assertClassBrand(e, t, n) { if ("function" == typeof e ? e === t : e.has(t)) return arguments.length < 3 ? t : n; throw new TypeError("Private element is not present on this object"); }
|
|
7
|
+
// Two-tier cache used by asset discovery:
|
|
8
|
+
// ByteLRU — byte-budget in-memory LRU; Map insertion order = LRU order.
|
|
9
|
+
// DiskSpillStore — on-disk overflow tier. RAM evictions spill here; lookups
|
|
10
|
+
// fall back to disk before refetching from origin.
|
|
11
|
+
// All operations are synchronous; callers (network intercept, ByteLRU.set)
|
|
12
|
+
// cannot yield to the event loop mid-op. Per-entry size is capped at 25MB
|
|
13
|
+
// upstream so disk I/O latency is bounded.
|
|
14
|
+
|
|
15
|
+
import fs from 'fs';
|
|
16
|
+
import os from 'os';
|
|
17
|
+
import path from 'path';
|
|
18
|
+
import crypto from 'crypto';
|
|
19
|
+
const DEFAULT_PER_ENTRY_OVERHEAD = 512;
|
|
20
|
+
// Byte-budgeted LRU cache. Map iteration order doubles as recency order
// (oldest first); a get() re-inserts the entry to mark it most recently used.
// With `maxBytes === undefined` the cache is unbounded and never evicts.
export class ByteLRU {
  #entries = new Map();
  #totalBytes = 0;
  #maxBytes;
  #counters = {
    hits: 0,
    misses: 0,
    evictions: 0,
    peakBytes: 0
  };
  // Optional callback: onEvict(key, reason, value), reason is 'lru' for a
  // budget eviction or 'too-big' for an entry larger than the entire cap.
  onEvict;

  constructor(maxBytes, { onEvict } = {}) {
    this.#maxBytes = maxBytes;
    this.onEvict = onEvict;
  }

  // Returns the cached value (refreshing its recency) or undefined on a miss.
  get(key) {
    const entry = this.#entries.get(key);
    if (!entry) {
      this.#counters.misses++;
      return undefined;
    }
    // delete+set moves the entry to the hot end of the Map.
    this.#entries.delete(key);
    this.#entries.set(key, entry);
    this.#counters.hits++;
    return entry.value;
  }

  // Inserts/replaces an entry; returns false when rejected (bad size or
  // larger than the whole budget). May synchronously evict cold entries.
  set(key, value, size) {
    if (!Number.isFinite(size) || size < 0) return false;

    // Oversize is rejected BEFORE touching any existing entry — a failed set
    // on an existing key must not evict the prior (valid) entry.
    if (this.#maxBytes !== undefined && size > this.#maxBytes) {
      this.onEvict?.(key, 'too-big', value);
      return false;
    }

    const prior = this.#entries.get(key);
    if (prior) {
      this.#totalBytes -= prior.size;
      this.#entries.delete(key);
    }

    this.#entries.set(key, { value, size });
    this.#totalBytes += size;
    if (this.#totalBytes > this.#counters.peakBytes) {
      this.#counters.peakBytes = this.#totalBytes;
    }

    // Evict from the cold end until back under budget.
    while (this.#maxBytes !== undefined && this.#totalBytes > this.#maxBytes) {
      const [coldKey, coldEntry] = this.#entries.entries().next().value;
      this.#totalBytes -= coldEntry.size;
      this.#entries.delete(coldKey);
      this.#counters.evictions++;
      this.onEvict?.(coldKey, 'lru', coldEntry.value);
    }
    return true;
  }

  has(key) {
    return this.#entries.has(key);
  }

  delete(key) {
    const entry = this.#entries.get(key);
    if (!entry) return false;
    this.#totalBytes -= entry.size;
    return this.#entries.delete(key);
  }

  // Drops all entries. Counters (hits/misses/evictions/peak) are kept.
  clear() {
    this.#entries.clear();
    this.#totalBytes = 0;
  }

  get size() {
    return this.#entries.size;
  }

  get calculatedSize() {
    return this.#totalBytes;
  }

  get stats() {
    return {
      ...this.#counters,
      currentBytes: this.#totalBytes
    };
  }
}
|
|
102
|
+
|
|
103
|
+
// Returns the byte length of a resource's content. Buffer.byteLength is used
// for strings so multi-byte UTF-8 (CJK, emoji) is counted in bytes rather
// than JS string units — otherwise the cache budget drifts past its cap.
function contentBytes(content) {
  if (content == null) return 0;
  if (Buffer.isBuffer(content)) return content.length;
  return typeof content === 'string'
    ? Buffer.byteLength(content)
    : content.length ?? 0;
}

// Computes the budget charge for a cache entry: content bytes plus a fixed
// per-resource overhead. Handles the two Percy cache-entry shapes — a single
// resource, or an array of roots captured at multiple widths (see
// discovery.js parseDomResources); arrays charge overhead per element.
export function entrySize(resource, overhead = DEFAULT_PER_ENTRY_OVERHEAD) {
  if (!Array.isArray(resource)) {
    return contentBytes(resource?.content) + overhead;
  }
  let total = 0;
  for (const element of resource) {
    total += contentBytes(element?.content) + overhead;
  }
  return total;
}
|
|
121
|
+
|
|
122
|
+
// Multi-width root arrays carry per-element binary content. Buffers are
// wrapped as { __buf: <base64> } inside JSON so the whole array survives a
// disk roundtrip; null content stays null, anything else is stringified.
function encodeArrayElement(r) {
  if (!r) return r;
  const { content, ...rest } = r;

  let encoded;
  if (content == null) {
    encoded = null;
  } else if (Buffer.isBuffer(content)) {
    encoded = { __buf: content.toString('base64') };
  } else {
    encoded = String(content);
  }

  return { ...rest, content: encoded };
}
|
|
146
|
+
// Inverse of encodeArrayElement: { __buf: <base64> } wrappers become Buffers
// again; every other content shape (null, strings) passes through unchanged.
function decodeArrayElement(r) {
  if (!r) return r;
  const { content, ...rest } = r;

  const isEncodedBuffer =
    content && typeof content === 'object' && '__buf' in content;

  return {
    ...rest,
    content: isEncodedBuffer ? Buffer.from(content.__buf, 'base64') : content
  };
}
|
|
163
|
+
// Transpiled private-field storage for DiskSpillStore (see the Babel helper
// functions above): one WeakMap per #field, keyed by instance, plus a brand
// WeakSet guarding the private _removeEntry method.
var _index = /*#__PURE__*/new WeakMap();
var _bytes = /*#__PURE__*/new WeakMap();
var _peakBytes = /*#__PURE__*/new WeakMap();
var _stats = /*#__PURE__*/new WeakMap();
var _counter = /*#__PURE__*/new WeakMap();
var _ready = /*#__PURE__*/new WeakMap();
var _DiskSpillStore_brand = /*#__PURE__*/new WeakSet();
// On-disk overflow tier for the ByteLRU cache. An in-memory index (#index)
// maps URL → { path, size, isArray, meta }; file bodies live under `dir`
// with counter-based names. Construction is best-effort: if the dir cannot
// be created, #ready stays false and every set() returns false.
export class DiskSpillStore {
  constructor(dir, {
    log
  } = {}) {
    _classPrivateMethodInitSpec(this, _DiskSpillStore_brand);
    _classPrivateFieldInitSpec(this, _index, new Map());
    _classPrivateFieldInitSpec(this, _bytes, 0);
    _classPrivateFieldInitSpec(this, _peakBytes, 0);
    _classPrivateFieldInitSpec(this, _stats, {
      spilled: 0,
      restored: 0,
      spillFailures: 0,
      readFailures: 0
    });
    _classPrivateFieldInitSpec(this, _counter, 0);
    _classPrivateFieldInitSpec(this, _ready, false);
    this.dir = dir;
    this.log = log;
    try {
      // mode 0o700: spilled bytes are origin-fetchable so the threat model is
      // small, but on shared-tenant CI hosts other users on the same box
      // shouldn't be able to read them.
      fs.mkdirSync(dir, {
        recursive: true,
        mode: 0o700
      });
      _classPrivateFieldSet(_ready, this, true);
    } catch (err) {
      var _this$log, _this$log$debug;
      // Init failure is non-fatal: the store just stays !ready (transpiled
      // `this.log?.debug?.(...)`).
      (_this$log = this.log) === null || _this$log === void 0 || (_this$log$debug = _this$log.debug) === null || _this$log$debug === void 0 || _this$log$debug.call(_this$log, `disk-spill init failed for ${dir}: ${err.message}`);
    }
  }

  // Returns true on success; false on any failure so caller falls back to drop.
  // Overwrites prior spill for the same URL — a fresh discovery write wins.
  // Two resource shapes are supported: a single resource with a binary
  // .content, and a multi-width root array (see entrySize for the array
  // shape). Arrays are JSON-encoded with base64 buffers so the whole array
  // survives the disk roundtrip.
  set(url, resource) {
    var _this$counter;
    if (!_classPrivateFieldGet(_ready, this)) return false;
    let bytes;
    let meta;
    let isArray = false;
    if (Array.isArray(resource)) {
      isArray = true;
      try {
        bytes = Buffer.from(JSON.stringify(resource.map(encodeArrayElement)));
      } catch {
        // e.g. circular references in a resource — treat as un-spillable.
        return false;
      }
    } else {
      let content = resource === null || resource === void 0 ? void 0 : resource.content;
      if (content == null) return false;
      if (!Buffer.isBuffer(content)) {
        try {
          content = Buffer.from(content);
        } catch {
          return false;
        }
      }
      bytes = content;
      // Non-content fields are kept in the in-memory index so get() can
      // reattach them without reading anything extra from disk.
      meta = {
        ...resource
      };
      delete meta.content;
    }

    // Counter-based filename keeps URL-derived data out of path.join —
    // avoids any path-traversal surface even though sha256 would be safe.
    const filepath = path.join(this.dir, String(_classPrivateFieldSet(_counter, this, (_this$counter = _classPrivateFieldGet(_counter, this), ++_this$counter))));
    try {
      fs.writeFileSync(filepath, bytes);
    } catch (err) {
      var _this$log2, _this$log2$debug;
      _classPrivateFieldGet(_stats, this).spillFailures++;
      (_this$log2 = this.log) === null || _this$log2 === void 0 || (_this$log2$debug = _this$log2.debug) === null || _this$log2$debug === void 0 || _this$log2$debug.call(_this$log2, `disk-spill write failed for ${url}: ${err.message}`);
      return false;
    }
    // New file is written before the old one is unlinked, so a failed write
    // above never destroys the previous (still valid) spill for this URL.
    if (_classPrivateFieldGet(_index, this).has(url)) {
      const prev = _classPrivateFieldGet(_index, this).get(url);
      _classPrivateFieldSet(_bytes, this, _classPrivateFieldGet(_bytes, this) - prev.size);
      try {
        fs.unlinkSync(prev.path);
      } catch {/* best-effort */}
    }
    _classPrivateFieldGet(_index, this).set(url, {
      path: filepath,
      size: bytes.length,
      isArray,
      meta
    });
    _classPrivateFieldSet(_bytes, this, _classPrivateFieldGet(_bytes, this) + bytes.length);
    if (_classPrivateFieldGet(_bytes, this) > _classPrivateFieldGet(_peakBytes, this)) _classPrivateFieldSet(_peakBytes, this, _classPrivateFieldGet(_bytes, this));
    _classPrivateFieldGet(_stats, this).spilled++;
    return true;
  }
  // Reads a spilled resource back. Any read/decode failure removes the broken
  // entry and returns undefined so the caller refetches from origin.
  get(url) {
    const entry = _classPrivateFieldGet(_index, this).get(url);
    if (!entry) return undefined;
    let raw;
    try {
      raw = fs.readFileSync(entry.path);
    } catch (err) {
      var _this$log3, _this$log3$debug;
      _classPrivateFieldGet(_stats, this).readFailures++;
      (_this$log3 = this.log) === null || _this$log3 === void 0 || (_this$log3$debug = _this$log3.debug) === null || _this$log3$debug === void 0 || _this$log3$debug.call(_this$log3, `disk-spill read failed for ${url}: ${err.message}`);
      _assertClassBrand(_DiskSpillStore_brand, this, _removeEntry).call(this, url, entry);
      return undefined;
    }
    if (entry.isArray) {
      let arr;
      try {
        arr = JSON.parse(raw.toString('utf8')).map(decodeArrayElement);
      } catch (err) {
        var _this$log4, _this$log4$debug;
        _classPrivateFieldGet(_stats, this).readFailures++;
        (_this$log4 = this.log) === null || _this$log4 === void 0 || (_this$log4$debug = _this$log4.debug) === null || _this$log4$debug === void 0 || _this$log4$debug.call(_this$log4, `disk-spill array-decode failed for ${url}: ${err.message}`);
        _assertClassBrand(_DiskSpillStore_brand, this, _removeEntry).call(this, url, entry);
        return undefined;
      }
      _classPrivateFieldGet(_stats, this).restored++;
      return arr;
    }
    _classPrivateFieldGet(_stats, this).restored++;
    return {
      ...entry.meta,
      content: raw
    };
  }
  has(url) {
    return _classPrivateFieldGet(_index, this).has(url);
  }
  delete(url) {
    const entry = _classPrivateFieldGet(_index, this).get(url);
    if (!entry) return false;
    _assertClassBrand(_DiskSpillStore_brand, this, _removeEntry).call(this, url, entry);
    return true;
  }
  // Removes the whole spill dir and resets live state. Note #peakBytes and
  // #stats are NOT reset — presumably kept for end-of-run telemetry; confirm.
  destroy() {
    try {
      if (_classPrivateFieldGet(_ready, this)) fs.rmSync(this.dir, {
        recursive: true,
        force: true
      });
    } catch (err) {
      var _this$log5, _this$log5$debug;
      (_this$log5 = this.log) === null || _this$log5 === void 0 || (_this$log5$debug = _this$log5.debug) === null || _this$log5$debug === void 0 || _this$log5$debug.call(_this$log5, `disk-spill cleanup failed for ${this.dir}: ${err.message}`);
    }
    _classPrivateFieldGet(_index, this).clear();
    _classPrivateFieldSet(_bytes, this, 0);
    _classPrivateFieldSet(_ready, this, false);
  }
  get size() {
    return _classPrivateFieldGet(_index, this).size;
  }
  get bytes() {
    return _classPrivateFieldGet(_bytes, this);
  }
  get ready() {
    return _classPrivateFieldGet(_ready, this);
  }
  get stats() {
    return {
      ..._classPrivateFieldGet(_stats, this),
      currentBytes: _classPrivateFieldGet(_bytes, this),
      peakBytes: _classPrivateFieldGet(_peakBytes, this),
      entries: _classPrivateFieldGet(_index, this).size
    };
  }
}
// Transpiled private method: drops an index entry, adjusts the byte count,
// and best-effort unlinks the backing file. Called with `this` bound to the
// store via _assertClassBrand.
function _removeEntry(url, entry) {
  _classPrivateFieldSet(_bytes, this, _classPrivateFieldGet(_bytes, this) - entry.size);
  _classPrivateFieldGet(_index, this).delete(url);
  try {
    fs.unlinkSync(entry.path);
  } catch {/* best-effort */}
}
|
|
349
|
+
// Builds a per-run spill directory path under the OS temp dir. The pid plus
// a random hex token keeps concurrent Percy processes from colliding. The
// directory itself is created later by DiskSpillStore's constructor.
export function createSpillDir() {
  const token = crypto.randomBytes(4).toString('hex');
  return path.join(os.tmpdir(), `percy-cache-${process.pid}-${token}`);
}
|
package/dist/config.js
CHANGED
|
@@ -7,6 +7,9 @@ export const configSchema = {
|
|
|
7
7
|
deferUploads: {
|
|
8
8
|
type: 'boolean'
|
|
9
9
|
},
|
|
10
|
+
archiveDir: {
|
|
11
|
+
type: 'string'
|
|
12
|
+
},
|
|
10
13
|
useSystemProxy: {
|
|
11
14
|
type: 'boolean',
|
|
12
15
|
default: false
|
|
@@ -422,6 +425,13 @@ export const configSchema = {
|
|
|
422
425
|
disableCache: {
|
|
423
426
|
type: 'boolean'
|
|
424
427
|
},
|
|
428
|
+
maxCacheRam: {
|
|
429
|
+
// 0 has no meaningful semantics — it's neither "unbounded" (use null)
|
|
430
|
+
// nor "disabled" (use --disable-cache). Reject it at schema time so the
|
|
431
|
+
// discovery clamp doesn't silently bump it to 25MB.
|
|
432
|
+
type: ['integer', 'null'],
|
|
433
|
+
minimum: 1
|
|
434
|
+
},
|
|
425
435
|
captureMockedServiceWorker: {
|
|
426
436
|
type: 'boolean',
|
|
427
437
|
default: false
|
package/dist/discovery.js
CHANGED
|
@@ -2,6 +2,7 @@ import logger from '@percy/logger';
|
|
|
2
2
|
import Queue from './queue.js';
|
|
3
3
|
import Page from './page.js';
|
|
4
4
|
import { normalizeURL, hostnameMatches, createResource, createRootResource, createPercyCSSResource, createLogResource, yieldAll, snapshotLogName, waitForTimeout, withRetries, waitForSelectorInsideBrowser, isGzipped, maybeScrollToBottom } from './utils.js';
|
|
5
|
+
import { ByteLRU, entrySize, DiskSpillStore, createSpillDir } from './cache/byte-lru.js';
|
|
5
6
|
import { sha256hash } from '@percy/client/utils';
|
|
6
7
|
import Pako from 'pako';
|
|
7
8
|
|
|
@@ -223,10 +224,8 @@ function processSnapshotResources({
|
|
|
223
224
|
resources = resources.flat();
|
|
224
225
|
|
|
225
226
|
// include associated snapshot logs matched by meta information
|
|
226
|
-
resources.push(createLogResource(logger.
|
|
227
|
-
|
|
228
|
-
return ((_log$meta$snapshot = log.meta.snapshot) === null || _log$meta$snapshot === void 0 ? void 0 : _log$meta$snapshot.testCase) === snapshot.meta.snapshot.testCase && ((_log$meta$snapshot2 = log.meta.snapshot) === null || _log$meta$snapshot2 === void 0 ? void 0 : _log$meta$snapshot2.name) === snapshot.meta.snapshot.name;
|
|
229
|
-
})));
|
|
227
|
+
resources.push(createLogResource(logger.snapshotLogs(snapshot.meta.snapshot)));
|
|
228
|
+
logger.evictSnapshot(snapshot.meta.snapshot);
|
|
230
229
|
if (process.env.PERCY_GZIP) {
|
|
231
230
|
for (let index = 0; index < resources.length; index++) {
|
|
232
231
|
const alreadyZipped = isGzipped(resources[index].content);
|
|
@@ -434,9 +433,74 @@ export async function* discoverSnapshotResources(queue, options, callback) {
|
|
|
434
433
|
return all;
|
|
435
434
|
}, []));
|
|
436
435
|
}
|
|
437
|
-
|
|
438
|
-
// Used to cache resources across core instances
|
|
439
436
|
export const RESOURCE_CACHE_KEY = Symbol('resource-cache');
|
|
437
|
+
export const CACHE_STATS_KEY = Symbol('resource-cache-stats');
|
|
438
|
+
export const DISK_SPILL_KEY = Symbol('resource-cache-disk-spill');
|
|
439
|
+
const BYTES_PER_MB = 1_000_000;
|
|
440
|
+
// MAX_RESOURCE_SIZE in network.js is 25MB; caps below that would skip every
|
|
441
|
+
// resource, so we clamp. MIN_REASONABLE_CAP_MB warns on near-useless caps.
|
|
442
|
+
const MAX_RESOURCE_SIZE_MB = 25;
|
|
443
|
+
const MIN_REASONABLE_CAP_MB = 50;
|
|
444
|
+
const DEFAULT_WARN_THRESHOLD_BYTES = 500 * BYTES_PER_MB;
|
|
445
|
+
// Builds the zeroed per-run cache-stats record stored at CACHE_STATS_KEY.
// effectiveMaxCacheRamMB stays null until the 'start' handler resolves the
// post-clamp cap; the flags latch one-shot warnings/telemetry events.
function makeCacheStats() {
  const stats = {
    effectiveMaxCacheRamMB: null,
    oversizeSkipped: 0,
    firstEvictionEventFired: false,
    warningFired: false,
    unsetModeBytes: 0
  };
  return stats;
}
|
|
454
|
+
// Reads the PERCY_CACHE_WARN_THRESHOLD_BYTES env override. Anything that is
// not a finite positive number (unset, empty, junk, zero, negative) falls
// back to DEFAULT_WARN_THRESHOLD_BYTES.
function readWarnThresholdBytes() {
  const parsed = Number(process.env.PERCY_CACHE_WARN_THRESHOLD_BYTES);
  if (Number.isFinite(parsed) && parsed > 0) return parsed;
  return DEFAULT_WARN_THRESHOLD_BYTES;
}
|
|
458
|
+
|
|
459
|
+
// Cache lookup shared by the network intercept path. RAM miss falls through
// to the disk tier; read failures return undefined so the browser refetches.
// Also resolves the array-valued root-resource shape used for multi-width
// DOM snapshots, regardless of which tier returned it.
//
// Disk hits are promoted back to RAM so a hot URL that was evicted once does
// not pay the readFileSync cost on every subsequent access — the typical
// two-tier-cache promotion pattern. ByteLRU's own eviction will then re-spill
// the actual coldest entry if needed. DISK_SPILL_KEY is only set when the
// ByteLRU tier is active (see createDiscoveryQueue 'start' handler), so the
// cache here is guaranteed to be a ByteLRU when we enter this branch.
export function lookupCacheResource(percy, snapshotResources, cache, url, width) {
  let resource = snapshotResources.get(url) || cache.get(url);
  const disk = percy[DISK_SPILL_KEY];
  if (!resource && disk) {
    resource = disk.get(url);
    if (resource) {
      percy.log.debug(`cache disk-hit: ${url} (disk=${disk.size}/` + `${Math.round(disk.bytes / BYTES_PER_MB)}MB)`);
      // Promote back to RAM and drop the disk copy. cache.set may itself
      // evict the LRU entry (which spills back to disk) — that's the
      // intended LRU dance, not a bug.
      cache.set(url, resource, entrySize(resource));
      disk.delete(url);
    }
  }
  // FIX: guard the first element — an empty cached array previously threw
  // `TypeError: Cannot read properties of undefined (reading 'root')` here
  // instead of falling through and returning the resource as-is.
  if (resource && Array.isArray(resource) && resource[0]?.root) {
    // Pick the root captured for this width; fall back to the first root.
    const rootResource = resource.find(r => {
      var _r$widths;
      return (_r$widths = r.widths) === null || _r$widths === void 0 ? void 0 : _r$widths.includes(width);
    });
    resource = rootResource || resource[0];
  }
  return resource;
}
|
|
493
|
+
|
|
494
|
+
// Fire-and-forget wrapper around the shared telemetry egress on Percy.
// onEvict callbacks are sync; the initial `await Promise.resolve()` hop keeps
// even sendCacheTelemetry's pre-await synchronous work (header construction,
// payload serialization) off the eviction-loop hot path.
function fireCacheEventSafe(percy, message, extra) {
  // sendCacheTelemetry already swallows pager errors. The trailing .catch is
  // belt-and-suspenders against Node 14's unhandled-rejection-as-fatal mode
  // if the catch arm itself ever throws (e.g. log.debug stub explodes).
  (async () => {
    await Promise.resolve(); // microtask hop before any telemetry work runs
    await percy.sendCacheTelemetry(message, extra);
  })().catch(() => {});
}
|
|
440
504
|
|
|
441
505
|
// Creates an asset discovery queue that uses the percy browser instance to create a page for each
|
|
442
506
|
// snapshot which is used to intercept and capture snapshot resource requests.
|
|
@@ -446,21 +510,84 @@ export function createDiscoveryQueue(percy) {
|
|
|
446
510
|
} = percy.config.discovery;
|
|
447
511
|
let queue = new Queue('discovery');
|
|
448
512
|
let cache;
|
|
513
|
+
let capBytes = null;
|
|
514
|
+
// Read once: saveResource consults this on every call.
|
|
515
|
+
const warnThreshold = readWarnThresholdBytes();
|
|
449
516
|
return queue.set({
|
|
450
517
|
concurrency
|
|
451
|
-
})
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
518
|
+
}).handle('start', async () => {
|
|
519
|
+
const configuredMaxCacheRamMB = percy.config.discovery.maxCacheRam;
|
|
520
|
+
let effectiveMaxCacheRamMB = configuredMaxCacheRamMB;
|
|
521
|
+
|
|
522
|
+
// User's config is not mutated; the post-clamp value lives on stats.
|
|
523
|
+
if (configuredMaxCacheRamMB != null) {
|
|
524
|
+
if (configuredMaxCacheRamMB < MAX_RESOURCE_SIZE_MB) {
|
|
525
|
+
percy.log.warn(`--max-cache-ram=${configuredMaxCacheRamMB}MB is below the ${MAX_RESOURCE_SIZE_MB}MB minimum ` + '(individual resources up to 25MB would otherwise be dropped). ' + `Continuing with the minimum: ${MAX_RESOURCE_SIZE_MB}MB.`);
|
|
526
|
+
effectiveMaxCacheRamMB = MAX_RESOURCE_SIZE_MB;
|
|
527
|
+
} else if (configuredMaxCacheRamMB < MIN_REASONABLE_CAP_MB) {
|
|
528
|
+
percy.log.warn(`--max-cache-ram=${configuredMaxCacheRamMB}MB is very small; ` + 'most resources will not fit and hit rate will be near zero.');
|
|
529
|
+
}
|
|
530
|
+
if (percy.config.discovery.disableCache) {
|
|
531
|
+
percy.log.info('--max-cache-ram is ignored because --disable-cache is set.');
|
|
532
|
+
}
|
|
533
|
+
capBytes = effectiveMaxCacheRamMB * BYTES_PER_MB;
|
|
534
|
+
}
|
|
535
|
+
if (warnThreshold !== DEFAULT_WARN_THRESHOLD_BYTES) {
|
|
536
|
+
percy.log.debug(`PERCY_CACHE_WARN_THRESHOLD_BYTES override active: ${warnThreshold} bytes ` + `(default ${DEFAULT_WARN_THRESHOLD_BYTES}).`);
|
|
537
|
+
}
|
|
538
|
+
percy[CACHE_STATS_KEY] = makeCacheStats();
|
|
539
|
+
percy[CACHE_STATS_KEY].effectiveMaxCacheRamMB = capBytes != null ? effectiveMaxCacheRamMB : null;
|
|
540
|
+
if (capBytes != null) {
|
|
541
|
+
// Overflow tier: RAM evictions spill here. diskStore.set returns
|
|
542
|
+
// false on any I/O failure → caller falls back to drop automatically.
|
|
543
|
+
const diskStore = new DiskSpillStore(createSpillDir(), {
|
|
544
|
+
log: percy.log
|
|
545
|
+
});
|
|
546
|
+
percy[DISK_SPILL_KEY] = diskStore;
|
|
547
|
+
cache = percy[RESOURCE_CACHE_KEY] = new ByteLRU(capBytes, {
|
|
548
|
+
onEvict: (key, reason, value) => {
|
|
549
|
+
if (reason === 'too-big') {
|
|
550
|
+
percy[CACHE_STATS_KEY].oversizeSkipped++;
|
|
551
|
+
percy.log.debug(`cache skip (oversize): ${key}`);
|
|
552
|
+
return;
|
|
553
|
+
}
|
|
554
|
+
const spilled = diskStore.set(key, value);
|
|
555
|
+
percy.log.debug(`cache ${spilled ? 'spill' : 'evict'}: ${key} ` + `(cache ${Math.round(cache.calculatedSize / BYTES_PER_MB)}` + `/${effectiveMaxCacheRamMB}MB, entries=${cache.size}, ` + `disk=${diskStore.size}/${Math.round(diskStore.bytes / BYTES_PER_MB)}MB)`);
|
|
556
|
+
const stats = percy[CACHE_STATS_KEY];
|
|
557
|
+
if (stats && !stats.firstEvictionEventFired) {
|
|
558
|
+
stats.firstEvictionEventFired = true;
|
|
559
|
+
percy.log.info('Cache eviction active — cap reached, oldest entries spilling to disk.');
|
|
560
|
+
fireCacheEventSafe(percy, 'cache_eviction_started', {
|
|
561
|
+
cache_budget_ram_mb: effectiveMaxCacheRamMB,
|
|
562
|
+
cache_peak_bytes_seen: cache.stats.peakBytes,
|
|
563
|
+
eviction_count: cache.stats.evictions,
|
|
564
|
+
disk_spill_enabled: diskStore.ready
|
|
565
|
+
});
|
|
566
|
+
}
|
|
567
|
+
}
|
|
568
|
+
});
|
|
569
|
+
} else {
|
|
570
|
+
cache = percy[RESOURCE_CACHE_KEY] = new Map();
|
|
571
|
+
}
|
|
458
572
|
await percy.browser.launch();
|
|
459
573
|
queue.run();
|
|
460
|
-
})
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
574
|
+
}).handle('end', async () => {
|
|
575
|
+
// Disk-spill cleanup must run even if browser.close() throws — otherwise
|
|
576
|
+
// the per-run temp dir under os.tmpdir() leaks. CACHE_STATS_KEY is set
|
|
577
|
+
// alongside DISK_SPILL_KEY in 'start', so the snapshot is always safe.
|
|
578
|
+
try {
|
|
579
|
+
await percy.browser.close();
|
|
580
|
+
} finally {
|
|
581
|
+
const diskStore = percy[DISK_SPILL_KEY];
|
|
582
|
+
if (diskStore) {
|
|
583
|
+
percy[CACHE_STATS_KEY].finalDiskStats = {
|
|
584
|
+
...diskStore.stats,
|
|
585
|
+
ready: diskStore.ready
|
|
586
|
+
};
|
|
587
|
+
diskStore.destroy();
|
|
588
|
+
delete percy[DISK_SPILL_KEY];
|
|
589
|
+
}
|
|
590
|
+
}
|
|
464
591
|
})
|
|
465
592
|
// snapshots are unique by name and testCase; when deferred also by widths
|
|
466
593
|
.handle('find', ({
|
|
@@ -509,18 +636,9 @@ export function createDiscoveryQueue(percy) {
|
|
|
509
636
|
disableCache: snapshot.discovery.disableCache,
|
|
510
637
|
allowedHostnames: snapshot.discovery.allowedHostnames,
|
|
511
638
|
disallowedHostnames: snapshot.discovery.disallowedHostnames,
|
|
512
|
-
getResource: (u, width = null) =>
|
|
513
|
-
let resource = snapshot.resources.get(u) || cache.get(u);
|
|
514
|
-
if (resource && Array.isArray(resource) && resource[0].root) {
|
|
515
|
-
const rootResource = resource.find(r => {
|
|
516
|
-
var _r$widths;
|
|
517
|
-
return (_r$widths = r.widths) === null || _r$widths === void 0 ? void 0 : _r$widths.includes(width);
|
|
518
|
-
});
|
|
519
|
-
resource = rootResource || resource[0];
|
|
520
|
-
}
|
|
521
|
-
return resource;
|
|
522
|
-
},
|
|
639
|
+
getResource: (u, width = null) => lookupCacheResource(percy, snapshot.resources, cache, u, width),
|
|
523
640
|
saveResource: r => {
|
|
641
|
+
var _percy$DISK_SPILL_KEY;
|
|
524
642
|
const limitResources = process.env.LIMIT_SNAPSHOT_RESOURCES || false;
|
|
525
643
|
const MAX_RESOURCES = Number(process.env.MAX_SNAPSHOT_RESOURCES) || 749;
|
|
526
644
|
if (limitResources && snapshot.resources.size >= MAX_RESOURCES) {
|
|
@@ -528,8 +646,31 @@ export function createDiscoveryQueue(percy) {
|
|
|
528
646
|
return;
|
|
529
647
|
}
|
|
530
648
|
snapshot.resources.set(r.url, r);
|
|
531
|
-
if (
|
|
649
|
+
if (snapshot.discovery.disableCache) return;
|
|
650
|
+
|
|
651
|
+
// Fresh write supersedes any prior spill — prevents races
|
|
652
|
+
// where getResource could serve a stale disk copy.
|
|
653
|
+
if ((_percy$DISK_SPILL_KEY = percy[DISK_SPILL_KEY]) !== null && _percy$DISK_SPILL_KEY !== void 0 && _percy$DISK_SPILL_KEY.has(r.url)) {
|
|
654
|
+
percy[DISK_SPILL_KEY].delete(r.url);
|
|
655
|
+
}
|
|
656
|
+
if (capBytes != null) {
|
|
657
|
+
// ByteLRU fires onEvict('too-big') for oversize entries;
|
|
658
|
+
// the oversize_skipped stat + debug log live there.
|
|
659
|
+
cache.set(r.url, r, entrySize(r));
|
|
660
|
+
} else {
|
|
661
|
+
// Subtract the prior entry's footprint before overwriting so
|
|
662
|
+
// the byte counter tracks current cache contents rather than
|
|
663
|
+
// cumulative writes. Without this, the same shared CSS saved
|
|
664
|
+
// across N snapshots would inflate unsetModeBytes by N×.
|
|
665
|
+
const stats = percy[CACHE_STATS_KEY];
|
|
666
|
+
const prior = cache.get(r.url);
|
|
667
|
+
if (prior) stats.unsetModeBytes -= entrySize(prior);
|
|
532
668
|
cache.set(r.url, r);
|
|
669
|
+
stats.unsetModeBytes += entrySize(r);
|
|
670
|
+
if (!stats.warningFired && stats.unsetModeBytes >= warnThreshold) {
|
|
671
|
+
stats.warningFired = true;
|
|
672
|
+
percy.log.warn(`Percy cache is using ${(stats.unsetModeBytes / BYTES_PER_MB).toFixed(1)}MB. ` + 'If your CI is memory-constrained, set --max-cache-ram. ' + 'See https://www.browserstack.com/docs/percy/cli/managing-cache-memory');
|
|
673
|
+
}
|
|
533
674
|
}
|
|
534
675
|
}
|
|
535
676
|
}
|
package/dist/percy.js
CHANGED
|
@@ -17,7 +17,7 @@ import Pako from 'pako';
|
|
|
17
17
|
import { base64encode, generatePromise, yieldAll, yieldTo, redactSecrets, detectSystemProxyAndLog, checkSDKVersion, processCorsIframes } from './utils.js';
|
|
18
18
|
import { createPercyServer, createStaticServer } from './api.js';
|
|
19
19
|
import { gatherSnapshots, createSnapshotsQueue, validateSnapshotOptions } from './snapshot.js';
|
|
20
|
-
import { discoverSnapshotResources, createDiscoveryQueue } from './discovery.js';
|
|
20
|
+
import { discoverSnapshotResources, createDiscoveryQueue, RESOURCE_CACHE_KEY, CACHE_STATS_KEY, DISK_SPILL_KEY } from './discovery.js';
|
|
21
21
|
import Monitoring from '@percy/monitoring';
|
|
22
22
|
import { WaitForJob } from './wait-for-job.js';
|
|
23
23
|
const MAX_SUGGESTION_CALLS = 10;
|
|
@@ -43,6 +43,8 @@ export class Percy {
|
|
|
43
43
|
constructor({
|
|
44
44
|
// initial log level
|
|
45
45
|
loglevel,
|
|
46
|
+
// path to save snapshot data to disk
|
|
47
|
+
archiveDir,
|
|
46
48
|
// process uploads before the next snapshot
|
|
47
49
|
delayUploads,
|
|
48
50
|
// process uploads after all snapshots
|
|
@@ -83,6 +85,7 @@ export class Percy {
|
|
|
83
85
|
});
|
|
84
86
|
labels ?? (labels = (_config$percy = config.percy) === null || _config$percy === void 0 ? void 0 : _config$percy.labels);
|
|
85
87
|
deferUploads ?? (deferUploads = (_config$percy2 = config.percy) === null || _config$percy2 === void 0 ? void 0 : _config$percy2.deferUploads);
|
|
88
|
+
if (archiveDir) skipUploads = skipUploads != null ? skipUploads : true;
|
|
86
89
|
this.config = config;
|
|
87
90
|
this.cliStartTime = null;
|
|
88
91
|
if (testing) loglevel = 'silent';
|
|
@@ -95,6 +98,7 @@ export class Percy {
|
|
|
95
98
|
this.skipDiscovery = this.dryRun || !!skipDiscovery;
|
|
96
99
|
this.delayUploads = this.skipUploads || !!delayUploads;
|
|
97
100
|
this.deferUploads = this.skipUploads || !!deferUploads;
|
|
101
|
+
this.archiveDir = this.skipUploads && archiveDir ? archiveDir : null;
|
|
98
102
|
this.labels = labels;
|
|
99
103
|
this.suggestionsCallCounter = suggestionsCallCounter;
|
|
100
104
|
this.client = new PercyClient({
|
|
@@ -132,7 +136,7 @@ export class Percy {
|
|
|
132
136
|
};
|
|
133
137
|
|
|
134
138
|
// generator methods are wrapped to autorun and return promises
|
|
135
|
-
for (let m of ['start', 'stop', 'flush', 'idle', 'snapshot', 'upload']) {
|
|
139
|
+
for (let m of ['start', 'stop', 'flush', 'idle', 'snapshot', 'upload', 'replaySnapshot']) {
|
|
136
140
|
// the original generator can be referenced with percy.yield.<method>
|
|
137
141
|
let method = (this.yield || (this.yield = {}))[m] = this[m].bind(this);
|
|
138
142
|
this[m] = (...args) => generatePromise(method(...args));
|
|
@@ -259,6 +263,15 @@ export class Percy {
|
|
|
259
263
|
await this.loadAutoConfiguredHostnames();
|
|
260
264
|
}
|
|
261
265
|
|
|
266
|
+
// validate and log archive dir if configured
|
|
267
|
+
if (this.archiveDir) {
|
|
268
|
+
let {
|
|
269
|
+
validateArchiveDir
|
|
270
|
+
} = await import('./archive.js');
|
|
271
|
+
this.archiveDir = validateArchiveDir(this.archiveDir);
|
|
272
|
+
this.log.info(`Archiving snapshots to: ${this.archiveDir}`);
|
|
273
|
+
}
|
|
274
|
+
|
|
262
275
|
// start the snapshots queue immediately when not delayed or deferred
|
|
263
276
|
if (!this.delayUploads && !this.deferUploads) yield _classPrivateFieldGet(_snapshots, this).start();
|
|
264
277
|
// do not start the discovery queue when not needed
|
|
@@ -400,6 +413,11 @@ export class Percy {
|
|
|
400
413
|
this.log.info(info('Found', _classPrivateFieldGet(_snapshots, this).size));
|
|
401
414
|
}
|
|
402
415
|
|
|
416
|
+
// log archive summary
|
|
417
|
+
if (this.archiveDir && _classPrivateFieldGet(_snapshots, this).size) {
|
|
418
|
+
this.log.info(`Archived ${_classPrivateFieldGet(_snapshots, this).size} snapshot(s) to: ${this.archiveDir}`);
|
|
419
|
+
}
|
|
420
|
+
|
|
403
421
|
// Save domain validation config before closing
|
|
404
422
|
if (!this.skipUploads && !this.skipDiscovery) {
|
|
405
423
|
await this.saveHostnamesToAutoConfigure();
|
|
@@ -429,7 +447,67 @@ export class Percy {
|
|
|
429
447
|
// This issue doesn't comes under regular error logs,
|
|
430
448
|
// it's detected if we just and stop percy server
|
|
431
449
|
await this.checkForNoSnapshotCommandError();
|
|
450
|
+
// sendBuildLogs goes first — it's the primary egress. cache_summary is
|
|
451
|
+
// analytics, ordered after so a slow pager hop cannot delay the logs.
|
|
432
452
|
await this.sendBuildLogs();
|
|
453
|
+
await this.sendCacheSummary();
|
|
454
|
+
}
|
|
455
|
+
}
|
|
456
|
+
|
|
457
|
+
// Single egress point for cache-tier telemetry. Used by sendCacheSummary
|
|
458
|
+
// (awaited at stop) and discovery's fire-and-forget eviction event. Returns
|
|
459
|
+
// early if no build is associated, swallows pager rejections — telemetry
|
|
460
|
+
// loss must never fail a build.
|
|
461
|
+
async sendCacheTelemetry(message, extra) {
|
|
462
|
+
var _this$build2;
|
|
463
|
+
if (!((_this$build2 = this.build) !== null && _this$build2 !== void 0 && _this$build2.id)) return;
|
|
464
|
+
try {
|
|
465
|
+
await this.client.sendBuildEvents(this.build.id, {
|
|
466
|
+
message,
|
|
467
|
+
cliVersion: this.client.cliVersion,
|
|
468
|
+
clientInfo: this.clientInfo,
|
|
469
|
+
extra
|
|
470
|
+
});
|
|
471
|
+
} catch (err) {
|
|
472
|
+
this.log.debug(`${message} telemetry failed`, err);
|
|
473
|
+
}
|
|
474
|
+
}
|
|
475
|
+
|
|
476
|
+
// Cache-usage summary fired at stop. The whole method is wrapped — the
|
|
477
|
+
// contract is "telemetry must never fail percy.stop()", which covers the
|
|
478
|
+
// payload-construction block as well as the egress.
|
|
479
|
+
async sendCacheSummary() {
|
|
480
|
+
try {
|
|
481
|
+
var _stats$finalDiskStats;
|
|
482
|
+
const cache = this[RESOURCE_CACHE_KEY];
|
|
483
|
+
const stats = this[CACHE_STATS_KEY];
|
|
484
|
+
if (!cache || !stats) return;
|
|
485
|
+
const cacheStats = typeof cache.stats === 'object' ? cache.stats : null;
|
|
486
|
+
// diskStore is destroyed by discovery 'end' before this runs, so fall
|
|
487
|
+
// back to the snapshot captured in stats.finalDiskStats.
|
|
488
|
+
const diskStore = this[DISK_SPILL_KEY];
|
|
489
|
+
const diskSnap = (diskStore === null || diskStore === void 0 ? void 0 : diskStore.stats) ?? stats.finalDiskStats;
|
|
490
|
+
const diskReady = diskStore ? diskStore.ready : !!((_stats$finalDiskStats = stats.finalDiskStats) !== null && _stats$finalDiskStats !== void 0 && _stats$finalDiskStats.ready);
|
|
491
|
+
await this.sendCacheTelemetry('cache_summary', {
|
|
492
|
+
cache_budget_ram_mb: stats.effectiveMaxCacheRamMB,
|
|
493
|
+
hits: (cacheStats === null || cacheStats === void 0 ? void 0 : cacheStats.hits) ?? 0,
|
|
494
|
+
misses: (cacheStats === null || cacheStats === void 0 ? void 0 : cacheStats.misses) ?? 0,
|
|
495
|
+
evictions: (cacheStats === null || cacheStats === void 0 ? void 0 : cacheStats.evictions) ?? 0,
|
|
496
|
+
peak_bytes: (cacheStats === null || cacheStats === void 0 ? void 0 : cacheStats.peakBytes) ?? stats.unsetModeBytes,
|
|
497
|
+
final_bytes: cache.calculatedSize ?? stats.unsetModeBytes,
|
|
498
|
+
entry_count: cache.size ?? 0,
|
|
499
|
+
oversize_skipped: stats.oversizeSkipped,
|
|
500
|
+
disk_spill_enabled: diskReady,
|
|
501
|
+
disk_spilled_count: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.spilled) ?? 0,
|
|
502
|
+
disk_restored_count: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.restored) ?? 0,
|
|
503
|
+
disk_spill_failures: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.spillFailures) ?? 0,
|
|
504
|
+
disk_read_failures: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.readFailures) ?? 0,
|
|
505
|
+
disk_peak_bytes: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.peakBytes) ?? 0,
|
|
506
|
+
disk_final_bytes: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.currentBytes) ?? 0,
|
|
507
|
+
disk_final_entries: (diskSnap === null || diskSnap === void 0 ? void 0 : diskSnap.entries) ?? 0
|
|
508
|
+
});
|
|
509
|
+
} catch (err) {
|
|
510
|
+
this.log.debug('cache_summary build failed', err);
|
|
433
511
|
}
|
|
434
512
|
}
|
|
435
513
|
checkAndUpdateConcurrency() {
|
|
@@ -487,10 +565,10 @@ export class Percy {
|
|
|
487
565
|
// snapshots. Once asset discovery has completed for the provided snapshots, the queued task will
|
|
488
566
|
// resolve and an upload task will be queued separately.
|
|
489
567
|
snapshot(options, snapshotPromise = {}) {
|
|
490
|
-
var _this$
|
|
568
|
+
var _this$build3;
|
|
491
569
|
if (this.readyState !== 1) {
|
|
492
570
|
throw new Error('Not running');
|
|
493
|
-
} else if ((_this$
|
|
571
|
+
} else if ((_this$build3 = this.build) !== null && _this$build3 !== void 0 && _this$build3.error) {
|
|
494
572
|
throw new Error(this.build.error);
|
|
495
573
|
} else if (Array.isArray(options)) {
|
|
496
574
|
return yieldAll(options.map(o => this.yield.snapshot(o, snapshotPromise)));
|
|
@@ -551,6 +629,17 @@ export class Percy {
|
|
|
551
629
|
config: this.config
|
|
552
630
|
})
|
|
553
631
|
}, snapshot => {
|
|
632
|
+
// archive snapshot to disk if configured
|
|
633
|
+
if (this.archiveDir) {
|
|
634
|
+
import('./archive.js').then(({
|
|
635
|
+
archiveSnapshot
|
|
636
|
+
}) => {
|
|
637
|
+
archiveSnapshot(this.archiveDir, snapshot);
|
|
638
|
+
}).catch(err => {
|
|
639
|
+
this.log.error(`Failed to archive snapshot "${snapshot.name}": ${err.message}`);
|
|
640
|
+
});
|
|
641
|
+
}
|
|
642
|
+
|
|
554
643
|
// attaching promise resolve reject so to wait for snapshot to complete
|
|
555
644
|
if (this.syncMode(snapshot)) {
|
|
556
645
|
snapshotPromise[snapshot.name] = new Promise((resolve, reject) => {
|
|
@@ -640,6 +729,25 @@ export class Percy {
|
|
|
640
729
|
}
|
|
641
730
|
}.call(this);
|
|
642
731
|
}
|
|
732
|
+
|
|
733
|
+
// Pushes a pre-built snapshot directly to the upload queue without discovery.
|
|
734
|
+
// Used by the replay command to upload previously archived snapshots.
|
|
735
|
+
*replaySnapshot(snapshot) {
|
|
736
|
+
var _this$build4;
|
|
737
|
+
if (this.readyState !== 1) {
|
|
738
|
+
throw new Error('Not running');
|
|
739
|
+
} else if ((_this$build4 = this.build) !== null && _this$build4 !== void 0 && _this$build4.error) {
|
|
740
|
+
throw new Error(this.build.error);
|
|
741
|
+
}
|
|
742
|
+
snapshot.meta = {
|
|
743
|
+
snapshot: {
|
|
744
|
+
name: snapshot.name,
|
|
745
|
+
testCase: snapshot.testCase
|
|
746
|
+
}
|
|
747
|
+
};
|
|
748
|
+
this.log.info(`Replaying snapshot: ${snapshot.name}`, snapshot.meta);
|
|
749
|
+
_classPrivateFieldGet(_snapshots, this).push(snapshot);
|
|
750
|
+
}
|
|
643
751
|
shouldSkipAssetDiscovery(tokenType) {
|
|
644
752
|
if (this.testing && JSON.stringify(this.testing) === JSON.stringify({})) {
|
|
645
753
|
return true;
|
|
@@ -720,7 +828,7 @@ export class Percy {
|
|
|
720
828
|
async sendBuildLogs() {
|
|
721
829
|
if (!process.env.PERCY_TOKEN) return;
|
|
722
830
|
try {
|
|
723
|
-
var _this$
|
|
831
|
+
var _this$build5, _this$build6, _this$build7, _this$build8;
|
|
724
832
|
const logsObject = {
|
|
725
833
|
clilogs: logger.query(log => !['ci'].includes(log.debug))
|
|
726
834
|
};
|
|
@@ -732,10 +840,10 @@ export class Percy {
|
|
|
732
840
|
logsObject.cilogs = redactedContent;
|
|
733
841
|
}
|
|
734
842
|
const content = base64encode(Pako.gzip(JSON.stringify(logsObject)));
|
|
735
|
-
const referenceId = (_this$
|
|
843
|
+
const referenceId = (_this$build5 = this.build) !== null && _this$build5 !== void 0 && _this$build5.id ? `build_${(_this$build6 = this.build) === null || _this$build6 === void 0 ? void 0 : _this$build6.id}` : (_this$build7 = this.build) === null || _this$build7 === void 0 ? void 0 : _this$build7.id;
|
|
736
844
|
const eventObject = {
|
|
737
845
|
content: content,
|
|
738
|
-
build_id: (_this$
|
|
846
|
+
build_id: (_this$build8 = this.build) === null || _this$build8 === void 0 ? void 0 : _this$build8.id,
|
|
739
847
|
reference_id: referenceId,
|
|
740
848
|
service_name: 'cli',
|
|
741
849
|
base64encoded: true
|
|
@@ -808,9 +916,9 @@ export class Percy {
|
|
|
808
916
|
const newAllowedDomains = Array.from(processedHosts).filter(domain => !autoConfiguredHosts.has(domain));
|
|
809
917
|
const hasNewDomains = newAllowedDomains.length > 0 || newErrorHosts.size > 0;
|
|
810
918
|
try {
|
|
811
|
-
var _this$
|
|
919
|
+
var _this$build9;
|
|
812
920
|
await this.client.updateProjectDomainConfig({
|
|
813
|
-
buildId: (_this$
|
|
921
|
+
buildId: (_this$build9 = this.build) === null || _this$build9 === void 0 ? void 0 : _this$build9.id,
|
|
814
922
|
allowedDomains: Array.from(processedHosts),
|
|
815
923
|
errorDomains: Array.from(newErrorHosts)
|
|
816
924
|
});
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@percy/core",
|
|
3
|
-
"version": "1.31.14-beta.
|
|
3
|
+
"version": "1.31.14-beta.2",
|
|
4
4
|
"license": "MIT",
|
|
5
5
|
"repository": {
|
|
6
6
|
"type": "git",
|
|
@@ -30,6 +30,7 @@
|
|
|
30
30
|
},
|
|
31
31
|
"./utils": "./dist/utils.js",
|
|
32
32
|
"./config": "./dist/config.js",
|
|
33
|
+
"./archive": "./dist/archive.js",
|
|
33
34
|
"./install": "./dist/install.js",
|
|
34
35
|
"./test/helpers": "./test/helpers/index.js",
|
|
35
36
|
"./test/helpers/server": "./test/helpers/server.js"
|
|
@@ -43,12 +44,12 @@
|
|
|
43
44
|
"test:types": "tsd"
|
|
44
45
|
},
|
|
45
46
|
"dependencies": {
|
|
46
|
-
"@percy/client": "1.31.14-beta.
|
|
47
|
-
"@percy/config": "1.31.14-beta.
|
|
48
|
-
"@percy/dom": "1.31.14-beta.
|
|
49
|
-
"@percy/logger": "1.31.14-beta.
|
|
50
|
-
"@percy/monitoring": "1.31.14-beta.
|
|
51
|
-
"@percy/webdriver-utils": "1.31.14-beta.
|
|
47
|
+
"@percy/client": "1.31.14-beta.2",
|
|
48
|
+
"@percy/config": "1.31.14-beta.2",
|
|
49
|
+
"@percy/dom": "1.31.14-beta.2",
|
|
50
|
+
"@percy/logger": "1.31.14-beta.2",
|
|
51
|
+
"@percy/monitoring": "1.31.14-beta.2",
|
|
52
|
+
"@percy/webdriver-utils": "1.31.14-beta.2",
|
|
52
53
|
"content-disposition": "^0.5.4",
|
|
53
54
|
"cross-spawn": "^7.0.3",
|
|
54
55
|
"extract-zip": "^2.0.1",
|
|
@@ -62,7 +63,7 @@
|
|
|
62
63
|
"yaml": "^2.4.1"
|
|
63
64
|
},
|
|
64
65
|
"optionalDependencies": {
|
|
65
|
-
"@percy/cli-doctor": "1.31.14-beta.
|
|
66
|
+
"@percy/cli-doctor": "1.31.14-beta.2"
|
|
66
67
|
},
|
|
67
|
-
"gitHead": "
|
|
68
|
+
"gitHead": "e4fce73023453b77cdef50aac1a9bd5eb70cd01a"
|
|
68
69
|
}
|