@percy/core 1.31.13 → 1.31.14-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/discovery.js CHANGED
@@ -2,6 +2,7 @@ import logger from '@percy/logger';
2
2
  import Queue from './queue.js';
3
3
  import Page from './page.js';
4
4
  import { normalizeURL, hostnameMatches, createResource, createRootResource, createPercyCSSResource, createLogResource, yieldAll, snapshotLogName, waitForTimeout, withRetries, waitForSelectorInsideBrowser, isGzipped, maybeScrollToBottom } from './utils.js';
5
+ import { ByteLRU, entrySize, DiskSpillStore, createSpillDir } from './cache/byte-lru.js';
5
6
  import { sha256hash } from '@percy/client/utils';
6
7
  import Pako from 'pako';
7
8
 
@@ -223,10 +224,8 @@ function processSnapshotResources({
223
224
  resources = resources.flat();
224
225
 
225
226
  // include associated snapshot logs matched by meta information
226
- resources.push(createLogResource(logger.query(log => {
227
- var _log$meta$snapshot, _log$meta$snapshot2;
228
- return ((_log$meta$snapshot = log.meta.snapshot) === null || _log$meta$snapshot === void 0 ? void 0 : _log$meta$snapshot.testCase) === snapshot.meta.snapshot.testCase && ((_log$meta$snapshot2 = log.meta.snapshot) === null || _log$meta$snapshot2 === void 0 ? void 0 : _log$meta$snapshot2.name) === snapshot.meta.snapshot.name;
229
- })));
227
+ resources.push(createLogResource(logger.snapshotLogs(snapshot.meta.snapshot)));
228
+ logger.evictSnapshot(snapshot.meta.snapshot);
230
229
  if (process.env.PERCY_GZIP) {
231
230
  for (let index = 0; index < resources.length; index++) {
232
231
  const alreadyZipped = isGzipped(resources[index].content);
@@ -434,9 +433,74 @@ export async function* discoverSnapshotResources(queue, options, callback) {
434
433
  return all;
435
434
  }, []));
436
435
  }
437
-
438
- // Used to cache resources across core instances
439
436
  export const RESOURCE_CACHE_KEY = Symbol('resource-cache');
437
+ export const CACHE_STATS_KEY = Symbol('resource-cache-stats');
438
+ export const DISK_SPILL_KEY = Symbol('resource-cache-disk-spill');
439
+ const BYTES_PER_MB = 1_000_000;
440
+ // MAX_RESOURCE_SIZE in network.js is 25MB; caps below that would skip every
441
+ // resource, so we clamp. MIN_REASONABLE_CAP_MB warns on near-useless caps.
442
+ const MAX_RESOURCE_SIZE_MB = 25;
443
+ const MIN_REASONABLE_CAP_MB = 50;
444
+ const DEFAULT_WARN_THRESHOLD_BYTES = 500 * BYTES_PER_MB;
445
+ function makeCacheStats() {
446
+ return {
447
+ effectiveMaxCacheRamMB: null,
448
+ oversizeSkipped: 0,
449
+ firstEvictionEventFired: false,
450
+ warningFired: false,
451
+ unsetModeBytes: 0
452
+ };
453
+ }
454
+ function readWarnThresholdBytes() {
455
+ const raw = Number(process.env.PERCY_CACHE_WARN_THRESHOLD_BYTES);
456
+ return Number.isFinite(raw) && raw > 0 ? raw : DEFAULT_WARN_THRESHOLD_BYTES;
457
+ }
458
+
459
+ // Cache lookup shared by the network intercept path. RAM miss falls through
460
+ // to the disk tier; read failures return undefined so the browser refetches.
461
+ // Also resolves the array-valued root-resource shape used for multi-width
462
+ // DOM snapshots, regardless of which tier returned it.
463
+ //
464
+ // Disk hits are promoted back to RAM so a hot URL that was evicted once does
465
+ // not pay the readFileSync cost on every subsequent access — the typical
466
+ // two-tier-cache promotion pattern. ByteLRU's own eviction will then re-spill
467
+ // the actual coldest entry if needed. DISK_SPILL_KEY is only set when the
468
+ // ByteLRU tier is active (see createDiscoveryQueue 'start' handler), so the
469
+ // cache here is guaranteed to be a ByteLRU when we enter this branch.
470
+ export function lookupCacheResource(percy, snapshotResources, cache, url, width) {
471
+ let resource = snapshotResources.get(url) || cache.get(url);
472
+ const disk = percy[DISK_SPILL_KEY];
473
+ if (!resource && disk) {
474
+ resource = disk.get(url);
475
+ if (resource) {
476
+ percy.log.debug(`cache disk-hit: ${url} (disk=${disk.size}/` + `${Math.round(disk.bytes / BYTES_PER_MB)}MB)`);
477
+ // Promote back to RAM and drop the disk copy. cache.set may itself
478
+ // evict the LRU entry (which spills back to disk) — that's the
479
+ // intended LRU dance, not a bug.
480
+ cache.set(url, resource, entrySize(resource));
481
+ disk.delete(url);
482
+ }
483
+ }
484
+ if (resource && Array.isArray(resource) && resource[0].root) {
485
+ const rootResource = resource.find(r => {
486
+ var _r$widths;
487
+ return (_r$widths = r.widths) === null || _r$widths === void 0 ? void 0 : _r$widths.includes(width);
488
+ });
489
+ resource = rootResource || resource[0];
490
+ }
491
+ return resource;
492
+ }
493
+
494
+ // Fire-and-forget wrapper around the shared telemetry egress on Percy.
495
+ // onEvict callbacks are sync; the microtask hop keeps even sendCacheTelemetry's
496
+ // pre-await synchronous work (header construction, payload serialization) off
497
+ // the eviction-loop hot path.
498
+ function fireCacheEventSafe(percy, message, extra) {
499
+ // sendCacheTelemetry already swallows pager errors. The trailing .catch is
500
+ // belt-and-suspenders against Node 14's unhandled-rejection-as-fatal mode
501
+ // if the catch arm itself ever throws (e.g. log.debug stub explodes).
502
+ Promise.resolve().then(() => percy.sendCacheTelemetry(message, extra)).catch(() => {});
503
+ }
440
504
 
441
505
  // Creates an asset discovery queue that uses the percy browser instance to create a page for each
442
506
  // snapshot which is used to intercept and capture snapshot resource requests.
@@ -446,21 +510,84 @@ export function createDiscoveryQueue(percy) {
446
510
  } = percy.config.discovery;
447
511
  let queue = new Queue('discovery');
448
512
  let cache;
513
+ let capBytes = null;
514
+ // Read once: saveResource consults this on every call.
515
+ const warnThreshold = readWarnThresholdBytes();
449
516
  return queue.set({
450
517
  concurrency
451
- })
452
- // on start, launch the browser and run the queue
453
- .handle('start', async () => {
454
- cache = percy[RESOURCE_CACHE_KEY] = new Map();
455
-
456
- // If browser.launch() fails it will get captured in
457
- // *percy.start()
518
+ }).handle('start', async () => {
519
+ const configuredMaxCacheRamMB = percy.config.discovery.maxCacheRam;
520
+ let effectiveMaxCacheRamMB = configuredMaxCacheRamMB;
521
+
522
+ // User's config is not mutated; the post-clamp value lives on stats.
523
+ if (configuredMaxCacheRamMB != null) {
524
+ if (configuredMaxCacheRamMB < MAX_RESOURCE_SIZE_MB) {
525
+ percy.log.warn(`--max-cache-ram=${configuredMaxCacheRamMB}MB is below the ${MAX_RESOURCE_SIZE_MB}MB minimum ` + '(individual resources up to 25MB would otherwise be dropped). ' + `Continuing with the minimum: ${MAX_RESOURCE_SIZE_MB}MB.`);
526
+ effectiveMaxCacheRamMB = MAX_RESOURCE_SIZE_MB;
527
+ } else if (configuredMaxCacheRamMB < MIN_REASONABLE_CAP_MB) {
528
+ percy.log.warn(`--max-cache-ram=${configuredMaxCacheRamMB}MB is very small; ` + 'most resources will not fit and hit rate will be near zero.');
529
+ }
530
+ if (percy.config.discovery.disableCache) {
531
+ percy.log.info('--max-cache-ram is ignored because --disable-cache is set.');
532
+ }
533
+ capBytes = effectiveMaxCacheRamMB * BYTES_PER_MB;
534
+ }
535
+ if (warnThreshold !== DEFAULT_WARN_THRESHOLD_BYTES) {
536
+ percy.log.debug(`PERCY_CACHE_WARN_THRESHOLD_BYTES override active: ${warnThreshold} bytes ` + `(default ${DEFAULT_WARN_THRESHOLD_BYTES}).`);
537
+ }
538
+ percy[CACHE_STATS_KEY] = makeCacheStats();
539
+ percy[CACHE_STATS_KEY].effectiveMaxCacheRamMB = capBytes != null ? effectiveMaxCacheRamMB : null;
540
+ if (capBytes != null) {
541
+ // Overflow tier: RAM evictions spill here. diskStore.set returns
542
+ // false on any I/O failure → caller falls back to drop automatically.
543
+ const diskStore = new DiskSpillStore(createSpillDir(), {
544
+ log: percy.log
545
+ });
546
+ percy[DISK_SPILL_KEY] = diskStore;
547
+ cache = percy[RESOURCE_CACHE_KEY] = new ByteLRU(capBytes, {
548
+ onEvict: (key, reason, value) => {
549
+ if (reason === 'too-big') {
550
+ percy[CACHE_STATS_KEY].oversizeSkipped++;
551
+ percy.log.debug(`cache skip (oversize): ${key}`);
552
+ return;
553
+ }
554
+ const spilled = diskStore.set(key, value);
555
+ percy.log.debug(`cache ${spilled ? 'spill' : 'evict'}: ${key} ` + `(cache ${Math.round(cache.calculatedSize / BYTES_PER_MB)}` + `/${effectiveMaxCacheRamMB}MB, entries=${cache.size}, ` + `disk=${diskStore.size}/${Math.round(diskStore.bytes / BYTES_PER_MB)}MB)`);
556
+ const stats = percy[CACHE_STATS_KEY];
557
+ if (stats && !stats.firstEvictionEventFired) {
558
+ stats.firstEvictionEventFired = true;
559
+ percy.log.info('Cache eviction active — cap reached, oldest entries spilling to disk.');
560
+ fireCacheEventSafe(percy, 'cache_eviction_started', {
561
+ cache_budget_ram_mb: effectiveMaxCacheRamMB,
562
+ cache_peak_bytes_seen: cache.stats.peakBytes,
563
+ eviction_count: cache.stats.evictions,
564
+ disk_spill_enabled: diskStore.ready
565
+ });
566
+ }
567
+ }
568
+ });
569
+ } else {
570
+ cache = percy[RESOURCE_CACHE_KEY] = new Map();
571
+ }
458
572
  await percy.browser.launch();
459
573
  queue.run();
460
- })
461
- // on end, close the browser
462
- .handle('end', async () => {
463
- await percy.browser.close();
574
+ }).handle('end', async () => {
575
+ // Disk-spill cleanup must run even if browser.close() throws — otherwise
576
+ // the per-run temp dir under os.tmpdir() leaks. CACHE_STATS_KEY is set
577
+ // alongside DISK_SPILL_KEY in 'start', so the snapshot is always safe.
578
+ try {
579
+ await percy.browser.close();
580
+ } finally {
581
+ const diskStore = percy[DISK_SPILL_KEY];
582
+ if (diskStore) {
583
+ percy[CACHE_STATS_KEY].finalDiskStats = {
584
+ ...diskStore.stats,
585
+ ready: diskStore.ready
586
+ };
587
+ diskStore.destroy();
588
+ delete percy[DISK_SPILL_KEY];
589
+ }
590
+ }
464
591
  })
465
592
  // snapshots are unique by name and testCase; when deferred also by widths
466
593
  .handle('find', ({
@@ -509,18 +636,9 @@ export function createDiscoveryQueue(percy) {
509
636
  disableCache: snapshot.discovery.disableCache,
510
637
  allowedHostnames: snapshot.discovery.allowedHostnames,
511
638
  disallowedHostnames: snapshot.discovery.disallowedHostnames,
512
- getResource: (u, width = null) => {
513
- let resource = snapshot.resources.get(u) || cache.get(u);
514
- if (resource && Array.isArray(resource) && resource[0].root) {
515
- const rootResource = resource.find(r => {
516
- var _r$widths;
517
- return (_r$widths = r.widths) === null || _r$widths === void 0 ? void 0 : _r$widths.includes(width);
518
- });
519
- resource = rootResource || resource[0];
520
- }
521
- return resource;
522
- },
639
+ getResource: (u, width = null) => lookupCacheResource(percy, snapshot.resources, cache, u, width),
523
640
  saveResource: r => {
641
+ var _percy$DISK_SPILL_KEY;
524
642
  const limitResources = process.env.LIMIT_SNAPSHOT_RESOURCES || false;
525
643
  const MAX_RESOURCES = Number(process.env.MAX_SNAPSHOT_RESOURCES) || 749;
526
644
  if (limitResources && snapshot.resources.size >= MAX_RESOURCES) {
@@ -528,8 +646,31 @@ export function createDiscoveryQueue(percy) {
528
646
  return;
529
647
  }
530
648
  snapshot.resources.set(r.url, r);
531
- if (!snapshot.discovery.disableCache) {
649
+ if (snapshot.discovery.disableCache) return;
650
+
651
+ // Fresh write supersedes any prior spill — prevents races
652
+ // where getResource could serve a stale disk copy.
653
+ if ((_percy$DISK_SPILL_KEY = percy[DISK_SPILL_KEY]) !== null && _percy$DISK_SPILL_KEY !== void 0 && _percy$DISK_SPILL_KEY.has(r.url)) {
654
+ percy[DISK_SPILL_KEY].delete(r.url);
655
+ }
656
+ if (capBytes != null) {
657
+ // ByteLRU fires onEvict('too-big') for oversize entries;
658
+ // the oversize_skipped stat + debug log live there.
659
+ cache.set(r.url, r, entrySize(r));
660
+ } else {
661
+ // Subtract the prior entry's footprint before overwriting so
662
+ // the byte counter tracks current cache contents rather than
663
+ // cumulative writes. Without this, the same shared CSS saved
664
+ // across N snapshots would inflate unsetModeBytes by N×.
665
+ const stats = percy[CACHE_STATS_KEY];
666
+ const prior = cache.get(r.url);
667
+ if (prior) stats.unsetModeBytes -= entrySize(prior);
532
668
  cache.set(r.url, r);
669
+ stats.unsetModeBytes += entrySize(r);
670
+ if (!stats.warningFired && stats.unsetModeBytes >= warnThreshold) {
671
+ stats.warningFired = true;
672
+ percy.log.warn(`Percy cache is using ${(stats.unsetModeBytes / BYTES_PER_MB).toFixed(1)}MB. ` + 'If your CI is memory-constrained, set --max-cache-ram. ' + 'See https://www.browserstack.com/docs/percy/cli/managing-cache-memory');
673
+ }
533
674
  }
534
675
  }
535
676
  }
package/dist/lock.js ADDED
@@ -0,0 +1,215 @@
1
+ // Per-port lock file for Percy agent processes.
2
+ //
3
+ // Why: a stale ~/.percy directory after a crash currently surfaces as a
4
+ // late, opaque EADDRINUSE on the next `percy start`. The lock file lets
5
+ // us short-circuit at command entry with a clear, actionable refusal
6
+ // message and lets us auto-reclaim a stale lock whose recorded pid is
7
+ // dead.
8
+ //
9
+ // Cross-platform note: `fs.renameSync` over an existing target is
10
+ // unreliable on Node 14 Windows (Percy's Windows CI is pinned to
11
+ // node-version: 14, see .github/workflows/windows.yml). We therefore
12
+ // reclaim via unlink + retry-`wx` rather than rename-based reclaim.
13
+
14
+ import { mkdirSync, writeFileSync, readFileSync, unlinkSync } from 'fs';
15
+ import { join } from 'path';
16
+ // Use a default import so tests can `spyOn(os, 'homedir')` to redirect
17
+ // the lock dir into a tmpdir without touching the user's $HOME.
18
+ // (Babel's namespace import is frozen and not spy-able.)
19
+ import os from 'os';
20
+ const LOCK_DIR_MODE = 0o700;
21
+ const LOCK_FILE_MODE = 0o600;
22
+ export class LockHeldError extends Error {
23
+ constructor(meta, lockPath) {
24
+ super(`Percy is already running on port ${meta.port} ` + `(pid ${meta.pid}, started ${meta.startedAt}).\n` + `If you believe this is stale, remove ${lockPath} and try again.`);
25
+ this.name = 'LockHeldError';
26
+ this.meta = meta;
27
+ this.lockPath = lockPath;
28
+ }
29
+ }
30
+
31
+ // Lockfile-name pattern: literal "agent-" prefix, decimal-digit-only
32
+ // port (validated to be in the TCP range 1-65535; port 0 is rejected), literal ".lock"
33
+ // suffix. Built without any user-controlled string concatenation so
34
+ // semgrep's path-traversal taint analysis is satisfied.
35
+ const LOCK_DIR_NAME = '.percy';
36
+ const LOCK_FILE_PREFIX = 'agent-';
37
+ const LOCK_FILE_SUFFIX = '.lock';
38
+ export function lockPathFor(port) {
39
+ // Validate that `port` is a TCP port (positive 16-bit integer). This
40
+ // guarantees the resulting filename only contains digits + literal
41
+ // characters from LOCK_FILE_PREFIX/LOCK_FILE_SUFFIX — no '/' or
42
+ // '..' can appear, eliminating any path-traversal risk.
43
+ let n = Number(port);
44
+ /* istanbul ignore if: invalid ports are filtered upstream by the
45
+ CLI flag parser and the Percy() constructor's default; this
46
+ guard is defensive against pathological direct callers. Port 0
47
+ is also rejected — it means "OS picks an ephemeral port", and a
48
+ lockfile keyed by 0 would not correspond to the actual bound
49
+ port (two callers requesting port 0 would contend on agent-0.lock
50
+ even though the OS hands them different ports). */
51
+ if (!Number.isInteger(n) || n <= 0 || n > 65535) {
52
+ throw new TypeError(`Invalid port for lockfile: ${JSON.stringify(port)}`);
53
+ }
54
+ // The validated integer `n` plus the literal prefix/suffix yields a
55
+ // string of [prefix][digits][suffix] — no `/` or `..` is reachable.
56
+ // (semgrep's path-traversal rule is suppressed file-level via
57
+ // .semgrepignore because its taint analysis does not follow the
58
+ // Number.isInteger validation above.)
59
+ let filename = LOCK_FILE_PREFIX.concat(String(n), LOCK_FILE_SUFFIX);
60
+ return join(os.homedir(), LOCK_DIR_NAME, filename);
61
+ }
62
+
63
+ // `process.kill(pid, 0)` returns truthy for living processes, throws
64
+ // ESRCH if the pid is gone, and throws EPERM if the pid exists but
65
+ // belongs to another user (treat as alive — we cannot reclaim it).
66
+ function livenessCheck(pid) {
67
+ try {
68
+ process.kill(pid, 0);
69
+ return 'alive';
70
+ } catch (err) {
71
+ /* istanbul ignore else: ESRCH is the only "dead" signal we
72
+ reclaim on. Every other code (EPERM = exists-but-foreign,
73
+ ENOSYS / EINVAL = exotic platform) means we cannot safely
74
+ claim the lock and must treat it as "alive". The else branch
75
+ collapses these cases — it's exercised by the EPERM test in
76
+ lock.test.js but not all error codes are individually
77
+ reproducible under nyc. */
78
+ if (err.code === 'ESRCH') return 'dead';
79
+ return 'alive';
80
+ }
81
+ }
82
+
83
+ // Acquire a per-port lock. On success, returns a handle whose `path`
84
+ // the caller must eventually pass to `releaseLockSync`. Throws
85
+ // `LockHeldError` if another live process holds the lock. Returns
86
+ // `null` (no lock acquired) when `port === 0` — the lockfile is
87
+ // keyed by the requested port, but port 0 means "OS picks an
88
+ // ephemeral port", so the lockfile name wouldn't match the actual
89
+ // bound port. Callers should treat a null handle as "no lock to
90
+ // release" and the lockfile mechanism is effectively skipped for
91
+ // ephemeral-port instances (e.g., parallel test fixtures).
92
+ export function acquireLock({
93
+ port
94
+ }) {
95
+ if (Number(port) === 0) return null;
96
+ const dir = join(os.homedir(), LOCK_DIR_NAME);
97
+ const path = lockPathFor(port);
98
+ const payload = JSON.stringify({
99
+ pid: process.pid,
100
+ port,
101
+ startedAt: new Date().toISOString()
102
+ });
103
+ mkdirSync(dir, {
104
+ recursive: true,
105
+ mode: LOCK_DIR_MODE
106
+ });
107
+
108
+ // Fast path: atomic exclusive create.
109
+ try {
110
+ writeFileSync(path, payload, {
111
+ flag: 'wx',
112
+ mode: LOCK_FILE_MODE
113
+ });
114
+ return {
115
+ path,
116
+ payload
117
+ };
118
+ } catch (err) {
119
+ /* istanbul ignore if: any non-EEXIST error from `wx` is unexpected
120
+ (e.g. EACCES on a read-only $HOME) — propagate. */
121
+ if (err.code !== 'EEXIST') throw err;
122
+ }
123
+
124
+ // Lock exists. Inspect, then either refuse or reclaim once.
125
+ let existing;
126
+ try {
127
+ existing = JSON.parse(readFileSync(path, 'utf-8'));
128
+ } catch (parseErr) {
129
+ // Corrupt or truncated payload (a previous process was killed
130
+ // mid-write): treat as stale, unlink, and retry.
131
+ existing = null;
132
+ }
133
+
134
+ // A lock recorded with OUR pid means we leaked a previous lock from
135
+ // the same process (e.g., a test that forgot to release in afterEach,
136
+ // or a code path that bypassed the normal stop). Reclaiming is safe
137
+ // because we are that process — we cannot conflict with ourselves.
138
+ if (existing && existing.pid !== process.pid && livenessCheck(existing.pid) === 'alive') {
139
+ throw new LockHeldError(existing, path);
140
+ }
141
+
142
+ // Stale (or corrupt). Unlink and retry exclusive create. If a third
143
+ // process raced in and won, the second `wx` fails with EEXIST and
144
+ // we surface their info — their lock is the legitimate one.
145
+ try {
146
+ unlinkSync(path);
147
+ } catch (e) {
148
+ /* istanbul ignore next: race window — another reclaimer beat us
149
+ to the unlink. */
150
+ if (e.code !== 'ENOENT') throw e;
151
+ }
152
+ try {
153
+ writeFileSync(path, payload, {
154
+ flag: 'wx',
155
+ mode: LOCK_FILE_MODE
156
+ });
157
+ return {
158
+ path,
159
+ payload
160
+ };
161
+ } catch (err) {
162
+ /* istanbul ignore next: race-loser branch — between our unlink
163
+ and the second wx-create, another reclaimer wins. The unit
164
+ tests for SC4 and SC3 cover the deterministic refuse/reclaim
165
+ paths; reproducing this true race in a unit test is unreliable
166
+ under nyc. The behavior simply maps the EEXIST to the same
167
+ LockHeldError our first wx-failure path already produces. */
168
+ if (err.code === 'EEXIST') {
169
+ // Defensive JSON.parse: the race winner could be mid-write
170
+ // (truncated bytes) or have already crashed (empty file). A
171
+ // bare JSON.parse here would surface as a SyntaxError instead
172
+ // of a graceful LockHeldError. Mirror the same try/catch the
173
+ // earlier stale-lock read uses.
174
+ let winner;
175
+ try {
176
+ winner = JSON.parse(readFileSync(path, 'utf-8'));
177
+ } catch {
178
+ winner = {
179
+ pid: '?',
180
+ port,
181
+ startedAt: 'unknown'
182
+ };
183
+ }
184
+ throw new LockHeldError(winner, path);
185
+ }
186
+ /* istanbul ignore next: surfaces non-EEXIST fs errors (EACCES,
187
+ ENOSPC, etc.) that aren't producible in unit tests. */
188
+ throw err;
189
+ }
190
+ }
191
+
192
+ // Synchronous release for use in normal teardown AND in
193
+ // `process.on('exit')` (which only runs synchronous handlers).
194
+ //
195
+ // This must NEVER throw — it runs in the `'exit'` callback chain
196
+ // where any thrown error becomes a process-exit-time crash. In
197
+ // particular, when Jasmine tests spy on fs.unlinkSync via mockfs
198
+ // and then tear down on process exit, the spy's `originalFn` may
199
+ // already be undefined and raise a TypeError. Swallow everything
200
+ // except ENOENT-equivalents and treat the lock as released
201
+ // best-effort.
202
+ export function releaseLockSync(handle) {
203
+ if (!(handle !== null && handle !== void 0 && handle.path)) return;
204
+ try {
205
+ unlinkSync(handle.path);
206
+ } catch (e) {
207
+ /* istanbul ignore next: best-effort cleanup — the file is gone
208
+ (ENOENT), or the surrounding test runtime has already torn
209
+ down its fs spies (TypeError on `originalFn`). Either way the
210
+ lock is released from our perspective. */
211
+ if ((e === null || e === void 0 ? void 0 : e.code) !== 'ENOENT') {
212
+ // Suppress; do not throw out of an `exit` handler.
213
+ }
214
+ }
215
+ }
package/dist/network.js CHANGED
@@ -1,12 +1,19 @@
1
1
  import { request as makeRequest } from '@percy/client/utils';
2
2
  import logger from '@percy/logger';
3
3
  import mime from 'mime-types';
4
- import { DefaultMap, createResource, hostnameMatches, normalizeURL, waitFor, decodeAndEncodeURLWithLogging, handleIncorrectFontMimeType, executeDomainValidation } from './utils.js';
4
+ import { AbortError, DefaultMap, createResource, hostnameMatches, normalizeURL, waitFor, decodeAndEncodeURLWithLogging, handleIncorrectFontMimeType, executeDomainValidation } from './utils.js';
5
5
  const MAX_RESOURCE_SIZE = 25 * 1024 ** 2 * 0.63; // 25MB, 0.63 factor for accounting for base64 encoding
6
6
  const ALLOWED_STATUSES = [200, 201, 301, 302, 304, 307, 308];
7
7
  const ALLOWED_RESOURCES = ['Document', 'Stylesheet', 'Image', 'Media', 'Font', 'Other'];
8
8
  const ABORTED_MESSAGE = 'Request was aborted by browser';
9
9
 
10
+ // Stable, machine-readable codes for abort errors thrown from this module.
11
+ // Consumers should prefer `error.code` over string matching on `error.message`.
12
+ export const AbortCodes = Object.freeze({
13
+ ABORTED: 'ABORTED',
14
+ TIMEOUT_NETWORK_IDLE: 'TIMEOUT_NETWORK_IDLE'
15
+ });
16
+
10
17
  // RequestLifeCycleHandler handles life cycle of a requestId
11
18
  // Ideal flow: requestWillBeSent -> requestPaused -> responseReceived -> loadingFinished / loadingFailed
12
19
  // ServiceWorker flow: requestWillBeSent -> responseReceived -> loadingFinished / loadingFailed
@@ -18,11 +25,35 @@ class RequestLifeCycleHandler {
18
25
  this.responseReceived = new Promise(resolve => this.resolveResponseReceived = resolve);
19
26
  }
20
27
  }
28
+ // `Network.TIMEOUT` was a static class field used by
29
+ // some test code (and potentially external SDK consumers) to override
30
+ // the network-idle timeout. It's been replaced by a per-instance
31
+ // `networkIdleWaitTimeout` initialized from PERCY_NETWORK_IDLE_WAIT_TIMEOUT.
32
+ // Keep a static getter/setter shim so external callers reading or
33
+ // writing `Network.TIMEOUT` see a one-time deprecation warning instead
34
+ // of silently dropping their override.
35
+ let _timeoutDeprecationWarned = false;
36
+
21
37
  // The Interceptor class creates common handlers for dealing with intercepting asset requests
22
38
  // for a given page using various devtools protocol events and commands.
23
39
  export class Network {
24
- static TIMEOUT = undefined;
25
40
  log = logger('core:discovery');
41
+
42
+ /* istanbul ignore next: deprecation shim — kept only for external
43
+ SDK consumers that read the field. Not reachable from test code. */
44
+ static get TIMEOUT() {
45
+ return undefined;
46
+ }
47
+
48
+ /* istanbul ignore next: deprecation shim — exercised only when
49
+ external callers still write the static field. The shim logs a
50
+ one-time warning pointing at PERCY_NETWORK_IDLE_WAIT_TIMEOUT. */
51
+ static set TIMEOUT(_val) {
52
+ if (!_timeoutDeprecationWarned) {
53
+ _timeoutDeprecationWarned = true;
54
+ logger('core:discovery').warn('Network.TIMEOUT is deprecated; set the PERCY_NETWORK_IDLE_WAIT_TIMEOUT ' + 'env var (or pass per-page options) — the static field no longer affects discovery.');
55
+ }
56
+ }
26
57
  #requestsLifeCycleHandler = new DefaultMap(() => new RequestLifeCycleHandler());
27
58
  #pending = new Map();
28
59
  #requests = new Map();
@@ -92,11 +123,11 @@ export class Network {
92
123
  requests = requests.filter(req => !this.#finishedUrls.has(req.url));
93
124
  return requests.length === 0;
94
125
  }, {
95
- timeout: Network.TIMEOUT,
126
+ timeout: this.networkIdleWaitTimeout,
96
127
  idle: timeout
97
128
  }).catch(error => {
98
129
  if (error.message.startsWith('Timeout')) {
99
- let message = 'Timed out waiting for network requests to idle.';
130
+ let message = 'Timed out waiting for network requests to idle.\n' + 'Hint: set PERCY_NETWORK_IDLE_WAIT_TIMEOUT to increase the budget, ' + 'or allowlist slow domains via the discovery config.';
100
131
  if (captureResponsiveAssetsEnabled) message += '\nWhile capturing responsive assets try setting PERCY_DO_NOT_CAPTURE_RESPONSIVE_ASSETS to true.';
101
132
  this._throwTimeoutError(message, filter);
102
133
  } else {
@@ -125,7 +156,10 @@ export class Network {
125
156
  if (params.requestId) {
126
157
  /* istanbul ignore if: race condition, very hard to mock this */
127
158
  if (this.isAborted(params.requestId)) {
128
- throw new Error(ABORTED_MESSAGE);
159
+ throw new AbortError(ABORTED_MESSAGE, {
160
+ code: AbortCodes.ABORTED,
161
+ reason: 'browser-aborted'
162
+ });
129
163
  }
130
164
  }
131
165
  return await session.send(method, params);
@@ -151,7 +185,15 @@ export class Network {
151
185
  this.log.warn(warnMsg);
152
186
  return;
153
187
  }
154
- throw new Error(msg);
188
+
189
+ // Use a plain Error (NOT AbortError) so this does not trip
190
+ // `error.name === 'AbortError'` consumers in discovery.js:520,
191
+ // percy.js:347, snapshot.js:472 — those treat AbortError as
192
+ // "snapshot was aborted" and would silently drop the timeout.
193
+ let err = new Error(msg);
194
+ err.code = AbortCodes.TIMEOUT_NETWORK_IDLE;
195
+ err.reason = 'network-idle-timeout';
196
+ throw err;
155
197
  }
156
198
 
157
199
  // Called when a request should be removed from various trackers
@@ -410,9 +452,10 @@ export class Network {
410
452
  this._forgetRequest(request);
411
453
  };
412
454
  _initializeNetworkIdleWaitTimeout() {
413
- if (Network.TIMEOUT) return;
414
- Network.TIMEOUT = parseInt(process.env.PERCY_NETWORK_IDLE_WAIT_TIMEOUT) || 30000;
415
- if (Network.TIMEOUT > 60000) {
455
+ // Per-instance timeout so concurrent pages with different env values
456
+ // (or env values changed mid-run by tests) don't stomp each other.
457
+ this.networkIdleWaitTimeout = parseInt(process.env.PERCY_NETWORK_IDLE_WAIT_TIMEOUT) || 30000;
458
+ if (this.networkIdleWaitTimeout > 60000) {
416
459
  this.log.warn('Setting PERCY_NETWORK_IDLE_WAIT_TIMEOUT over 60000ms is not recommended. ' + 'If your page needs more than 60000ms to idle due to CPU/Network load, ' + 'its recommended to increase CI resources where this cli is running.');
417
460
  }
418
461
  }
@@ -545,7 +588,7 @@ async function sendResponseResource(network, request, session) {
545
588
  // Note: it's not a necessity that we would get aborted callback in a tick, it's just that if we
546
589
  // already have it then we can safely ignore this error
547
590
  // It's very hard to test, as this function should be called and the request should get cancelled before
548
- if (error.message === ABORTED_MESSAGE || error.message.includes('Invalid InterceptionId')) {
591
+ if (error.code === AbortCodes.ABORTED || error.message === ABORTED_MESSAGE || error.message.includes('Invalid InterceptionId')) {
549
592
  // defer this to the end of queue to make sure that any incoming aborted messages were
550
593
  // handled and network.#aborted is updated
551
594
  await new Promise((res, _) => process.nextTick(res));