@desplega.ai/qa-use 2.14.0 → 2.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +23 -0
- package/dist/lib/env/index.d.ts +13 -0
- package/dist/lib/env/index.d.ts.map +1 -1
- package/dist/lib/env/index.js +35 -0
- package/dist/lib/env/index.js.map +1 -1
- package/dist/lib/env/localhost.d.ts +22 -0
- package/dist/lib/env/localhost.d.ts.map +1 -0
- package/dist/lib/env/localhost.js +49 -0
- package/dist/lib/env/localhost.js.map +1 -0
- package/dist/lib/env/paths.d.ts +27 -0
- package/dist/lib/env/paths.d.ts.map +1 -0
- package/dist/lib/env/paths.js +42 -0
- package/dist/lib/env/paths.js.map +1 -0
- package/dist/lib/env/sessions.d.ts +55 -0
- package/dist/lib/env/sessions.d.ts.map +1 -0
- package/dist/lib/env/sessions.js +128 -0
- package/dist/lib/env/sessions.js.map +1 -0
- package/dist/lib/tunnel/errors.d.ts +61 -0
- package/dist/lib/tunnel/errors.d.ts.map +1 -0
- package/dist/lib/tunnel/errors.js +152 -0
- package/dist/lib/tunnel/errors.js.map +1 -0
- package/dist/lib/tunnel/index.d.ts.map +1 -1
- package/dist/lib/tunnel/index.js +26 -11
- package/dist/lib/tunnel/index.js.map +1 -1
- package/dist/lib/tunnel/registry.d.ts +182 -0
- package/dist/lib/tunnel/registry.d.ts.map +1 -0
- package/dist/lib/tunnel/registry.js +561 -0
- package/dist/lib/tunnel/registry.js.map +1 -0
- package/dist/package.json +1 -1
- package/dist/src/cli/commands/browser/_detached.d.ts +27 -0
- package/dist/src/cli/commands/browser/_detached.d.ts.map +1 -0
- package/dist/src/cli/commands/browser/_detached.js +422 -0
- package/dist/src/cli/commands/browser/_detached.js.map +1 -0
- package/dist/src/cli/commands/browser/close.d.ts +7 -0
- package/dist/src/cli/commands/browser/close.d.ts.map +1 -1
- package/dist/src/cli/commands/browser/close.js +101 -5
- package/dist/src/cli/commands/browser/close.js.map +1 -1
- package/dist/src/cli/commands/browser/create.d.ts +7 -0
- package/dist/src/cli/commands/browser/create.d.ts.map +1 -1
- package/dist/src/cli/commands/browser/create.js +233 -25
- package/dist/src/cli/commands/browser/create.js.map +1 -1
- package/dist/src/cli/commands/browser/index.d.ts.map +1 -1
- package/dist/src/cli/commands/browser/index.js +3 -0
- package/dist/src/cli/commands/browser/index.js.map +1 -1
- package/dist/src/cli/commands/browser/run.d.ts.map +1 -1
- package/dist/src/cli/commands/browser/run.js +13 -6
- package/dist/src/cli/commands/browser/run.js.map +1 -1
- package/dist/src/cli/commands/browser/status.d.ts +4 -0
- package/dist/src/cli/commands/browser/status.d.ts.map +1 -1
- package/dist/src/cli/commands/browser/status.js +85 -3
- package/dist/src/cli/commands/browser/status.js.map +1 -1
- package/dist/src/cli/commands/doctor.d.ts +45 -0
- package/dist/src/cli/commands/doctor.d.ts.map +1 -0
- package/dist/src/cli/commands/doctor.js +267 -0
- package/dist/src/cli/commands/doctor.js.map +1 -0
- package/dist/src/cli/commands/test/run.d.ts.map +1 -1
- package/dist/src/cli/commands/test/run.js +29 -18
- package/dist/src/cli/commands/test/run.js.map +1 -1
- package/dist/src/cli/commands/tunnel/close.d.ts +18 -0
- package/dist/src/cli/commands/tunnel/close.d.ts.map +1 -0
- package/dist/src/cli/commands/tunnel/close.js +154 -0
- package/dist/src/cli/commands/tunnel/close.js.map +1 -0
- package/dist/src/cli/commands/tunnel/index.d.ts +6 -0
- package/dist/src/cli/commands/tunnel/index.d.ts.map +1 -0
- package/dist/src/cli/commands/tunnel/index.js +17 -0
- package/dist/src/cli/commands/tunnel/index.js.map +1 -0
- package/dist/src/cli/commands/tunnel/ls.d.ts +10 -0
- package/dist/src/cli/commands/tunnel/ls.d.ts.map +1 -0
- package/dist/src/cli/commands/tunnel/ls.js +89 -0
- package/dist/src/cli/commands/tunnel/ls.js.map +1 -0
- package/dist/src/cli/commands/tunnel/start.d.ts +15 -0
- package/dist/src/cli/commands/tunnel/start.d.ts.map +1 -0
- package/dist/src/cli/commands/tunnel/start.js +65 -0
- package/dist/src/cli/commands/tunnel/start.js.map +1 -0
- package/dist/src/cli/commands/tunnel/status.d.ts +8 -0
- package/dist/src/cli/commands/tunnel/status.d.ts.map +1 -0
- package/dist/src/cli/commands/tunnel/status.js +58 -0
- package/dist/src/cli/commands/tunnel/status.js.map +1 -0
- package/dist/src/cli/generated/docs-content.d.ts +1 -1
- package/dist/src/cli/generated/docs-content.d.ts.map +1 -1
- package/dist/src/cli/generated/docs-content.js +157 -100
- package/dist/src/cli/generated/docs-content.js.map +1 -1
- package/dist/src/cli/index.js +8 -0
- package/dist/src/cli/index.js.map +1 -1
- package/dist/src/cli/lib/browser.d.ts +25 -9
- package/dist/src/cli/lib/browser.d.ts.map +1 -1
- package/dist/src/cli/lib/browser.js +73 -42
- package/dist/src/cli/lib/browser.js.map +1 -1
- package/dist/src/cli/lib/cli-entry.d.ts +40 -0
- package/dist/src/cli/lib/cli-entry.d.ts.map +1 -0
- package/dist/src/cli/lib/cli-entry.js +65 -0
- package/dist/src/cli/lib/cli-entry.js.map +1 -0
- package/dist/src/cli/lib/config.d.ts.map +1 -1
- package/dist/src/cli/lib/config.js +8 -4
- package/dist/src/cli/lib/config.js.map +1 -1
- package/dist/src/cli/lib/startup-sweep.d.ts +45 -0
- package/dist/src/cli/lib/startup-sweep.d.ts.map +1 -0
- package/dist/src/cli/lib/startup-sweep.js +246 -0
- package/dist/src/cli/lib/startup-sweep.js.map +1 -0
- package/dist/src/cli/lib/tunnel-banner.d.ts +33 -0
- package/dist/src/cli/lib/tunnel-banner.d.ts.map +1 -0
- package/dist/src/cli/lib/tunnel-banner.js +55 -0
- package/dist/src/cli/lib/tunnel-banner.js.map +1 -0
- package/dist/src/cli/lib/tunnel-error-hint.d.ts +20 -0
- package/dist/src/cli/lib/tunnel-error-hint.d.ts.map +1 -0
- package/dist/src/cli/lib/tunnel-error-hint.js +48 -0
- package/dist/src/cli/lib/tunnel-error-hint.js.map +1 -0
- package/dist/src/cli/lib/tunnel-option.d.ts +27 -0
- package/dist/src/cli/lib/tunnel-option.d.ts.map +1 -0
- package/dist/src/cli/lib/tunnel-option.js +77 -0
- package/dist/src/cli/lib/tunnel-option.js.map +1 -0
- package/dist/src/cli/lib/tunnel-resolve.d.ts +42 -0
- package/dist/src/cli/lib/tunnel-resolve.d.ts.map +1 -0
- package/dist/src/cli/lib/tunnel-resolve.js +72 -0
- package/dist/src/cli/lib/tunnel-resolve.js.map +1 -0
- package/lib/env/index.ts +51 -0
- package/lib/env/localhost.test.ts +63 -0
- package/lib/env/localhost.ts +51 -0
- package/lib/env/paths.ts +46 -0
- package/lib/env/sessions.test.ts +109 -0
- package/lib/env/sessions.ts +155 -0
- package/lib/tunnel/errors.test.ts +105 -0
- package/lib/tunnel/errors.ts +169 -0
- package/lib/tunnel/index.ts +26 -11
- package/lib/tunnel/registry.test.ts +420 -0
- package/lib/tunnel/registry.ts +646 -0
- package/package.json +1 -1
|
@@ -0,0 +1,646 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TunnelRegistry — a shared, refcount-managed layer over `TunnelManager`.
|
|
3
|
+
*
|
|
4
|
+
* Goals:
|
|
5
|
+
* - Two CLI commands targeting the same localhost base URL share one
|
|
6
|
+
* remote tunnel (one public URL, one provider connection).
|
|
7
|
+
* - State is visible and editable across sibling processes via
|
|
8
|
+
* `~/.qa-use/tunnels/<hash>.json`, so a SECOND process targeting the
|
|
9
|
+
* same localhost picks up the FIRST process's public URL rather than
|
|
10
|
+
* spinning up its own tunnel.
|
|
11
|
+
* - Last-releaser (in the OWNER process) keeps the tunnel alive for
|
|
12
|
+
* `GRACE_MS` (30 s default) so rapid-fire invocations do not thrash
|
|
13
|
+
* the provider.
|
|
14
|
+
*
|
|
15
|
+
* Cross-process coordination model:
|
|
16
|
+
* - The process that first acquires a given target becomes the OWNER
|
|
17
|
+
* and runs the in-process `TunnelManager`. Its PID is recorded in
|
|
18
|
+
* the registry file.
|
|
19
|
+
* - Later acquirers in OTHER processes read the file, see an alive
|
|
20
|
+
* owner PID, increment refcount under a lockfile, and return an
|
|
21
|
+
* "attach" handle (`isCrossProcessAttach: true`) with the owner's
|
|
22
|
+
* `publicUrl`. They do NOT construct a `TunnelManager`.
|
|
23
|
+
* - Read-modify-write of the record is guarded by a lockfile
|
|
24
|
+
* (`<hash>.lock`, `O_EXCL | O_CREAT`) with bounded retry.
|
|
25
|
+
*
|
|
26
|
+
* TTL grace limitation:
|
|
27
|
+
* - The grace window is bounded by the OWNER process's lifetime. Since
|
|
28
|
+
* `TunnelManager` is a per-process localtunnel client, the remote
|
|
29
|
+
* tunnel dies when the owner exits. For long-lived owners
|
|
30
|
+
* (`tunnel start --hold`, a running `test run`, a detached
|
|
31
|
+
* `browser create` from Phase 4) grace works as designed: a new
|
|
32
|
+
* acquirer within `GRACE_MS` cancels tear-down. For short-lived
|
|
33
|
+
* commands that release-and-exit immediately, grace collapses to
|
|
34
|
+
* zero (the owner process is gone, so there is no tunnel to keep
|
|
35
|
+
* alive anyway).
|
|
36
|
+
*
|
|
37
|
+
* Non-goals (this phase):
|
|
38
|
+
* - Daemonised tunnel hosts. Phase 4 adds detach so short-lived
|
|
39
|
+
* commands can leave a long-lived owner behind.
|
|
40
|
+
* - Retries on provider failure. Zero retries, consistent with Phase 2.
|
|
41
|
+
*/
|
|
42
|
+
|
|
43
|
+
import crypto from 'node:crypto';
|
|
44
|
+
import fs from 'node:fs';
|
|
45
|
+
import path from 'node:path';
|
|
46
|
+
import { URL } from 'node:url';
|
|
47
|
+
import { getPortFromUrl } from '../env/localhost.js';
|
|
48
|
+
import { ensureDir, tunnelsDir } from '../env/paths.js';
|
|
49
|
+
import { TunnelQuotaError } from './errors.js';
|
|
50
|
+
import { TunnelManager, type TunnelOptions } from './index.js';
|
|
51
|
+
|
|
52
|
+
/**
 * How long (ms) to keep a tunnel alive after its refcount hits zero.
 * A new `acquire()` within this window reuses the existing tunnel.
 * Overridable via `QA_USE_TUNNEL_GRACE_MS` env var (primarily a
 * test-friendly knob; production callers should stick with the default).
 *
 * Exported so callers/tests can reference the default; the effective
 * value is resolved per-registry in the constructor.
 */
export const GRACE_MS = 30_000;
|
|
59
|
+
|
|
60
|
+
function resolveGraceMsFromEnv(fallback: number): number {
|
|
61
|
+
const raw = process.env.QA_USE_TUNNEL_GRACE_MS;
|
|
62
|
+
if (!raw) return fallback;
|
|
63
|
+
const parsed = Number.parseInt(raw, 10);
|
|
64
|
+
if (Number.isFinite(parsed) && parsed >= 0) return parsed;
|
|
65
|
+
return fallback;
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
/**
 * Max concurrent tunnels per API key. Mirrors the sessionIndex clamp at
 * `lib/tunnel/index.ts:41` so we surface a clear error instead of silently
 * colliding on subdomains. Enforced (best-effort, outside the lock) in
 * `TunnelRegistry.acquire`.
 */
export const MAX_CONCURRENT_TUNNELS = 10;
|
|
74
|
+
|
|
75
|
+
/** On-disk schema for a registry entry (`~/.qa-use/tunnels/<hash>.json`). */
export interface TunnelRecord {
  /** Filename hash of the canonical target (see `targetHash`). */
  id: string;
  /** Canonical target URL (see `canonicalTarget`). */
  target: string;
  /** Public URL of the live tunnel, as reported by the provider session. */
  publicUrl: string;
  /** PID of the OWNER process running the in-process `TunnelManager`. */
  pid: number;
  /** Outstanding acquisitions across all processes. */
  refcount: number;
  /** Epoch-ms deadline of the grace window; `null` while refcount > 0. */
  ttlExpiresAt: number | null;
  /** Epoch ms at which the tunnel was started. */
  startedAt: number;
}
|
|
85
|
+
|
|
86
|
+
/**
 * Handle returned by `acquire()` / kept in memory by consumers.
 *
 * Consumers must call `registry.release(handle)` exactly once per
 * `acquire()` call. Double-release is tolerated (guarded by `_released`)
 * but still a caller bug.
 */
export interface TunnelHandle extends TunnelRecord {
  /**
   * True when this handle was acquired from a foreign process's tunnel
   * (the registry file's `pid` !== `process.pid`). The caller MUST NOT
   * attempt to retrieve a `TunnelManager` via `getLiveManager(target)` —
   * there isn't one in this process. Use `handle.publicUrl` directly.
   */
  isCrossProcessAttach: boolean;
  /**
   * Internal marker used to guard double-release. Not persisted.
   */
  _released?: boolean;
}
|
|
105
|
+
|
|
106
|
+
/**
|
|
107
|
+
* Canonical target key — used both as the map key and to derive the
|
|
108
|
+
* filename hash. We lowercase the hostname and drop any path/query so
|
|
109
|
+
* `http://Localhost:3000/foo` and `http://localhost:3000/` dedupe.
|
|
110
|
+
*/
|
|
111
|
+
export function canonicalTarget(target: string): string {
|
|
112
|
+
try {
|
|
113
|
+
const u = new URL(target);
|
|
114
|
+
return `${u.protocol}//${u.hostname.toLowerCase()}${u.port ? `:${u.port}` : ''}`;
|
|
115
|
+
} catch {
|
|
116
|
+
return target.toLowerCase();
|
|
117
|
+
}
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
/** Filename hash for a target. First 10 hex chars of sha256. */
|
|
121
|
+
export function targetHash(target: string): string {
|
|
122
|
+
return crypto.createHash('sha256').update(canonicalTarget(target)).digest('hex').slice(0, 10);
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
/**
|
|
126
|
+
* Atomic write: write to `<path>.tmp` then rename. `rename` is atomic on
|
|
127
|
+
* POSIX, so readers never see a half-written file.
|
|
128
|
+
*/
|
|
129
|
+
function atomicWriteJson(filePath: string, data: unknown): void {
|
|
130
|
+
ensureDir(path.dirname(filePath));
|
|
131
|
+
const tmp = `${filePath}.tmp-${process.pid}-${Date.now()}`;
|
|
132
|
+
fs.writeFileSync(tmp, JSON.stringify(data, null, 2));
|
|
133
|
+
fs.renameSync(tmp, filePath);
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
function readRecord(filePath: string): TunnelRecord | null {
|
|
137
|
+
try {
|
|
138
|
+
const raw = fs.readFileSync(filePath, 'utf8');
|
|
139
|
+
const parsed = JSON.parse(raw) as TunnelRecord;
|
|
140
|
+
if (typeof parsed.id !== 'string' || typeof parsed.target !== 'string') return null;
|
|
141
|
+
return parsed;
|
|
142
|
+
} catch {
|
|
143
|
+
return null;
|
|
144
|
+
}
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
function safeUnlink(filePath: string): void {
|
|
148
|
+
try {
|
|
149
|
+
fs.unlinkSync(filePath);
|
|
150
|
+
} catch {
|
|
151
|
+
/* ignore */
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
function isPidAlive(pid: number): boolean {
|
|
156
|
+
if (!pid || pid <= 0) return false;
|
|
157
|
+
try {
|
|
158
|
+
process.kill(pid, 0);
|
|
159
|
+
return true;
|
|
160
|
+
} catch (err) {
|
|
161
|
+
// ESRCH = no process with that pid. EPERM = exists but we can't
|
|
162
|
+
// signal it (still counts as alive).
|
|
163
|
+
if ((err as NodeJS.ErrnoException).code === 'EPERM') return true;
|
|
164
|
+
return false;
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
/**
|
|
169
|
+
* Acquire a per-target advisory lockfile. Cross-process exclusion for
|
|
170
|
+
* read-modify-write of the `<hash>.json` file. Implemented via
|
|
171
|
+
* `O_EXCL | O_CREAT` with bounded retry; releases via `unlinkSync`.
|
|
172
|
+
*
|
|
173
|
+
* Returns an `unlock` callback. Always invoke it in a `finally` block.
|
|
174
|
+
*/
|
|
175
|
+
const LOCK_RETRY_INTERVAL_MS = 15;
|
|
176
|
+
const LOCK_MAX_WAIT_MS = 2_000;
|
|
177
|
+
const LOCK_STALE_THRESHOLD_MS = 5_000;
|
|
178
|
+
|
|
179
|
+
async function withLock<T>(lockPath: string, fn: () => Promise<T> | T): Promise<T> {
|
|
180
|
+
ensureDir(path.dirname(lockPath));
|
|
181
|
+
const deadline = Date.now() + LOCK_MAX_WAIT_MS;
|
|
182
|
+
let fd: number | null = null;
|
|
183
|
+
|
|
184
|
+
while (true) {
|
|
185
|
+
try {
|
|
186
|
+
// O_EXCL | O_CREAT — fails with EEXIST if another holder is live.
|
|
187
|
+
fd = fs.openSync(lockPath, fs.constants.O_CREAT | fs.constants.O_EXCL | fs.constants.O_RDWR);
|
|
188
|
+
fs.writeSync(fd, String(process.pid));
|
|
189
|
+
break;
|
|
190
|
+
} catch (err) {
|
|
191
|
+
if ((err as NodeJS.ErrnoException).code !== 'EEXIST') throw err;
|
|
192
|
+
// Check for stale lock (older than threshold) and reap.
|
|
193
|
+
try {
|
|
194
|
+
const stat = fs.statSync(lockPath);
|
|
195
|
+
if (Date.now() - stat.mtimeMs > LOCK_STALE_THRESHOLD_MS) {
|
|
196
|
+
safeUnlink(lockPath);
|
|
197
|
+
continue;
|
|
198
|
+
}
|
|
199
|
+
} catch {
|
|
200
|
+
// Lock file vanished between exist check and stat — loop again.
|
|
201
|
+
continue;
|
|
202
|
+
}
|
|
203
|
+
if (Date.now() >= deadline) {
|
|
204
|
+
throw new Error(`Timed out waiting for tunnel registry lock: ${lockPath}`);
|
|
205
|
+
}
|
|
206
|
+
await new Promise((r) => setTimeout(r, LOCK_RETRY_INTERVAL_MS));
|
|
207
|
+
}
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
try {
|
|
211
|
+
return await fn();
|
|
212
|
+
} finally {
|
|
213
|
+
if (fd !== null) {
|
|
214
|
+
try {
|
|
215
|
+
fs.closeSync(fd);
|
|
216
|
+
} catch {
|
|
217
|
+
/* ignore */
|
|
218
|
+
}
|
|
219
|
+
}
|
|
220
|
+
safeUnlink(lockPath);
|
|
221
|
+
}
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
/**
 * In-memory book-keeping for a live tunnel managed by this process.
 * Only the OWNER process (record.pid === process.pid) has one of these.
 */
interface LiveEntry {
  /** In-memory mirror of the on-disk record (kept in sync under the lock). */
  record: TunnelRecord;
  /** The in-process tunnel client that owns the remote tunnel. */
  manager: TunnelManager;
  /** Pending tear-down timer, armed while refcount === 0 (grace window). */
  graceTimer?: NodeJS.Timeout;
}
|
|
233
|
+
|
|
234
|
+
/**
 * Hook used by tests to inject a fake `TunnelManager` without booting a
 * real tunnel. When unset (the default) the registry constructs a real
 * `TunnelManager`.
 */
export type TunnelManagerFactory = () => TunnelManager;
|
|
240
|
+
|
|
241
|
+
/** Constructor options for `TunnelRegistry`; all are optional. */
export interface TunnelRegistryOptions {
  /** Override factory (tests). */
  managerFactory?: TunnelManagerFactory;
  /** Override grace window (tests). Env var `QA_USE_TUNNEL_GRACE_MS` still wins. */
  graceMs?: number;
  /** Override concurrency cap (tests). */
  maxConcurrent?: number;
  /** Options threaded into `TunnelManager.startTunnel(...)`. */
  tunnelOptions?: TunnelOptions;
}
|
|
251
|
+
|
|
252
|
+
/**
 * Acquire / release / list API for tunnels.
 *
 * A singleton is exported as `tunnelRegistry` below for convenience;
 * callers that need isolation (tests) construct their own instance.
 */
export class TunnelRegistry {
  // Targets owned by THIS process, keyed by canonical target.
  private readonly live = new Map<string, LiveEntry>();
  private readonly graceMs: number;
  private readonly maxConcurrent: number;
  private readonly managerFactory: TunnelManagerFactory;

  constructor(opts: TunnelRegistryOptions = {}) {
    this.graceMs = resolveGraceMsFromEnv(opts.graceMs ?? GRACE_MS);
    this.maxConcurrent = opts.maxConcurrent ?? MAX_CONCURRENT_TUNNELS;
    this.managerFactory = opts.managerFactory ?? (() => new TunnelManager());
  }

  /**
   * Start a tunnel for `target` (or reuse an existing one — in-process
   * OR in a sibling process), returning a handle. Caller must pair each
   * acquire with exactly one `release(handle)`.
   */
  async acquire(target: string, opts: TunnelOptions = {}): Promise<TunnelHandle> {
    const canon = canonicalTarget(target);
    const hash = targetHash(canon);
    const file = path.join(tunnelsDir(), `${hash}.json`);
    const lock = path.join(tunnelsDir(), `${hash}.lock`);

    // Fast path: we already own a live manager in this process.
    const existing = this.live.get(canon);
    if (existing) {
      if (existing.graceTimer) {
        clearTimeout(existing.graceTimer);
        existing.graceTimer = undefined;
      }
      // NOTE(review): this bumps the IN-MEMORY record and writes it back
      // without re-reading the on-disk file. If a sibling process attached
      // (incrementing the disk refcount) since our last write, its
      // increment could be clobbered here — verify against the registry
      // tests / cross-process attach flow.
      return withLock(lock, () => {
        existing.record.refcount += 1;
        existing.record.ttlExpiresAt = null;
        this.writeRecord(existing.record);
        return { ...existing.record, isCrossProcessAttach: false, _released: false };
      });
    }

    // Slow path: consult the on-disk registry. Everything from here
    // runs under the per-target lock.
    const result = await withLock(
      lock,
      async (): Promise<{ kind: 'attach'; record: TunnelRecord } | { kind: 'fresh' }> => {
        const existingRecord = readRecord(file);
        if (existingRecord) {
          if (isPidAlive(existingRecord.pid) && existingRecord.pid !== process.pid) {
            // Another process owns this tunnel; attach.
            existingRecord.refcount += 1;
            existingRecord.ttlExpiresAt = null;
            this.writeRecord(existingRecord);
            return { kind: 'attach', record: existingRecord };
          }
          // Either same pid (rare — map miss means first acquire in
          // this process; treat as stale) or dead pid. Reap and fall
          // through.
          safeUnlink(file);
        }
        return { kind: 'fresh' };
      }
    );

    if (result.kind === 'attach') {
      return { ...result.record, isCrossProcessAttach: true, _released: false };
    }

    // Fresh start — cap check + launch + write under a fresh lock
    // acquisition (startTunnel can take seconds; don't hold the lock
    // that whole time, but DO re-validate nothing raced us).
    const activeList = this.list();
    if (activeList.length >= this.maxConcurrent) {
      throw new TunnelQuotaError(
        `Concurrent tunnel cap reached (${this.maxConcurrent}). Close an existing tunnel with \`qa-use tunnel close <target>\` and try again.`,
        { target: canon }
      );
    }

    const port = getPortFromUrl(canon);
    const manager = this.managerFactory();
    const session = await manager.startTunnel(port, opts);

    return withLock(lock, async () => {
      // Race resolution — in-process: a concurrent acquire may have
      // landed in `this.live` while we were booting.
      const existingLocal = this.live.get(canon);
      if (existingLocal) {
        try {
          await manager.stopTunnel();
        } catch {
          /* best-effort */
        }
        if (existingLocal.graceTimer) {
          clearTimeout(existingLocal.graceTimer);
          existingLocal.graceTimer = undefined;
        }
        existingLocal.record.refcount += 1;
        existingLocal.record.ttlExpiresAt = null;
        this.writeRecord(existingLocal.record);
        return {
          ...existingLocal.record,
          isCrossProcessAttach: false,
          _released: false,
        };
      }

      // Race resolution — cross-process: if another process wrote a
      // record while we were booting, prefer theirs and tear our
      // tunnel down.
      const raced = readRecord(file);
      if (raced && isPidAlive(raced.pid) && raced.pid !== process.pid) {
        try {
          await manager.stopTunnel();
        } catch {
          /* best-effort */
        }
        raced.refcount += 1;
        raced.ttlExpiresAt = null;
        this.writeRecord(raced);
        return { ...raced, isCrossProcessAttach: true, _released: false };
      }

      // We won — record ourselves as OWNER (pid) with refcount 1.
      const record: TunnelRecord = {
        id: hash,
        target: canon,
        publicUrl: session.publicUrl,
        pid: process.pid,
        refcount: 1,
        ttlExpiresAt: null,
        startedAt: Date.now(),
      };
      this.live.set(canon, { record, manager });
      this.writeRecord(record);
      return { ...record, isCrossProcessAttach: false, _released: false };
    });
  }

  /**
   * Release a handle. Decrements refcount (under lock). When the
   * refcount hits zero AND we are the owner, schedule a tear-down
   * `graceMs` later. A subsequent `acquire()` within the grace window
   * cancels the tear-down.
   *
   * Grace window is bounded by owner process lifetime — short-lived
   * commands exit before grace expires and will tear down immediately.
   */
  async release(handle: TunnelHandle): Promise<void> {
    // Idempotent: a second release of the same handle is a no-op.
    if (handle._released) return;
    handle._released = true;

    const canon = canonicalTarget(handle.target);
    const hash = targetHash(canon);
    const file = path.join(tunnelsDir(), `${hash}.json`);
    const lock = path.join(tunnelsDir(), `${hash}.lock`);

    await withLock(lock, async () => {
      const record = readRecord(file);
      if (!record) return; // Already torn down.

      record.refcount = Math.max(0, record.refcount - 1);

      if (record.refcount > 0) {
        record.ttlExpiresAt = null;
        this.writeRecord(record);
        // If we ARE the owner, keep the in-memory bookkeeping in sync.
        const entry = this.live.get(canon);
        if (entry) {
          entry.record.refcount = record.refcount;
          entry.record.ttlExpiresAt = null;
        }
        return;
      }

      // refcount === 0 — persist the grace deadline for siblings to see.
      record.ttlExpiresAt = Date.now() + this.graceMs;
      this.writeRecord(record);

      const entry = this.live.get(canon);
      if (entry && record.pid === process.pid) {
        // We are the owner and last releaser — schedule tear-down.
        entry.record.refcount = 0;
        entry.record.ttlExpiresAt = record.ttlExpiresAt;
        if (entry.graceTimer) {
          clearTimeout(entry.graceTimer);
        }
        entry.graceTimer = setTimeout(() => {
          void this.maybeTeardown(canon).catch(() => {
            /* best-effort */
          });
        }, this.graceMs);
        // unref so a pending grace timer never keeps the process alive.
        if (typeof entry.graceTimer.unref === 'function') {
          entry.graceTimer.unref();
        }
      }
      // If we are NOT the owner (cross-process attach), just leave the
      // zeroed refcount + ttl on disk. The owner's next release or its
      // own exit path will honour the grace timer.
    });
  }

  /**
   * Force teardown of a tunnel regardless of refcount. Used by
   * `qa-use tunnel close`. Safe to call when no such tunnel exists.
   *
   * If the owner is this process, tears down the in-memory manager.
   * Otherwise just removes the registry file (and leaves the orphan
   * remote tunnel to die with its owner).
   */
  async forceClose(target: string): Promise<void> {
    const canon = canonicalTarget(target);
    const hash = targetHash(canon);
    const lock = path.join(tunnelsDir(), `${hash}.lock`);
    await withLock(lock, async () => {
      await this.teardown(canon);
    });
  }

  /**
   * Look up a single entry by canonical target. Scans the on-disk
   * registry; returns `null` if no record exists or the owning pid is
   * dead (the stale file is reaped as a side-effect).
   */
  get(target: string): TunnelRecord | null {
    const canon = canonicalTarget(target);
    const hash = targetHash(canon);
    const file = path.join(tunnelsDir(), `${hash}.json`);
    const record = readRecord(file);
    if (!record) return null;
    if (!isPidAlive(record.pid)) {
      safeUnlink(file);
      return null;
    }
    return record;
  }

  /**
   * List all live entries. Reconciles against owning PID; stale entries
   * are removed as a side-effect.
   */
  list(): TunnelRecord[] {
    const dir = tunnelsDir();
    let files: string[];
    try {
      files = fs.readdirSync(dir);
    } catch {
      return [];
    }

    const out: TunnelRecord[] = [];
    for (const name of files) {
      // NOTE(review): the `.tmp` suffix check can never match files
      // produced by `atomicWriteJson` (named `*.json.tmp-<pid>-<ts>`,
      // which already fail the `.json` check) — it is dead as written.
      if (!name.endsWith('.json') || name.endsWith('.tmp')) continue;
      const file = path.join(dir, name);
      const record = readRecord(file);
      if (!record) {
        safeUnlink(file);
        continue;
      }
      if (!isPidAlive(record.pid)) {
        safeUnlink(file);
        continue;
      }
      out.push(record);
    }
    return out;
  }

  /**
   * Returns the live `TunnelManager` instance for `target` IF this process
   * currently owns it. Used by callers that need health-check / WS-URL
   * helpers on the underlying manager. Returns `null` for targets owned
   * by a different process (file visible via `list()` / `get()` but not
   * live in-memory here).
   */
  getLiveManager(target: string): TunnelManager | null {
    const canon = canonicalTarget(target);
    const entry = this.live.get(canon);
    return entry ? entry.manager : null;
  }

  /**
   * Look up by filename hash (e.g. output of `tunnel ls`). Stale files
   * (dead owner pid) are reaped as a side-effect, like `get()`.
   */
  getByHash(hash: string): TunnelRecord | null {
    const file = path.join(tunnelsDir(), `${hash}.json`);
    const record = readRecord(file);
    if (!record) return null;
    if (!isPidAlive(record.pid)) {
      safeUnlink(file);
      return null;
    }
    return record;
  }

  // -------------------------------------------------------------------
  // Internals
  // -------------------------------------------------------------------

  // Persist a record to `<tunnelsDir>/<id>.json` atomically.
  private writeRecord(record: TunnelRecord): void {
    const file = path.join(tunnelsDir(), `${record.id}.json`);
    atomicWriteJson(file, record);
  }

  /**
   * Timer callback: re-check under lock whether we should tear down. A
   * concurrent acquire may have bumped refcount back above zero.
   */
  private async maybeTeardown(canon: string): Promise<void> {
    const hash = targetHash(canon);
    const file = path.join(tunnelsDir(), `${hash}.json`);
    const lock = path.join(tunnelsDir(), `${hash}.lock`);

    await withLock(lock, async () => {
      const record = readRecord(file);
      if (!record) {
        // Someone else cleaned up already — drop our in-memory entry.
        const stale = this.live.get(canon);
        if (stale) {
          this.live.delete(canon);
          try {
            await stale.manager.stopTunnel();
          } catch {
            /* best-effort */
          }
        }
        return;
      }
      // If another consumer joined, refcount is back above zero.
      if (record.refcount > 0) {
        // Clear our grace timer bookkeeping; caller will rearm on next
        // release.
        const entry = this.live.get(canon);
        if (entry) {
          entry.record.refcount = record.refcount;
          entry.record.ttlExpiresAt = null;
          if (entry.graceTimer) {
            clearTimeout(entry.graceTimer);
            entry.graceTimer = undefined;
          }
        }
        return;
      }
      // ttlExpiresAt guard: may have been bumped by another release,
      // e.g. a cross-process release wrote a fresh grace window.
      if (record.ttlExpiresAt && record.ttlExpiresAt > Date.now()) {
        // Reschedule.
        const entry = this.live.get(canon);
        if (entry) {
          if (entry.graceTimer) clearTimeout(entry.graceTimer);
          const delay = Math.max(0, record.ttlExpiresAt - Date.now());
          entry.graceTimer = setTimeout(() => {
            void this.maybeTeardown(canon).catch(() => {
              /* best-effort */
            });
          }, delay);
          if (typeof entry.graceTimer.unref === 'function') {
            entry.graceTimer.unref();
          }
        }
        return;
      }
      await this.teardown(canon);
    });
  }

  /**
   * Unconditional tear-down. Callers must hold the per-target lock.
   */
  private async teardown(canon: string): Promise<void> {
    const entry = this.live.get(canon);
    const hash = targetHash(canon);
    const file = path.join(tunnelsDir(), `${hash}.json`);

    if (entry) {
      if (entry.graceTimer) {
        clearTimeout(entry.graceTimer);
        entry.graceTimer = undefined;
      }
      this.live.delete(canon);
      try {
        await entry.manager.stopTunnel();
      } catch {
        /* best-effort */
      }
    }

    safeUnlink(file);
  }
}
|
|
644
|
+
|
|
645
|
+
/**
 * Module-level singleton for CLI use. Tests needing isolation should
 * construct their own `TunnelRegistry` instead.
 */
export const tunnelRegistry = new TunnelRegistry();
|