@in-the-loop-labs/pair-review 3.2.3 → 3.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,679 @@
1
+ // Copyright 2026 Tim Perkins (tjwp) | SPDX-License-Identifier: Apache-2.0
2
+ 'use strict';
3
+
4
+ const fs = require('fs');
5
+ const logger = require('../utils/logger');
6
+ const { WorktreePoolRepository, WorktreeRepository, generateWorktreeId } = require('../database');
7
+ const { GitWorktreeManager } = require('./worktree');
8
+ const { WorktreePoolUsageTracker } = require('./worktree-pool-usage');
9
+ const { normalizeRepository } = require('../utils/paths');
10
+ const { getRepoPoolSize } = require('../config');
11
+
12
+ /**
13
+ * Consolidates the worktree pool state machine: absorbs WorktreePoolManager
14
+ * and composes WorktreePoolUsageTracker to provide a single entry point for
15
+ * all pool lifecycle operations (acquire, release, session/analysis tracking,
16
+ * startup rehydration).
17
+ */
18
+ class WorktreePoolLifecycle {
19
+ /**
20
+ * @param {Object} db - Database instance
21
+ * @param {Object} config - Configuration object from loadConfig()
22
+ * @param {Object} [_deps={}] - Injected dependencies for testing
23
+ */
24
+ constructor(db, config, _deps = {}) {
25
+ const defaults = {
26
+ poolRepo: new WorktreePoolRepository(db),
27
+ worktreeRepo: new WorktreeRepository(db),
28
+ usageTracker: new WorktreePoolUsageTracker(),
29
+ fs: fs,
30
+ simpleGit: require('simple-git'),
31
+ GitWorktreeManager: GitWorktreeManager,
32
+ };
33
+ const deps = { ...defaults, ..._deps };
34
+
35
+ this.db = db;
36
+ this.config = config;
37
+ this._poolRepo = deps.poolRepo;
38
+ this._worktreeRepo = deps.worktreeRepo;
39
+ this._usageTracker = deps.usageTracker;
40
+ this._fs = deps.fs;
41
+ this._simpleGit = deps.simpleGit;
42
+ this._GitWorktreeManager = deps.GitWorktreeManager;
43
+ }
44
+
45
  /**
   * Read-only accessor for the pool repository (used by callers that
   * need direct DB queries, e.g. route handlers checking pool status).
   *
   * Exposes the same instance held privately in `_poolRepo`; callers get
   * reference access, not a copy.
   *
   * @returns {Object} The WorktreePoolRepository backing this lifecycle
   */
  get poolRepo() {
    return this._poolRepo;
  }
52
+
53
+ // ── Absorbed from WorktreePoolManager ────────────────────────────────────
54
+
55
+ /**
56
+ * Acquire a pool worktree for a PR review.
57
+ *
58
+ * Claim steps use DB-level serialization (BEGIN IMMEDIATE transactions in
59
+ * poolRepo.claimByPR / claimAvailable) so that concurrent requests cannot
60
+ * grab the same slot -- even across independent instances.
61
+ *
62
+ * Decision tree:
63
+ * 1. Pool worktree already assigned to this PR -> claim atomically, refresh and return
64
+ * 2. Available (LRU) pool worktree exists -> claim atomically, switch to this PR
65
+ * 3. Pool not full -> create a new pool worktree
66
+ * 4. All slots occupied -> create a standard non-pool worktree (slower fallback)
67
+ *
68
+ * @param {Object} prInfo - { owner, repo, prNumber, repository }
69
+ * @param {Object} prData - { head: { sha, ref }, base: { sha, ref } }
70
+ * @param {string} repositoryPath - Path to the main repository clone
71
+ * @param {Object} options - { worktreeSourcePath, checkoutScript, checkoutTimeout, resetScript, worktreeConfig, poolSize }
72
+ * @returns {Promise<{ worktreePath: string, worktreeId: string }>}
73
+ */
74
+ async acquireForPR(prInfo, prData, repositoryPath, options = {}) {
75
+ const repository = prInfo.repository || normalizeRepository(prInfo.owner, prInfo.repo);
76
+ const { poolSize } = options;
77
+
78
+ // 1. Already assigned to this PR? Atomically claim via DB transaction.
79
+ const existingPool = await this._poolRepo.claimByPR(prInfo.prNumber, repository);
80
+ if (existingPool) {
81
+ const worktreeRecord = await this._worktreeRepo.findById(existingPool.id);
82
+ if (worktreeRecord) {
83
+ if (!this._fs.existsSync(worktreeRecord.path)) {
84
+ logger.warn(`Pool worktree ${existingPool.id} directory missing from disk (${worktreeRecord.path}) -- removing stale records`);
85
+ await this._poolRepo.delete(existingPool.id);
86
+ await this._worktreeRepo.delete(existingPool.id);
87
+ } else {
88
+ logger.info(`Pool worktree ${existingPool.id} already assigned to PR #${prInfo.prNumber}, refreshing`);
89
+ return this._refreshPoolWorktree(existingPool, worktreeRecord, prInfo, prData);
90
+ }
91
+ } else {
92
+ logger.warn(`Orphaned pool entry ${existingPool.id} -- removing`);
93
+ await this._poolRepo.delete(existingPool.id);
94
+ }
95
+ }
96
+
97
+ // 2. Available slot (LRU eviction)? Atomically claim via DB transaction.
98
+ const available = await this._poolRepo.claimAvailable(repository);
99
+ if (available) {
100
+ const worktreeRecord = await this._worktreeRepo.findById(available.id);
101
+ if (worktreeRecord) {
102
+ if (!this._fs.existsSync(worktreeRecord.path)) {
103
+ logger.warn(`Pool worktree ${available.id} directory missing from disk (${worktreeRecord.path}) -- removing stale records`);
104
+ await this._poolRepo.delete(available.id);
105
+ await this._worktreeRepo.delete(available.id);
106
+ } else {
107
+ logger.info(`Switching pool worktree ${available.id} to PR #${prInfo.prNumber}`);
108
+ return this._switchPoolWorktree(available, worktreeRecord, prInfo, prData, options);
109
+ }
110
+ } else {
111
+ logger.warn(`Orphaned pool entry ${available.id} -- removing`);
112
+ await this._poolRepo.delete(available.id);
113
+ }
114
+ }
115
+
116
+ // 3. Pool not full -- atomically reserve a slot, then create
117
+ const poolId = generateWorktreeId();
118
+ const reserved = await this._poolRepo.reserveSlot(poolId, repository, poolSize);
119
+ if (reserved) {
120
+ logger.info(`Reserved pool slot ${poolId} for PR #${prInfo.prNumber}, creating worktree`);
121
+ return this._createPoolWorktree(prInfo, prData, repositoryPath, options, poolId);
122
+ }
123
+
124
+ // 4. All slots occupied — fall back to a standard non-pool worktree
125
+ // (slower but functional; the pool is pre-warmed capacity, not a hard limit)
126
+ logger.warn(`Pool full for ${repository} (${poolSize} slots), creating non-pool worktree for PR #${prInfo.prNumber} — setup will be slower`);
127
+
128
+ const normalizedPrData = {
129
+ head_sha: prData.head?.sha || prData.head_sha,
130
+ head_branch: prData.head?.ref || prData.head_branch,
131
+ base_sha: prData.base?.sha || prData.base_sha,
132
+ base_branch: prData.base?.ref || prData.base_branch,
133
+ repository: prData.repository,
134
+ };
135
+
136
+ const normalizedPrInfo = {
137
+ owner: prInfo.owner,
138
+ repo: prInfo.repo,
139
+ number: prInfo.prNumber,
140
+ };
141
+
142
+ const worktreeManager = new this._GitWorktreeManager(this.db, options.worktreeConfig || {});
143
+ const { path: worktreePath, id: worktreeId } = await worktreeManager.createWorktreeForPR(
144
+ normalizedPrInfo,
145
+ normalizedPrData,
146
+ repositoryPath,
147
+ { worktreeSourcePath: options.worktreeSourcePath, checkoutScript: options.checkoutScript, checkoutTimeout: options.checkoutTimeout }
148
+ );
149
+
150
+ return { worktreePath, worktreeId };
151
+ }
152
+
153
+ /**
154
+ * Create a new pool worktree from scratch.
155
+ * Expects that the caller has already reserved a pool slot via
156
+ * poolRepo.reserveSlot(). Delegates to GitWorktreeManager.createWorktreeForPR,
157
+ * then finalizes the reservation on success or deletes it on failure.
158
+ *
159
+ * @param {Object} prInfo
160
+ * @param {Object} prData
161
+ * @param {string} repositoryPath
162
+ * @param {Object} options
163
+ * @param {string} poolId - Pre-reserved pool worktree ID
164
+ * @returns {Promise<{ worktreePath: string, worktreeId: string }>}
165
+ */
166
+ async _createPoolWorktree(prInfo, prData, repositoryPath, options, poolId) {
167
+ const repository = prInfo.repository || normalizeRepository(prInfo.owner, prInfo.repo);
168
+ const { worktreeSourcePath, checkoutScript, checkoutTimeout, worktreeConfig } = options;
169
+
170
+ // Build worktree config with pool ID substituted into the name template.
171
+ // Preserve any user-configured template (e.g., '{id}/src' for monorepos)
172
+ // and only replace the {id} placeholder with the pool-specific ID.
173
+ const poolWorktreeConfig = {
174
+ ...(worktreeConfig || {}),
175
+ nameTemplate: (worktreeConfig?.nameTemplate || '{id}').replace(/\{id\}/g, poolId),
176
+ };
177
+
178
+ const worktreeManager = new this._GitWorktreeManager(this.db, poolWorktreeConfig);
179
+
180
+ // Normalize prData into the shape createWorktreeForPR expects
181
+ const normalizedPrData = {
182
+ head_sha: prData.head?.sha || prData.head_sha,
183
+ head_branch: prData.head?.ref || prData.head_branch,
184
+ base_sha: prData.base?.sha || prData.base_sha,
185
+ base_branch: prData.base?.ref || prData.base_branch,
186
+ repository: prData.repository,
187
+ };
188
+
189
+ const normalizedPrInfo = {
190
+ owner: prInfo.owner,
191
+ repo: prInfo.repo,
192
+ number: prInfo.prNumber,
193
+ };
194
+
195
+ try {
196
+ const { path: worktreePath, id: worktreeId } = await worktreeManager.createWorktreeForPR(
197
+ normalizedPrInfo,
198
+ normalizedPrData,
199
+ repositoryPath,
200
+ { worktreeSourcePath, checkoutScript, checkoutTimeout, explicitId: poolId }
201
+ );
202
+
203
+ // Finalize the reservation: set path and mark in_use.
204
+ // Use poolId (the reserved pool slot ID), NOT worktreeId (the worktrees-table ID).
205
+ await this._poolRepo.finalizeReservation(poolId, worktreePath, prInfo.prNumber);
206
+
207
+ logger.info(`Created pool worktree ${poolId} at ${worktreePath}`);
208
+ return { worktreePath, worktreeId: poolId };
209
+ } catch (err) {
210
+ // Creation failed -- remove the placeholder to free the slot
211
+ try {
212
+ await this._poolRepo.deleteReservation(poolId);
213
+ } catch (cleanupErr) {
214
+ logger.error(`Failed to delete reservation ${poolId} after creation failure: ${cleanupErr.message}`);
215
+ }
216
+ throw err;
217
+ }
218
+ }
219
+
220
  /**
   * Switch an existing pool worktree to a different PR.
   *
   * The statement order below is load-bearing: fetch refs, hard-reset and
   * clean, checkout, run the reset script, update the worktrees table, clear
   * in-memory tracking, and only then mark the pool entry in_use. On any
   * failure the entry is rolled back to 'available'.
   *
   * @param {Object} poolEntry - Pool table record
   * @param {Object} worktreeRecord - Worktrees table record
   * @param {Object} prInfo
   * @param {Object} prData
   * @param {Object} options
   * @returns {Promise<{ worktreePath: string, worktreeId: string }>}
   */
  async _switchPoolWorktree(poolEntry, worktreeRecord, prInfo, prData, options) {
    // Note: poolEntry was already atomically marked 'switching' by claimAvailable()
    try {
      const git = this._simpleGit(poolEntry.path);
      const worktreeManager = new this._GitWorktreeManager(this.db);

      // Resolve the remote to fetch from for this PR's head.
      const remoteName = await worktreeManager.resolveRemoteForPR(git, prData, {
        owner: prInfo.owner,
        repo: prInfo.repo,
        number: prInfo.prNumber,
      });

      // Fetch new PR refs (incremental -- cheap on a warm worktree) with fallback
      logger.info(`Fetching PR #${prInfo.prNumber} refs into pool worktree ${poolEntry.id}`);
      const fetchedHead = await worktreeManager.fetchPRHead(git, {
        owner: prInfo.owner,
        repo: prInfo.repo,
        number: prInfo.prNumber,
      }, prData, { remote: remoteName });

      // Clean the working tree before switching PRs. Without this, untracked
      // files (build artifacts, generated code) from the previous PR leak into
      // the new checkout, and modified tracked files can cause checkout to fail.
      // Use -fd (not -fdx) to preserve gitignored files like node_modules that
      // the resetScript may depend on. (simple-git's clean(mode, customArgs)
      // signature: 'f' plus ['-d'] — assumed to run `git clean -f -d`; confirm
      // against the simple-git docs if this is ever changed.)
      await git.reset(['--hard', 'HEAD']);
      await git.clean('f', ['-d']);

      // Checkout specific head SHA (stored SHA in restore mode, latest in fresh mode)
      const targetSha = prData.head?.sha || prData.head_sha;
      if (targetSha) {
        await git.checkout([targetSha]);
      } else {
        await git.checkout([fetchedHead.checkoutTarget]);
      }

      // Run reset_script if configured; it receives the PR context via env vars.
      if (options.resetScript) {
        logger.info(`Executing reset script: ${options.resetScript}`);
        const headRef = prData.head?.ref || prData.head_branch || '';
        const baseRef = prData.base?.ref || prData.base_branch || '';
        const headSha = prData.head?.sha || prData.head_sha || '';
        const baseSha = prData.base?.sha || prData.base_sha || '';

        const scriptEnv = {
          BASE_BRANCH: baseRef,
          HEAD_BRANCH: headRef,
          BASE_SHA: baseSha,
          HEAD_SHA: headSha,
          PR_NUMBER: String(prInfo.prNumber),
          WORKTREE_PATH: poolEntry.path,
        };
        await worktreeManager.executeCheckoutScript(
          options.resetScript, poolEntry.path, scriptEnv, options.checkoutTimeout
        );
      }

      // Update worktrees table (returns paths of deleted non-pool worktree records)
      const branch = prData.head?.ref || prData.head_branch || '';
      const deletedPaths = await this._worktreeRepo.switchPR(poolEntry.id, prInfo.prNumber, branch);

      // Best-effort disk cleanup for deleted non-pool worktree directories;
      // a failure here is logged but never aborts the switch.
      if (deletedPaths && deletedPaths.length > 0) {
        for (const deletedPath of deletedPaths) {
          try {
            await worktreeManager.cleanupWorktree(deletedPath);
            logger.info(`Cleaned up obsolete worktree directory: ${deletedPath}`);
          } catch (cleanupErr) {
            logger.warn(`Failed to clean up obsolete worktree directory ${deletedPath}: ${cleanupErr.message}`);
          }
        }
      }

      // Forcefully clear all previous tracking state (sessions, analyses, grace
      // timers, review mappings) before assigning to the new PR. Without this,
      // zombie holds from the previous PR could trigger a false onIdle event
      // that marks the worktree available while the new PR is using it.
      this._usageTracker.clearWorktree(poolEntry.id);

      // Mark in_use in pool table — last step, so the slot only becomes
      // claimable-as-this-PR once the worktree is fully prepared.
      await this._poolRepo.markInUse(poolEntry.id, prInfo.prNumber);

      logger.info(`Switched pool worktree ${poolEntry.id} to PR #${prInfo.prNumber}`);
      return { worktreePath: poolEntry.path, worktreeId: poolEntry.id };
    } catch (err) {
      // Roll back to available on failure so the slot is not stuck 'switching'.
      try {
        await this._poolRepo.markAvailable(poolEntry.id);
      } catch (rollbackErr) {
        logger.error(`Failed to roll back pool worktree ${poolEntry.id} status: ${rollbackErr.message}`);
      }
      throw err;
    }
  }
325
+
326
  /**
   * Refresh an existing pool worktree that is already assigned to the right PR.
   * Delegates to GitWorktreeManager.refreshWorktree for the git operations,
   * then ensures the pool entry is marked as in_use.
   *
   * Fast path: if HEAD already equals the target SHA, skip the refresh
   * entirely and just mark the entry in_use.
   *
   * @param {Object} poolEntry - Pool table record
   * @param {Object} worktreeRecord - Worktrees table record
   * @param {Object} prInfo
   * @param {Object} prData
   * @returns {Promise<{ worktreePath: string, worktreeId: string }>}
   */
  async _refreshPoolWorktree(poolEntry, worktreeRecord, prInfo, prData) {
    const targetSha = prData.head?.sha || prData.head_sha;
    if (targetSha) {
      try {
        const git = this._simpleGit(poolEntry.path);
        const currentHead = (await git.revparse(['HEAD'])).trim();
        if (currentHead === targetSha) {
          logger.info(`Pool worktree ${poolEntry.id} already at target SHA ${targetSha.slice(0, 8)}, skipping refresh`);
          await this._poolRepo.markInUse(poolEntry.id, prInfo.prNumber);
          return { worktreePath: poolEntry.path, worktreeId: poolEntry.id };
        }
      } catch (err) {
        // Best-effort check; on failure fall through to the full refresh.
        logger.warn(`Could not check HEAD of pool worktree ${poolEntry.id}: ${err.message}`);
      }
    }

    // Normalize prData into the flat shape refreshWorktree expects.
    const normalizedPrData = {
      head_sha: prData.head?.sha || prData.head_sha,
      head_branch: prData.head?.ref || prData.head_branch,
      base_sha: prData.base?.sha || prData.base_sha,
      base_branch: prData.base?.ref || prData.base_branch,
      repository: prData.repository,
    };

    const normalizedPrInfo = {
      owner: prInfo.owner,
      repo: prInfo.repo,
      number: prInfo.prNumber,
    };

    // Pool worktrees are unattended — dirty state is leftover noise, not
    // intentional edits. Clean before refresh so refreshWorktree's
    // hasLocalChanges guard never trips. Mirrors _switchPoolWorktree cleanup
    // (-fd, not -fdx, to keep gitignored files like node_modules).
    const refreshGit = this._simpleGit(poolEntry.path);
    await refreshGit.reset(['--hard', 'HEAD']);
    await refreshGit.clean('f', ['-d']);

    const worktreeManager = new this._GitWorktreeManager(this.db);
    await worktreeManager.refreshWorktree(worktreeRecord, normalizedPrInfo.number, normalizedPrData, normalizedPrInfo);

    // refreshWorktree fetches from GitHub and resets to FETCH_HEAD, which is
    // the latest PR head. In restore mode targetSha may be an older commit
    // from the cached review. Explicitly check out the target SHA so the
    // worktree lands on the expected commit rather than FETCH_HEAD.
    if (targetSha) {
      try {
        const git = this._simpleGit(poolEntry.path);
        await git.checkout([targetSha]);
      } catch (err) {
        logger.warn(`Could not checkout target SHA ${targetSha.slice(0, 8)} in pool worktree ${poolEntry.id}, continuing at FETCH_HEAD: ${err.message}`);
      }
    }

    // Ensure pool entry is marked in_use
    await this._poolRepo.markInUse(poolEntry.id, prInfo.prNumber);

    logger.info(`Refreshed pool worktree ${poolEntry.id} for PR #${prInfo.prNumber}`);
    return { worktreePath: poolEntry.path, worktreeId: poolEntry.id };
  }
396
+
397
  /**
   * Release a pool worktree, marking it as available for reuse.
   * Kept for backward compatibility -- callers that only need to mark
   * a worktree available without touching usage tracking can use this.
   *
   * Unlike releaseForDeletion / releaseAfterHeadless, this does NOT clear
   * the in-memory usage tracker for the worktree.
   *
   * @param {string} worktreeId - Pool worktree ID
   */
  async release(worktreeId) {
    await this._poolRepo.markAvailable(worktreeId);
    logger.info(`Pool worktree ${worktreeId} released`);
  }
408
+
409
+ // ── New lifecycle methods ────────────────────────────────────────────────
410
+
411
+ /**
412
+ * Register a WebSocket session for a pool worktree.
413
+ * Looks up the worktree by review ID, then adds the session to the
414
+ * in-memory usage tracker.
415
+ *
416
+ * @param {number} reviewId - The review ID to look up
417
+ * @param {string} sessionKey - Unique key for this WS connection
418
+ * @returns {Promise<{ worktreeId: string }|null>} worktreeId if found, null otherwise
419
+ */
420
+ async startSession(reviewId, sessionKey) {
421
+ const entry = await this._poolRepo.findByReviewId(reviewId);
422
+ if (!entry) return null;
423
+
424
+ this._usageTracker.addSession(entry.id, sessionKey);
425
+ return { worktreeId: entry.id };
426
+ }
427
+
428
  /**
   * Remove a WebSocket session from the usage tracker.
   * Synchronous -- does not touch the database.
   *
   * NOTE(review): dropping the last hold presumably starts the tracker's
   * idle/grace-period flow (see the onIdle wiring in resetAndRehydrate)
   * rather than releasing immediately — confirm against
   * WorktreePoolUsageTracker.
   *
   * @param {string} worktreeId - Pool worktree ID
   * @param {string} sessionKey - Unique key for the WS connection
   */
  endSession(worktreeId, sessionKey) {
    this._usageTracker.removeSession(worktreeId, sessionKey);
  }
438
+
439
+ /**
440
+ * Register an AI analysis for a pool worktree.
441
+ * Looks up the worktree by review ID, then adds the analysis to the
442
+ * in-memory usage tracker.
443
+ *
444
+ * @param {number} reviewId - The review ID to look up
445
+ * @param {string} analysisId - Unique analysis identifier
446
+ * @returns {Promise<string|null>} worktreeId if found, null otherwise
447
+ */
448
+ async startAnalysis(reviewId, analysisId) {
449
+ const entry = await this._poolRepo.findByReviewId(reviewId);
450
+ if (!entry) return null;
451
+
452
+ this._usageTracker.addAnalysis(entry.id, analysisId);
453
+ return entry.id;
454
+ }
455
+
456
  /**
   * Remove an analysis hold from the usage tracker by analysis ID.
   * Synchronous -- does not touch the database. The tracker resolves the
   * owning worktree itself, so no worktreeId is needed here.
   *
   * @param {string} analysisId - Unique analysis identifier
   */
  endAnalysis(analysisId) {
    this._usageTracker.removeAnalysisById(analysisId);
  }
465
+
466
+ /**
467
+ * Release a worktree for deletion: clear usage state first, then
468
+ * mark available in the database.
469
+ *
470
+ * @param {string} worktreeId - Pool worktree ID
471
+ */
472
+ async releaseForDeletion(worktreeId) {
473
+ this._usageTracker.clearWorktree(worktreeId);
474
+ await this._poolRepo.markAvailable(worktreeId);
475
+ }
476
+
477
  /**
   * Release a worktree after a headless analysis completes: clear in-memory
   * usage state first, then mark available in the database. Clearing state
   * before the DB write prevents a race where another request claims the slot
   * (after the DB write) and then has its tracking state wiped by clearWorktree.
   *
   * Note: markAvailable may still reject; callers should handle/propagate
   * that failure (the in-memory state will already have been cleared).
   *
   * @param {string} worktreeId - Pool worktree ID
   */
  async releaseAfterHeadless(worktreeId) {
    this._usageTracker.clearWorktree(worktreeId);
    await this._poolRepo.markAvailable(worktreeId);
  }
489
+
490
+ /**
491
+ * Permanently destroy a pool worktree: cancel active analyses, clear
492
+ * in-memory tracking, remove from disk, and delete from both DB tables.
493
+ *
494
+ * Unlike releaseForDeletion (which merely marks the slot available),
495
+ * this removes the worktree entirely. The pool will create a replacement
496
+ * when the next acquireForPR call triggers reserveSlot.
497
+ *
498
+ * @param {string} worktreeId - Pool worktree ID
499
+ * @param {Object} [options={}]
500
+ * @param {Function} [options.cancelAnalyses] - Async callback(worktreeId, analysisIdSet) to cancel running analyses
501
+ */
502
+ async destroyPoolWorktree(worktreeId, { cancelAnalyses } = {}) {
503
+ const poolEntry = await this._poolRepo.getPoolEntry(worktreeId);
504
+ if (poolEntry && (poolEntry.status === 'creating' || poolEntry.status === 'switching')) {
505
+ throw new Error(`Cannot delete worktree ${worktreeId}: currently ${poolEntry.status}`);
506
+ }
507
+
508
+ const activeIds = this._usageTracker.getActiveAnalyses(worktreeId);
509
+ if (activeIds.size > 0 && cancelAnalyses) {
510
+ await cancelAnalyses(worktreeId, activeIds);
511
+ }
512
+
513
+ this._usageTracker.clearWorktree(worktreeId);
514
+
515
+ const record = await this._worktreeRepo.findById(worktreeId);
516
+ if (record && record.path) {
517
+ try {
518
+ const mgr = new this._GitWorktreeManager(this.db);
519
+ await mgr.cleanupWorktree(record.path);
520
+ } catch (err) {
521
+ logger.warn(`Could not clean up pool worktree ${worktreeId} from disk: ${err.message}`);
522
+ }
523
+ }
524
+
525
+ await this._poolRepo.delete(worktreeId);
526
+ await this._worktreeRepo.delete(worktreeId);
527
+ logger.info(`Destroyed pool worktree ${worktreeId}`);
528
+ }
529
+
530
  /**
   * Set the review ID that owns a pool worktree (persistent ownership).
   * Thin passthrough to the pool repository; pass null to clear ownership.
   *
   * @param {string} worktreeId - Pool worktree ID
   * @param {number|null} reviewId - Review ID that owns the worktree
   */
  async setReviewOwner(worktreeId, reviewId) {
    await this._poolRepo.setCurrentReviewId(worktreeId, reviewId);
  }
539
+
540
  /**
   * Return the set of active analysis IDs for a worktree.
   * Reads the in-memory usage tracker only; no database access.
   *
   * @param {string} worktreeId - Pool worktree ID
   * @returns {Set<string>} Active analysis IDs (may be empty)
   */
  getActiveAnalyses(worktreeId) {
    return this._usageTracker.getActiveAnalyses(worktreeId);
  }
549
+
550
+ /**
551
+ * Reset stale pool entries and rehydrate preserved ones on startup.
552
+ *
553
+ * This method:
554
+ * 1. Calls resetStaleAndPreserve() to clean up stale DB entries and
555
+ * identify entries with valid review ownership
556
+ * 2. Wires the onIdle callback with retry logic (2 attempts, 1s delay)
557
+ * so that idle worktrees are automatically marked available
558
+ * 3. Rehydrates preserved entries by triggering a synthetic
559
+ * session add/remove cycle, which starts the grace-period timer.
560
+ * If a real user reconnects before the timer fires, their WS
561
+ * session will cancel it automatically.
562
+ *
563
+ * @returns {Promise<Array<{id: string, current_review_id: number}>>} Preserved entries
564
+ */
565
+ async resetAndRehydrate() {
566
+ // 1. Reset stale entries and get preserved ones
567
+ const preserved = await this._poolRepo.resetStaleAndPreserve();
568
+ if (preserved.length > 0) {
569
+ logger.info(`Pool startup: preserved ${preserved.length} active worktree(s)`);
570
+ }
571
+
572
+ // 1b. Adopt existing non-pool worktrees into pool for pool-enabled repos
573
+ const adopted = await this._adoptExistingWorktrees();
574
+ for (const entry of adopted) {
575
+ preserved.push(entry);
576
+ }
577
+
578
+ // 2. Wire up idle callback with retry logic (2 attempts, 1s delay)
579
+ this._usageTracker.onIdle = async (worktreeId) => {
580
+ for (let attempt = 1; attempt <= 2; attempt++) {
581
+ try {
582
+ await this._poolRepo.markAvailable(worktreeId);
583
+ logger.info(`Pool worktree ${worktreeId} is now available`);
584
+ return;
585
+ } catch (err) {
586
+ if (attempt < 2) {
587
+ logger.warn(`Failed to release pool worktree ${worktreeId} (attempt ${attempt}), retrying: ${err.message}`);
588
+ await new Promise(r => setTimeout(r, 1000));
589
+ } else {
590
+ logger.error(`Failed to release pool worktree ${worktreeId} after ${attempt} attempts: ${err.message}`);
591
+ }
592
+ }
593
+ }
594
+ };
595
+
596
+ // 3. Rehydrate preserved entries by triggering grace-period timers
597
+ if (preserved.length > 0) {
598
+ logger.info(`Pool startup: preserved ${preserved.length} active worktree(s), starting grace periods`);
599
+ for (const entry of preserved) {
600
+ // Add then immediately remove a synthetic session to trigger the
601
+ // idle grace period timer. If a real user reconnects before the
602
+ // timer fires, their WS session will cancel it automatically.
603
+ this._usageTracker.addSession(entry.id, 'startup-rehydration');
604
+ this._usageTracker.removeSession(entry.id, 'startup-rehydration');
605
+ }
606
+ }
607
+
608
+ return preserved;
609
+ }
610
+
611
  /**
   * Adopt existing non-pool worktrees into the pool for repos that have pool_size configured.
   * Worktrees already in worktree_pool are skipped. Adoption stops at pool capacity.
   *
   * Returns adopted entries that have `status = 'in_use'` so the caller can
   * rehydrate them with synthetic sessions (same as preserved entries).
   *
   * @returns {Promise<Array<{id: string, current_review_id: number}>>} Adopted in_use entries
   * @private
   */
  async _adoptExistingWorktrees() {
    const repos = this.config.repos || {};
    const adoptedInUse = [];

    for (const repoName of Object.keys(repos)) {
      // Repos without a configured pool size are not pool-enabled; skip.
      const poolSize = getRepoPoolSize(this.config, repoName);
      if (!poolSize) continue;

      // Count existing pool entries for this repo
      const existingCount = await this._poolRepo.countForRepo(repoName);
      if (existingCount >= poolSize) continue; // already at capacity

      // Find worktrees for this repo that are NOT in the pool (includes review ID via JOIN)
      const orphans = await this._poolRepo.findOrphanWorktrees(repoName);

      let adopted = 0;
      for (const orphan of orphans) {
        if (existingCount + adopted >= poolSize) break; // respect capacity

        // Skip orphans whose directory no longer exists on disk
        // (skips do not count against capacity — only successful adoptions do)
        if (!this._fs.existsSync(orphan.path)) {
          logger.warn(`Pool startup: skipping adoption of ${orphan.id} — directory missing (${orphan.path})`);
          continue;
        }

        if (orphan.reviewId) {
          // Adopt as in_use with review ownership
          await this._poolRepo.create({
            id: orphan.id,
            repository: orphan.repository,
            path: orphan.path,
            prNumber: orphan.pr_number,
          });
          await this._poolRepo.setCurrentReviewId(orphan.id, orphan.reviewId);
          adoptedInUse.push({ id: orphan.id, current_review_id: orphan.reviewId });
          logger.info(`Pool startup: adopted worktree ${orphan.id} for PR #${orphan.pr_number} (in_use, review ${orphan.reviewId})`);
        } else {
          // Adopt as available (no active review).
          // NOTE(review): this branch deliberately omits prNumber from the
          // create() payload even though orphan.pr_number exists — presumably
          // an available slot carries no PR binding; confirm against
          // WorktreePoolRepository.create's handling of a missing prNumber.
          await this._poolRepo.create({
            id: orphan.id,
            repository: orphan.repository,
            path: orphan.path,
          });
          logger.info(`Pool startup: adopted worktree ${orphan.id} for PR #${orphan.pr_number} (available, no review)`);
        }

        adopted++;
      }

      if (adopted > 0) {
        logger.info(`Pool startup: adopted ${adopted} worktree(s) for ${repoName}`);
      }
    }

    return adoptedInUse;
  }
677
+ }
678
+
679
// Single named export; the class is this module's entire public surface.
module.exports = { WorktreePoolLifecycle };