@in-the-loop-labs/pair-review 3.2.2 → 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +7 -6
- package/package.json +5 -4
- package/plugin/.claude-plugin/plugin.json +1 -1
- package/plugin-code-critic/.claude-plugin/plugin.json +1 -1
- package/plugin-code-critic/skills/analyze/references/orchestration-balanced.md +9 -1
- package/plugin-code-critic/skills/analyze/references/orchestration-fast.md +8 -1
- package/plugin-code-critic/skills/analyze/references/orchestration-thorough.md +8 -7
- package/public/css/repo-settings.css +347 -0
- package/public/index.html +46 -9
- package/public/js/components/AIPanel.js +79 -37
- package/public/js/components/DiffOptionsDropdown.js +84 -1
- package/public/js/index.js +31 -6
- package/public/js/modules/analysis-history.js +11 -7
- package/public/js/pr.js +22 -0
- package/public/js/repo-settings.js +334 -6
- package/public/repo-settings.html +29 -0
- package/src/ai/analyzer.js +28 -19
- package/src/ai/claude-cli.js +2 -0
- package/src/ai/claude-provider.js +4 -1
- package/src/ai/prompts/baseline/consolidation/balanced.js +6 -4
- package/src/ai/prompts/baseline/consolidation/fast.js +6 -2
- package/src/ai/prompts/baseline/consolidation/thorough.js +7 -6
- package/src/ai/prompts/baseline/orchestration/balanced.js +13 -1
- package/src/ai/prompts/baseline/orchestration/fast.js +12 -1
- package/src/ai/prompts/baseline/orchestration/thorough.js +8 -7
- package/src/ai/provider.js +7 -6
- package/src/chat/session-manager.js +6 -3
- package/src/config.js +230 -38
- package/src/database.js +766 -38
- package/src/git/worktree-pool-lifecycle.js +674 -0
- package/src/git/worktree-pool-usage.js +216 -0
- package/src/git/worktree.js +46 -13
- package/src/main.js +185 -26
- package/src/routes/analyses.js +48 -26
- package/src/routes/chat.js +27 -3
- package/src/routes/config.js +17 -5
- package/src/routes/executable-analysis.js +38 -19
- package/src/routes/local.js +19 -6
- package/src/routes/mcp.js +13 -2
- package/src/routes/pr.js +72 -29
- package/src/routes/setup.js +41 -4
- package/src/routes/stack-analysis.js +29 -10
- package/src/routes/worktrees.js +294 -9
- package/src/server.js +20 -3
- package/src/setup/pr-setup.js +161 -27
- package/src/ws/server.js +51 -1
|
@@ -0,0 +1,674 @@
|
|
|
1
|
+
// Copyright 2026 Tim Perkins (tjwp) | SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
'use strict';
|
|
3
|
+
|
|
4
|
+
const fs = require('fs');
|
|
5
|
+
const logger = require('../utils/logger');
|
|
6
|
+
const { WorktreePoolRepository, WorktreeRepository, generateWorktreeId } = require('../database');
|
|
7
|
+
const { GitWorktreeManager } = require('./worktree');
|
|
8
|
+
const { WorktreePoolUsageTracker } = require('./worktree-pool-usage');
|
|
9
|
+
const { normalizeRepository } = require('../utils/paths');
|
|
10
|
+
const { getRepoPoolSize } = require('../config');
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* Consolidates the worktree pool state machine: absorbs WorktreePoolManager
|
|
14
|
+
* and composes WorktreePoolUsageTracker to provide a single entry point for
|
|
15
|
+
* all pool lifecycle operations (acquire, release, session/analysis tracking,
|
|
16
|
+
* startup rehydration).
|
|
17
|
+
*/
|
|
18
|
+
class WorktreePoolLifecycle {
|
|
19
|
+
/**
|
|
20
|
+
* @param {Object} db - Database instance
|
|
21
|
+
* @param {Object} config - Configuration object from loadConfig()
|
|
22
|
+
* @param {Object} [_deps={}] - Injected dependencies for testing
|
|
23
|
+
*/
|
|
24
|
+
constructor(db, config, _deps = {}) {
|
|
25
|
+
const defaults = {
|
|
26
|
+
poolRepo: new WorktreePoolRepository(db),
|
|
27
|
+
worktreeRepo: new WorktreeRepository(db),
|
|
28
|
+
usageTracker: new WorktreePoolUsageTracker(),
|
|
29
|
+
fs: fs,
|
|
30
|
+
simpleGit: require('simple-git'),
|
|
31
|
+
GitWorktreeManager: GitWorktreeManager,
|
|
32
|
+
};
|
|
33
|
+
const deps = { ...defaults, ..._deps };
|
|
34
|
+
|
|
35
|
+
this.db = db;
|
|
36
|
+
this.config = config;
|
|
37
|
+
this._poolRepo = deps.poolRepo;
|
|
38
|
+
this._worktreeRepo = deps.worktreeRepo;
|
|
39
|
+
this._usageTracker = deps.usageTracker;
|
|
40
|
+
this._fs = deps.fs;
|
|
41
|
+
this._simpleGit = deps.simpleGit;
|
|
42
|
+
this._GitWorktreeManager = deps.GitWorktreeManager;
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
/**
|
|
46
|
+
* Read-only accessor for the pool repository (used by callers that
|
|
47
|
+
* need direct DB queries, e.g. route handlers checking pool status).
|
|
48
|
+
*/
|
|
49
|
+
get poolRepo() {
|
|
50
|
+
return this._poolRepo;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
// ── Absorbed from WorktreePoolManager ────────────────────────────────────
|
|
54
|
+
|
|
55
|
+
/**
|
|
56
|
+
* Acquire a pool worktree for a PR review.
|
|
57
|
+
*
|
|
58
|
+
* Claim steps use DB-level serialization (BEGIN IMMEDIATE transactions in
|
|
59
|
+
* poolRepo.claimByPR / claimAvailable) so that concurrent requests cannot
|
|
60
|
+
* grab the same slot -- even across independent instances.
|
|
61
|
+
*
|
|
62
|
+
* Decision tree:
|
|
63
|
+
* 1. Pool worktree already assigned to this PR -> claim atomically, refresh and return
|
|
64
|
+
* 2. Available (LRU) pool worktree exists -> claim atomically, switch to this PR
|
|
65
|
+
* 3. Pool not full -> create a new pool worktree
|
|
66
|
+
* 4. All slots occupied -> create a standard non-pool worktree (slower fallback)
|
|
67
|
+
*
|
|
68
|
+
* @param {Object} prInfo - { owner, repo, prNumber, repository }
|
|
69
|
+
* @param {Object} prData - { head: { sha, ref }, base: { sha, ref } }
|
|
70
|
+
* @param {string} repositoryPath - Path to the main repository clone
|
|
71
|
+
* @param {Object} options - { worktreeSourcePath, checkoutScript, checkoutTimeout, resetScript, worktreeConfig, poolSize }
|
|
72
|
+
* @returns {Promise<{ worktreePath: string, worktreeId: string }>}
|
|
73
|
+
*/
|
|
74
|
+
async acquireForPR(prInfo, prData, repositoryPath, options = {}) {
|
|
75
|
+
const repository = prInfo.repository || normalizeRepository(prInfo.owner, prInfo.repo);
|
|
76
|
+
const { poolSize } = options;
|
|
77
|
+
|
|
78
|
+
// 1. Already assigned to this PR? Atomically claim via DB transaction.
|
|
79
|
+
const existingPool = await this._poolRepo.claimByPR(prInfo.prNumber, repository);
|
|
80
|
+
if (existingPool) {
|
|
81
|
+
const worktreeRecord = await this._worktreeRepo.findById(existingPool.id);
|
|
82
|
+
if (worktreeRecord) {
|
|
83
|
+
if (!this._fs.existsSync(worktreeRecord.path)) {
|
|
84
|
+
logger.warn(`Pool worktree ${existingPool.id} directory missing from disk (${worktreeRecord.path}) -- removing stale records`);
|
|
85
|
+
await this._poolRepo.delete(existingPool.id);
|
|
86
|
+
await this._worktreeRepo.delete(existingPool.id);
|
|
87
|
+
} else {
|
|
88
|
+
logger.info(`Pool worktree ${existingPool.id} already assigned to PR #${prInfo.prNumber}, refreshing`);
|
|
89
|
+
return this._refreshPoolWorktree(existingPool, worktreeRecord, prInfo, prData);
|
|
90
|
+
}
|
|
91
|
+
} else {
|
|
92
|
+
logger.warn(`Orphaned pool entry ${existingPool.id} -- removing`);
|
|
93
|
+
await this._poolRepo.delete(existingPool.id);
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
// 2. Available slot (LRU eviction)? Atomically claim via DB transaction.
|
|
98
|
+
const available = await this._poolRepo.claimAvailable(repository);
|
|
99
|
+
if (available) {
|
|
100
|
+
const worktreeRecord = await this._worktreeRepo.findById(available.id);
|
|
101
|
+
if (worktreeRecord) {
|
|
102
|
+
if (!this._fs.existsSync(worktreeRecord.path)) {
|
|
103
|
+
logger.warn(`Pool worktree ${available.id} directory missing from disk (${worktreeRecord.path}) -- removing stale records`);
|
|
104
|
+
await this._poolRepo.delete(available.id);
|
|
105
|
+
await this._worktreeRepo.delete(available.id);
|
|
106
|
+
} else {
|
|
107
|
+
logger.info(`Switching pool worktree ${available.id} to PR #${prInfo.prNumber}`);
|
|
108
|
+
return this._switchPoolWorktree(available, worktreeRecord, prInfo, prData, options);
|
|
109
|
+
}
|
|
110
|
+
} else {
|
|
111
|
+
logger.warn(`Orphaned pool entry ${available.id} -- removing`);
|
|
112
|
+
await this._poolRepo.delete(available.id);
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
// 3. Pool not full -- atomically reserve a slot, then create
|
|
117
|
+
const poolId = generateWorktreeId();
|
|
118
|
+
const reserved = await this._poolRepo.reserveSlot(poolId, repository, poolSize);
|
|
119
|
+
if (reserved) {
|
|
120
|
+
logger.info(`Reserved pool slot ${poolId} for PR #${prInfo.prNumber}, creating worktree`);
|
|
121
|
+
return this._createPoolWorktree(prInfo, prData, repositoryPath, options, poolId);
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
// 4. All slots occupied — fall back to a standard non-pool worktree
|
|
125
|
+
// (slower but functional; the pool is pre-warmed capacity, not a hard limit)
|
|
126
|
+
logger.warn(`Pool full for ${repository} (${poolSize} slots), creating non-pool worktree for PR #${prInfo.prNumber} — setup will be slower`);
|
|
127
|
+
|
|
128
|
+
const normalizedPrData = {
|
|
129
|
+
head_sha: prData.head?.sha || prData.head_sha,
|
|
130
|
+
head_branch: prData.head?.ref || prData.head_branch,
|
|
131
|
+
base_sha: prData.base?.sha || prData.base_sha,
|
|
132
|
+
base_branch: prData.base?.ref || prData.base_branch,
|
|
133
|
+
repository: prData.repository,
|
|
134
|
+
};
|
|
135
|
+
|
|
136
|
+
const normalizedPrInfo = {
|
|
137
|
+
owner: prInfo.owner,
|
|
138
|
+
repo: prInfo.repo,
|
|
139
|
+
number: prInfo.prNumber,
|
|
140
|
+
};
|
|
141
|
+
|
|
142
|
+
const worktreeManager = new this._GitWorktreeManager(this.db, options.worktreeConfig || {});
|
|
143
|
+
const { path: worktreePath, id: worktreeId } = await worktreeManager.createWorktreeForPR(
|
|
144
|
+
normalizedPrInfo,
|
|
145
|
+
normalizedPrData,
|
|
146
|
+
repositoryPath,
|
|
147
|
+
{ worktreeSourcePath: options.worktreeSourcePath, checkoutScript: options.checkoutScript, checkoutTimeout: options.checkoutTimeout }
|
|
148
|
+
);
|
|
149
|
+
|
|
150
|
+
return { worktreePath, worktreeId };
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
/**
|
|
154
|
+
* Create a new pool worktree from scratch.
|
|
155
|
+
* Expects that the caller has already reserved a pool slot via
|
|
156
|
+
* poolRepo.reserveSlot(). Delegates to GitWorktreeManager.createWorktreeForPR,
|
|
157
|
+
* then finalizes the reservation on success or deletes it on failure.
|
|
158
|
+
*
|
|
159
|
+
* @param {Object} prInfo
|
|
160
|
+
* @param {Object} prData
|
|
161
|
+
* @param {string} repositoryPath
|
|
162
|
+
* @param {Object} options
|
|
163
|
+
* @param {string} poolId - Pre-reserved pool worktree ID
|
|
164
|
+
* @returns {Promise<{ worktreePath: string, worktreeId: string }>}
|
|
165
|
+
*/
|
|
166
|
+
async _createPoolWorktree(prInfo, prData, repositoryPath, options, poolId) {
|
|
167
|
+
const repository = prInfo.repository || normalizeRepository(prInfo.owner, prInfo.repo);
|
|
168
|
+
const { worktreeSourcePath, checkoutScript, checkoutTimeout, worktreeConfig } = options;
|
|
169
|
+
|
|
170
|
+
// Build worktree config with pool ID substituted into the name template.
|
|
171
|
+
// Preserve any user-configured template (e.g., '{id}/src' for monorepos)
|
|
172
|
+
// and only replace the {id} placeholder with the pool-specific ID.
|
|
173
|
+
const poolWorktreeConfig = {
|
|
174
|
+
...(worktreeConfig || {}),
|
|
175
|
+
nameTemplate: (worktreeConfig?.nameTemplate || '{id}').replace(/\{id\}/g, poolId),
|
|
176
|
+
};
|
|
177
|
+
|
|
178
|
+
const worktreeManager = new this._GitWorktreeManager(this.db, poolWorktreeConfig);
|
|
179
|
+
|
|
180
|
+
// Normalize prData into the shape createWorktreeForPR expects
|
|
181
|
+
const normalizedPrData = {
|
|
182
|
+
head_sha: prData.head?.sha || prData.head_sha,
|
|
183
|
+
head_branch: prData.head?.ref || prData.head_branch,
|
|
184
|
+
base_sha: prData.base?.sha || prData.base_sha,
|
|
185
|
+
base_branch: prData.base?.ref || prData.base_branch,
|
|
186
|
+
repository: prData.repository,
|
|
187
|
+
};
|
|
188
|
+
|
|
189
|
+
const normalizedPrInfo = {
|
|
190
|
+
owner: prInfo.owner,
|
|
191
|
+
repo: prInfo.repo,
|
|
192
|
+
number: prInfo.prNumber,
|
|
193
|
+
};
|
|
194
|
+
|
|
195
|
+
try {
|
|
196
|
+
const { path: worktreePath, id: worktreeId } = await worktreeManager.createWorktreeForPR(
|
|
197
|
+
normalizedPrInfo,
|
|
198
|
+
normalizedPrData,
|
|
199
|
+
repositoryPath,
|
|
200
|
+
{ worktreeSourcePath, checkoutScript, checkoutTimeout, explicitId: poolId }
|
|
201
|
+
);
|
|
202
|
+
|
|
203
|
+
// Finalize the reservation: set path and mark in_use.
|
|
204
|
+
// Use poolId (the reserved pool slot ID), NOT worktreeId (the worktrees-table ID).
|
|
205
|
+
await this._poolRepo.finalizeReservation(poolId, worktreePath, prInfo.prNumber);
|
|
206
|
+
|
|
207
|
+
logger.info(`Created pool worktree ${poolId} at ${worktreePath}`);
|
|
208
|
+
return { worktreePath, worktreeId: poolId };
|
|
209
|
+
} catch (err) {
|
|
210
|
+
// Creation failed -- remove the placeholder to free the slot
|
|
211
|
+
try {
|
|
212
|
+
await this._poolRepo.deleteReservation(poolId);
|
|
213
|
+
} catch (cleanupErr) {
|
|
214
|
+
logger.error(`Failed to delete reservation ${poolId} after creation failure: ${cleanupErr.message}`);
|
|
215
|
+
}
|
|
216
|
+
throw err;
|
|
217
|
+
}
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
  /**
   * Switch an existing pool worktree to a different PR.
   *
   * Sequence (order matters): fetch new PR refs, hard-reset and clean the
   * tree, check out the target commit, run the optional reset script, update
   * the worktrees table (plus best-effort disk cleanup of displaced non-pool
   * worktrees), clear in-memory tracking, then mark the slot in_use. Any
   * failure rolls the pool entry back to 'available' and rethrows.
   *
   * @param {Object} poolEntry - Pool table record
   * @param {Object} worktreeRecord - Worktrees table record
   * @param {Object} prInfo
   * @param {Object} prData
   * @param {Object} options
   * @returns {Promise<{ worktreePath: string, worktreeId: string }>}
   */
  async _switchPoolWorktree(poolEntry, worktreeRecord, prInfo, prData, options) {
    // Note: poolEntry was already atomically marked 'switching' by claimAvailable()
    try {
      const git = this._simpleGit(poolEntry.path);

      // Resolve the remote (prefer 'origin', else first remote, else the
      // literal name 'origin' as a last resort).
      const remotes = await git.getRemotes();
      const remote = remotes.find(r => r.name === 'origin') || remotes[0];
      const remoteName = remote ? remote.name : 'origin';

      // Fetch new PR refs (incremental -- cheap on a warm worktree).
      // NOTE(review): the refs/pull/N/head refspec assumes a GitHub-style
      // remote — confirm for other hosting providers.
      logger.info(`Fetching PR #${prInfo.prNumber} refs into pool worktree ${poolEntry.id}`);
      await git.fetch([remoteName, `+refs/pull/${prInfo.prNumber}/head:refs/remotes/${remoteName}/pr-${prInfo.prNumber}`]);

      // Clean the working tree before switching PRs. Without this, untracked
      // files (build artifacts, generated code) from the previous PR leak into
      // the new checkout, and modified tracked files can cause checkout to fail.
      // Use -fd (not -fdx) to preserve gitignored files like node_modules that
      // the resetScript may depend on.
      await git.reset(['--hard', 'HEAD']);
      await git.clean('f', ['-d']);

      // Checkout specific head SHA (stored SHA in restore mode, latest in fresh mode)
      const targetSha = prData.head?.sha || prData.head_sha;
      if (targetSha) {
        await git.checkout([targetSha]);
      } else {
        await git.checkout([`refs/remotes/${remoteName}/pr-${prInfo.prNumber}`]);
      }

      // Run reset_script if configured, with PR metadata exposed as env vars.
      if (options.resetScript) {
        logger.info(`Executing reset script: ${options.resetScript}`);
        const headRef = prData.head?.ref || prData.head_branch || '';
        const baseRef = prData.base?.ref || prData.base_branch || '';
        const headSha = prData.head?.sha || prData.head_sha || '';
        const baseSha = prData.base?.sha || prData.base_sha || '';

        const scriptEnv = {
          BASE_BRANCH: baseRef,
          HEAD_BRANCH: headRef,
          BASE_SHA: baseSha,
          HEAD_SHA: headSha,
          PR_NUMBER: String(prInfo.prNumber),
          WORKTREE_PATH: poolEntry.path,
        };
        const worktreeManager = new this._GitWorktreeManager();
        await worktreeManager.executeCheckoutScript(
          options.resetScript, poolEntry.path, scriptEnv, options.checkoutTimeout
        );
      }

      // Update worktrees table (returns paths of deleted non-pool worktree records)
      const branch = prData.head?.ref || prData.head_branch || '';
      const deletedPaths = await this._worktreeRepo.switchPR(poolEntry.id, prInfo.prNumber, branch);

      // Best-effort disk cleanup for deleted non-pool worktree directories;
      // individual failures are logged but never abort the switch.
      if (deletedPaths && deletedPaths.length > 0) {
        const worktreeManager = new this._GitWorktreeManager();
        for (const deletedPath of deletedPaths) {
          try {
            await worktreeManager.cleanupWorktree(deletedPath);
            logger.info(`Cleaned up obsolete worktree directory: ${deletedPath}`);
          } catch (cleanupErr) {
            logger.warn(`Failed to clean up obsolete worktree directory ${deletedPath}: ${cleanupErr.message}`);
          }
        }
      }

      // Forcefully clear all previous tracking state (sessions, analyses, grace
      // timers, review mappings) before assigning to the new PR. Without this,
      // zombie holds from the previous PR could trigger a false onIdle event
      // that marks the worktree available while the new PR is using it.
      this._usageTracker.clearWorktree(poolEntry.id);

      // Mark in_use in pool table
      await this._poolRepo.markInUse(poolEntry.id, prInfo.prNumber);

      logger.info(`Switched pool worktree ${poolEntry.id} to PR #${prInfo.prNumber}`);
      return { worktreePath: poolEntry.path, worktreeId: poolEntry.id };
    } catch (err) {
      // Roll back to available on failure so the slot is not stuck in
      // 'switching'; rollback errors are logged but the original error wins.
      try {
        await this._poolRepo.markAvailable(poolEntry.id);
      } catch (rollbackErr) {
        logger.error(`Failed to roll back pool worktree ${poolEntry.id} status: ${rollbackErr.message}`);
      }
      throw err;
    }
  }
|
|
320
|
+
|
|
321
|
+
  /**
   * Refresh an existing pool worktree that is already assigned to the right PR.
   * Delegates to GitWorktreeManager.refreshWorktree for the git operations,
   * then ensures the pool entry is marked as in_use.
   *
   * Fast path: if HEAD already equals the target SHA, skip the refresh
   * entirely and just mark the slot in_use.
   *
   * @param {Object} poolEntry - Pool table record
   * @param {Object} worktreeRecord - Worktrees table record
   * @param {Object} prInfo
   * @param {Object} prData
   * @returns {Promise<{ worktreePath: string, worktreeId: string }>}
   */
  async _refreshPoolWorktree(poolEntry, worktreeRecord, prInfo, prData) {
    const targetSha = prData.head?.sha || prData.head_sha;
    if (targetSha) {
      // Fast path: HEAD check failures are non-fatal — fall through to a
      // full refresh instead of aborting.
      try {
        const git = this._simpleGit(poolEntry.path);
        const currentHead = (await git.revparse(['HEAD'])).trim();
        if (currentHead === targetSha) {
          logger.info(`Pool worktree ${poolEntry.id} already at target SHA ${targetSha.slice(0, 8)}, skipping refresh`);
          await this._poolRepo.markInUse(poolEntry.id, prInfo.prNumber);
          return { worktreePath: poolEntry.path, worktreeId: poolEntry.id };
        }
      } catch (err) {
        logger.warn(`Could not check HEAD of pool worktree ${poolEntry.id}: ${err.message}`);
      }
    }

    // Flatten nested head/base into the shape refreshWorktree expects.
    const normalizedPrData = {
      head_sha: prData.head?.sha || prData.head_sha,
      head_branch: prData.head?.ref || prData.head_branch,
      base_sha: prData.base?.sha || prData.base_sha,
      base_branch: prData.base?.ref || prData.base_branch,
      repository: prData.repository,
    };

    const normalizedPrInfo = {
      owner: prInfo.owner,
      repo: prInfo.repo,
      number: prInfo.prNumber,
    };

    // Pool worktrees are unattended — dirty state is leftover noise, not
    // intentional edits. Clean before refresh so refreshWorktree's
    // hasLocalChanges guard never trips. Mirrors _switchPoolWorktree cleanup.
    const refreshGit = this._simpleGit(poolEntry.path);
    await refreshGit.reset(['--hard', 'HEAD']);
    await refreshGit.clean('f', ['-d']);

    const worktreeManager = new this._GitWorktreeManager(this.db);
    await worktreeManager.refreshWorktree(worktreeRecord, normalizedPrInfo.number, normalizedPrData, normalizedPrInfo);

    // refreshWorktree fetches from GitHub and resets to FETCH_HEAD, which is
    // the latest PR head. In restore mode targetSha may be an older commit
    // from the cached review. Explicitly check out the target SHA so the
    // worktree lands on the expected commit rather than FETCH_HEAD.
    if (targetSha) {
      try {
        const git = this._simpleGit(poolEntry.path);
        await git.checkout([targetSha]);
      } catch (err) {
        logger.warn(`Could not checkout target SHA ${targetSha.slice(0, 8)} in pool worktree ${poolEntry.id}, continuing at FETCH_HEAD: ${err.message}`);
      }
    }

    // Ensure pool entry is marked in_use
    await this._poolRepo.markInUse(poolEntry.id, prInfo.prNumber);

    logger.info(`Refreshed pool worktree ${poolEntry.id} for PR #${prInfo.prNumber}`);
    return { worktreePath: poolEntry.path, worktreeId: poolEntry.id };
  }
|
|
391
|
+
|
|
392
|
+
/**
|
|
393
|
+
* Release a pool worktree, marking it as available for reuse.
|
|
394
|
+
* Kept for backward compatibility -- callers that only need to mark
|
|
395
|
+
* a worktree available without touching usage tracking can use this.
|
|
396
|
+
*
|
|
397
|
+
* @param {string} worktreeId - Pool worktree ID
|
|
398
|
+
*/
|
|
399
|
+
async release(worktreeId) {
|
|
400
|
+
await this._poolRepo.markAvailable(worktreeId);
|
|
401
|
+
logger.info(`Pool worktree ${worktreeId} released`);
|
|
402
|
+
}
|
|
403
|
+
|
|
404
|
+
// ── New lifecycle methods ────────────────────────────────────────────────
|
|
405
|
+
|
|
406
|
+
/**
|
|
407
|
+
* Register a WebSocket session for a pool worktree.
|
|
408
|
+
* Looks up the worktree by review ID, then adds the session to the
|
|
409
|
+
* in-memory usage tracker.
|
|
410
|
+
*
|
|
411
|
+
* @param {number} reviewId - The review ID to look up
|
|
412
|
+
* @param {string} sessionKey - Unique key for this WS connection
|
|
413
|
+
* @returns {Promise<{ worktreeId: string }|null>} worktreeId if found, null otherwise
|
|
414
|
+
*/
|
|
415
|
+
async startSession(reviewId, sessionKey) {
|
|
416
|
+
const entry = await this._poolRepo.findByReviewId(reviewId);
|
|
417
|
+
if (!entry) return null;
|
|
418
|
+
|
|
419
|
+
this._usageTracker.addSession(entry.id, sessionKey);
|
|
420
|
+
return { worktreeId: entry.id };
|
|
421
|
+
}
|
|
422
|
+
|
|
423
|
+
/**
|
|
424
|
+
* Remove a WebSocket session from the usage tracker.
|
|
425
|
+
* Synchronous -- does not touch the database.
|
|
426
|
+
*
|
|
427
|
+
* @param {string} worktreeId - Pool worktree ID
|
|
428
|
+
* @param {string} sessionKey - Unique key for the WS connection
|
|
429
|
+
*/
|
|
430
|
+
endSession(worktreeId, sessionKey) {
|
|
431
|
+
this._usageTracker.removeSession(worktreeId, sessionKey);
|
|
432
|
+
}
|
|
433
|
+
|
|
434
|
+
/**
|
|
435
|
+
* Register an AI analysis for a pool worktree.
|
|
436
|
+
* Looks up the worktree by review ID, then adds the analysis to the
|
|
437
|
+
* in-memory usage tracker.
|
|
438
|
+
*
|
|
439
|
+
* @param {number} reviewId - The review ID to look up
|
|
440
|
+
* @param {string} analysisId - Unique analysis identifier
|
|
441
|
+
* @returns {Promise<string|null>} worktreeId if found, null otherwise
|
|
442
|
+
*/
|
|
443
|
+
async startAnalysis(reviewId, analysisId) {
|
|
444
|
+
const entry = await this._poolRepo.findByReviewId(reviewId);
|
|
445
|
+
if (!entry) return null;
|
|
446
|
+
|
|
447
|
+
this._usageTracker.addAnalysis(entry.id, analysisId);
|
|
448
|
+
return entry.id;
|
|
449
|
+
}
|
|
450
|
+
|
|
451
|
+
/**
|
|
452
|
+
* Remove an analysis hold from the usage tracker by analysis ID.
|
|
453
|
+
* Synchronous -- does not touch the database.
|
|
454
|
+
*
|
|
455
|
+
* @param {string} analysisId - Unique analysis identifier
|
|
456
|
+
*/
|
|
457
|
+
endAnalysis(analysisId) {
|
|
458
|
+
this._usageTracker.removeAnalysisById(analysisId);
|
|
459
|
+
}
|
|
460
|
+
|
|
461
|
+
/**
|
|
462
|
+
* Release a worktree for deletion: clear usage state first, then
|
|
463
|
+
* mark available in the database.
|
|
464
|
+
*
|
|
465
|
+
* @param {string} worktreeId - Pool worktree ID
|
|
466
|
+
*/
|
|
467
|
+
async releaseForDeletion(worktreeId) {
|
|
468
|
+
this._usageTracker.clearWorktree(worktreeId);
|
|
469
|
+
await this._poolRepo.markAvailable(worktreeId);
|
|
470
|
+
}
|
|
471
|
+
|
|
472
|
+
/**
|
|
473
|
+
* Release a worktree after a headless analysis completes: clear in-memory
|
|
474
|
+
* usage state first, then mark available in the database. Clearing state
|
|
475
|
+
* before the DB write prevents a race where another request claims the slot
|
|
476
|
+
* (after the DB write) and then has its tracking state wiped by clearWorktree.
|
|
477
|
+
*
|
|
478
|
+
* @param {string} worktreeId - Pool worktree ID
|
|
479
|
+
*/
|
|
480
|
+
async releaseAfterHeadless(worktreeId) {
|
|
481
|
+
this._usageTracker.clearWorktree(worktreeId);
|
|
482
|
+
await this._poolRepo.markAvailable(worktreeId);
|
|
483
|
+
}
|
|
484
|
+
|
|
485
|
+
/**
|
|
486
|
+
* Permanently destroy a pool worktree: cancel active analyses, clear
|
|
487
|
+
* in-memory tracking, remove from disk, and delete from both DB tables.
|
|
488
|
+
*
|
|
489
|
+
* Unlike releaseForDeletion (which merely marks the slot available),
|
|
490
|
+
* this removes the worktree entirely. The pool will create a replacement
|
|
491
|
+
* when the next acquireForPR call triggers reserveSlot.
|
|
492
|
+
*
|
|
493
|
+
* @param {string} worktreeId - Pool worktree ID
|
|
494
|
+
* @param {Object} [options={}]
|
|
495
|
+
* @param {Function} [options.cancelAnalyses] - Async callback(worktreeId, analysisIdSet) to cancel running analyses
|
|
496
|
+
*/
|
|
497
|
+
async destroyPoolWorktree(worktreeId, { cancelAnalyses } = {}) {
|
|
498
|
+
const poolEntry = await this._poolRepo.getPoolEntry(worktreeId);
|
|
499
|
+
if (poolEntry && (poolEntry.status === 'creating' || poolEntry.status === 'switching')) {
|
|
500
|
+
throw new Error(`Cannot delete worktree ${worktreeId}: currently ${poolEntry.status}`);
|
|
501
|
+
}
|
|
502
|
+
|
|
503
|
+
const activeIds = this._usageTracker.getActiveAnalyses(worktreeId);
|
|
504
|
+
if (activeIds.size > 0 && cancelAnalyses) {
|
|
505
|
+
await cancelAnalyses(worktreeId, activeIds);
|
|
506
|
+
}
|
|
507
|
+
|
|
508
|
+
this._usageTracker.clearWorktree(worktreeId);
|
|
509
|
+
|
|
510
|
+
const record = await this._worktreeRepo.findById(worktreeId);
|
|
511
|
+
if (record && record.path) {
|
|
512
|
+
try {
|
|
513
|
+
const mgr = new this._GitWorktreeManager(this.db);
|
|
514
|
+
await mgr.cleanupWorktree(record.path);
|
|
515
|
+
} catch (err) {
|
|
516
|
+
logger.warn(`Could not clean up pool worktree ${worktreeId} from disk: ${err.message}`);
|
|
517
|
+
}
|
|
518
|
+
}
|
|
519
|
+
|
|
520
|
+
await this._poolRepo.delete(worktreeId);
|
|
521
|
+
await this._worktreeRepo.delete(worktreeId);
|
|
522
|
+
logger.info(`Destroyed pool worktree ${worktreeId}`);
|
|
523
|
+
}
|
|
524
|
+
|
|
525
|
+
/**
|
|
526
|
+
* Set the review ID that owns a pool worktree (persistent ownership).
|
|
527
|
+
*
|
|
528
|
+
* @param {string} worktreeId - Pool worktree ID
|
|
529
|
+
* @param {number|null} reviewId - Review ID that owns the worktree
|
|
530
|
+
*/
|
|
531
|
+
async setReviewOwner(worktreeId, reviewId) {
|
|
532
|
+
await this._poolRepo.setCurrentReviewId(worktreeId, reviewId);
|
|
533
|
+
}
|
|
534
|
+
|
|
535
|
+
/**
|
|
536
|
+
* Return the set of active analysis IDs for a worktree.
|
|
537
|
+
*
|
|
538
|
+
* @param {string} worktreeId - Pool worktree ID
|
|
539
|
+
* @returns {Set<string>} Active analysis IDs (may be empty)
|
|
540
|
+
*/
|
|
541
|
+
getActiveAnalyses(worktreeId) {
|
|
542
|
+
return this._usageTracker.getActiveAnalyses(worktreeId);
|
|
543
|
+
}
|
|
544
|
+
|
|
545
|
+
/**
|
|
546
|
+
* Reset stale pool entries and rehydrate preserved ones on startup.
|
|
547
|
+
*
|
|
548
|
+
* This method:
|
|
549
|
+
* 1. Calls resetStaleAndPreserve() to clean up stale DB entries and
|
|
550
|
+
* identify entries with valid review ownership
|
|
551
|
+
* 2. Wires the onIdle callback with retry logic (2 attempts, 1s delay)
|
|
552
|
+
* so that idle worktrees are automatically marked available
|
|
553
|
+
* 3. Rehydrates preserved entries by triggering a synthetic
|
|
554
|
+
* session add/remove cycle, which starts the grace-period timer.
|
|
555
|
+
* If a real user reconnects before the timer fires, their WS
|
|
556
|
+
* session will cancel it automatically.
|
|
557
|
+
*
|
|
558
|
+
* @returns {Promise<Array<{id: string, current_review_id: number}>>} Preserved entries
|
|
559
|
+
*/
|
|
560
|
+
async resetAndRehydrate() {
|
|
561
|
+
// 1. Reset stale entries and get preserved ones
|
|
562
|
+
const preserved = await this._poolRepo.resetStaleAndPreserve();
|
|
563
|
+
if (preserved.length > 0) {
|
|
564
|
+
logger.info(`Pool startup: preserved ${preserved.length} active worktree(s)`);
|
|
565
|
+
}
|
|
566
|
+
|
|
567
|
+
// 1b. Adopt existing non-pool worktrees into pool for pool-enabled repos
|
|
568
|
+
const adopted = await this._adoptExistingWorktrees();
|
|
569
|
+
for (const entry of adopted) {
|
|
570
|
+
preserved.push(entry);
|
|
571
|
+
}
|
|
572
|
+
|
|
573
|
+
// 2. Wire up idle callback with retry logic (2 attempts, 1s delay)
|
|
574
|
+
this._usageTracker.onIdle = async (worktreeId) => {
|
|
575
|
+
for (let attempt = 1; attempt <= 2; attempt++) {
|
|
576
|
+
try {
|
|
577
|
+
await this._poolRepo.markAvailable(worktreeId);
|
|
578
|
+
logger.info(`Pool worktree ${worktreeId} is now available`);
|
|
579
|
+
return;
|
|
580
|
+
} catch (err) {
|
|
581
|
+
if (attempt < 2) {
|
|
582
|
+
logger.warn(`Failed to release pool worktree ${worktreeId} (attempt ${attempt}), retrying: ${err.message}`);
|
|
583
|
+
await new Promise(r => setTimeout(r, 1000));
|
|
584
|
+
} else {
|
|
585
|
+
logger.error(`Failed to release pool worktree ${worktreeId} after ${attempt} attempts: ${err.message}`);
|
|
586
|
+
}
|
|
587
|
+
}
|
|
588
|
+
}
|
|
589
|
+
};
|
|
590
|
+
|
|
591
|
+
// 3. Rehydrate preserved entries by triggering grace-period timers
|
|
592
|
+
if (preserved.length > 0) {
|
|
593
|
+
logger.info(`Pool startup: preserved ${preserved.length} active worktree(s), starting grace periods`);
|
|
594
|
+
for (const entry of preserved) {
|
|
595
|
+
// Add then immediately remove a synthetic session to trigger the
|
|
596
|
+
// idle grace period timer. If a real user reconnects before the
|
|
597
|
+
// timer fires, their WS session will cancel it automatically.
|
|
598
|
+
this._usageTracker.addSession(entry.id, 'startup-rehydration');
|
|
599
|
+
this._usageTracker.removeSession(entry.id, 'startup-rehydration');
|
|
600
|
+
}
|
|
601
|
+
}
|
|
602
|
+
|
|
603
|
+
return preserved;
|
|
604
|
+
}
|
|
605
|
+
|
|
606
|
+
/**
|
|
607
|
+
* Adopt existing non-pool worktrees into the pool for repos that have pool_size configured.
|
|
608
|
+
* Worktrees already in worktree_pool are skipped. Adoption stops at pool capacity.
|
|
609
|
+
*
|
|
610
|
+
* Returns adopted entries that have `status = 'in_use'` so the caller can
|
|
611
|
+
* rehydrate them with synthetic sessions (same as preserved entries).
|
|
612
|
+
*
|
|
613
|
+
* @returns {Promise<Array<{id: string, current_review_id: number}>>} Adopted in_use entries
|
|
614
|
+
* @private
|
|
615
|
+
*/
|
|
616
|
+
async _adoptExistingWorktrees() {
|
|
617
|
+
const repos = this.config.repos || {};
|
|
618
|
+
const adoptedInUse = [];
|
|
619
|
+
|
|
620
|
+
for (const repoName of Object.keys(repos)) {
|
|
621
|
+
const poolSize = getRepoPoolSize(this.config, repoName);
|
|
622
|
+
if (!poolSize) continue;
|
|
623
|
+
|
|
624
|
+
// Count existing pool entries for this repo
|
|
625
|
+
const existingCount = await this._poolRepo.countForRepo(repoName);
|
|
626
|
+
if (existingCount >= poolSize) continue; // already at capacity
|
|
627
|
+
|
|
628
|
+
// Find worktrees for this repo that are NOT in the pool (includes review ID via JOIN)
|
|
629
|
+
const orphans = await this._poolRepo.findOrphanWorktrees(repoName);
|
|
630
|
+
|
|
631
|
+
let adopted = 0;
|
|
632
|
+
for (const orphan of orphans) {
|
|
633
|
+
if (existingCount + adopted >= poolSize) break; // respect capacity
|
|
634
|
+
|
|
635
|
+
// Skip orphans whose directory no longer exists on disk
|
|
636
|
+
if (!this._fs.existsSync(orphan.path)) {
|
|
637
|
+
logger.warn(`Pool startup: skipping adoption of ${orphan.id} — directory missing (${orphan.path})`);
|
|
638
|
+
continue;
|
|
639
|
+
}
|
|
640
|
+
|
|
641
|
+
if (orphan.reviewId) {
|
|
642
|
+
// Adopt as in_use with review ownership
|
|
643
|
+
await this._poolRepo.create({
|
|
644
|
+
id: orphan.id,
|
|
645
|
+
repository: orphan.repository,
|
|
646
|
+
path: orphan.path,
|
|
647
|
+
prNumber: orphan.pr_number,
|
|
648
|
+
});
|
|
649
|
+
await this._poolRepo.setCurrentReviewId(orphan.id, orphan.reviewId);
|
|
650
|
+
adoptedInUse.push({ id: orphan.id, current_review_id: orphan.reviewId });
|
|
651
|
+
logger.info(`Pool startup: adopted worktree ${orphan.id} for PR #${orphan.pr_number} (in_use, review ${orphan.reviewId})`);
|
|
652
|
+
} else {
|
|
653
|
+
// Adopt as available (no active review)
|
|
654
|
+
await this._poolRepo.create({
|
|
655
|
+
id: orphan.id,
|
|
656
|
+
repository: orphan.repository,
|
|
657
|
+
path: orphan.path,
|
|
658
|
+
});
|
|
659
|
+
logger.info(`Pool startup: adopted worktree ${orphan.id} for PR #${orphan.pr_number} (available, no review)`);
|
|
660
|
+
}
|
|
661
|
+
|
|
662
|
+
adopted++;
|
|
663
|
+
}
|
|
664
|
+
|
|
665
|
+
if (adopted > 0) {
|
|
666
|
+
logger.info(`Pool startup: adopted ${adopted} worktree(s) for ${repoName}`);
|
|
667
|
+
}
|
|
668
|
+
}
|
|
669
|
+
|
|
670
|
+
return adoptedInUse;
|
|
671
|
+
}
|
|
672
|
+
}
|
|
673
|
+
|
|
674
|
+
// Named export (no default) so callers destructure the class explicitly.
module.exports = { WorktreePoolLifecycle };
|