@in-the-loop-labs/pair-review 3.1.3 → 3.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/plugin/.claude-plugin/plugin.json +1 -1
- package/plugin-code-critic/.claude-plugin/plugin.json +1 -1
- package/public/css/pr.css +980 -3
- package/public/js/components/AIPanel.js +7 -4
- package/public/js/components/ChatPanel.js +34 -4
- package/public/js/components/CouncilProgressModal.js +11 -0
- package/public/js/components/NotificationDropdown.js +257 -0
- package/public/js/components/StackAnalysisDialog.js +313 -0
- package/public/js/components/StackProgressModal.js +475 -0
- package/public/js/components/StatusIndicator.js +1 -0
- package/public/js/components/SuggestionNavigator.js +2 -0
- package/public/js/modules/comment-manager.js +7 -0
- package/public/js/modules/comment-minimizer.js +151 -4
- package/public/js/modules/file-comment-manager.js +66 -2
- package/public/js/modules/suggestion-manager.js +2 -1
- package/public/js/pr.js +433 -2
- package/public/js/utils/notification-sounds.js +62 -0
- package/public/local.html +10 -0
- package/public/pr.html +12 -0
- package/public/setup.html +4 -0
- package/src/ai/claude-provider.js +1 -11
- package/src/ai/codex-provider.js +18 -16
- package/src/ai/copilot-provider.js +21 -21
- package/src/ai/gemini-provider.js +10 -0
- package/src/ai/pi-provider.js +22 -25
- package/src/ai/provider.js +26 -3
- package/src/chat/pi-bridge.js +8 -0
- package/src/chat/session-manager.js +1 -0
- package/src/git/base-branch.js +1 -51
- package/src/git/worktree-lock.js +88 -0
- package/src/git/worktree.js +64 -0
- package/src/github/stack-walker.js +196 -0
- package/src/routes/local.js +12 -8
- package/src/routes/pr.js +139 -26
- package/src/routes/sound.js +49 -0
- package/src/routes/stack-analysis.js +886 -0
- package/src/server.js +4 -0
- package/src/setup/stack-setup.js +77 -0
|
@@ -0,0 +1,886 @@
|
|
|
1
|
+
// Copyright 2026 Tim Perkins (tjwp) | SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
/**
|
|
3
|
+
* Stack Analysis Routes & Orchestrator
|
|
4
|
+
*
|
|
5
|
+
* Provides endpoints for analyzing a Graphite stack of PRs in parallel:
|
|
6
|
+
* - POST /api/pr/:owner/:repo/:number/analyses/stack — start stack analysis
|
|
7
|
+
* - GET /api/analyses/stack/:stackAnalysisId — get stack analysis status
|
|
8
|
+
* - POST /api/analyses/stack/:stackAnalysisId/cancel — cancel stack analysis
|
|
9
|
+
*
|
|
10
|
+
* The orchestrator creates per-PR worktrees and runs analyses in parallel,
|
|
11
|
+
* using the configured analysis type (single, council, or executable).
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
const express = require('express');
|
|
15
|
+
const { v4: uuidv4 } = require('uuid');
|
|
16
|
+
const { execSync } = require('child_process');
|
|
17
|
+
const logger = require('../utils/logger');
|
|
18
|
+
const { normalizeRepository } = require('../utils/paths');
|
|
19
|
+
const { mergeInstructions } = require('../utils/instructions');
|
|
20
|
+
const { GitWorktreeManager } = require('../git/worktree');
|
|
21
|
+
const { GitHubClient } = require('../github/client');
|
|
22
|
+
const { getGitHubToken } = require('../config');
|
|
23
|
+
const { setupStackPR } = require('../setup/stack-setup');
|
|
24
|
+
const Analyzer = require('../ai/analyzer');
|
|
25
|
+
const { getProviderClass, createProvider } = require('../ai/provider');
|
|
26
|
+
const { VALID_TIERS, resolveTier } = require('../ai/prompts/config');
|
|
27
|
+
const { validateCouncilConfig, normalizeCouncilConfig } = require('./councils');
|
|
28
|
+
const ws = require('../ws');
|
|
29
|
+
const {
|
|
30
|
+
query,
|
|
31
|
+
queryOne,
|
|
32
|
+
ReviewRepository,
|
|
33
|
+
RepoSettingsRepository,
|
|
34
|
+
AnalysisRunRepository,
|
|
35
|
+
PRMetadataRepository,
|
|
36
|
+
CouncilRepository
|
|
37
|
+
} = require('../database');
|
|
38
|
+
const {
|
|
39
|
+
activeAnalyses,
|
|
40
|
+
reviewToAnalysisId,
|
|
41
|
+
getModel,
|
|
42
|
+
determineCompletionInfo,
|
|
43
|
+
broadcastProgress,
|
|
44
|
+
createProgressCallback,
|
|
45
|
+
parseEnabledLevels,
|
|
46
|
+
registerProcess: registerProcessForCancellation,
|
|
47
|
+
killProcesses
|
|
48
|
+
} = require('./shared');
|
|
49
|
+
const { broadcastReviewEvent } = require('../events/review-events');
|
|
50
|
+
const analysesRouter = require('./analyses');
|
|
51
|
+
const { runExecutableAnalysis } = require('./executable-analysis');
|
|
52
|
+
|
|
53
|
+
const router = express.Router();

// In-memory tracking for active stack analyses, keyed by stackAnalysisId.
// Entries are mutated in place by executeStackAnalysis: the code below reads
// state.status, state.cancelled, state.totalPRs, and state.prStatuses (a Map
// of PR number -> per-PR status object). Lost on process restart.
const activeStackAnalyses = new Map();
|
|
57
|
+
|
|
58
|
+
// ============================================================================
|
|
59
|
+
// Helper: wait for an individual analysis to reach a terminal state
|
|
60
|
+
// ============================================================================
|
|
61
|
+
|
|
62
|
+
/**
 * Estimate a generous wall-clock upper bound for a council analysis run.
 * The real per-call timeouts live inside the analyzer; this figure only keeps
 * the stack orchestrator from abandoning a council that is still working.
 *
 * @param {Object} councilConfig - Resolved council configuration
 * @param {string} configType - 'council' (voice-centric) or 'advanced' (level-centric)
 * @returns {number} Timeout in milliseconds
 */
function estimateCouncilTimeout(councilConfig, configType) {
  const VOICE_MS = 600_000; // 10 min default per voice
  const CONSOL_MS = 300_000; // 5 min default per consolidation pass
  const ORCH_MS = 600_000; // 10 min default cross-level orchestration
  const MARGIN_MS = 120_000; // 2 min safety margin

  // Slowest voice in a list, never below the default (generous upper bound).
  const slowestVoice = (voiceList) =>
    (voiceList || []).reduce((acc, voice) => Math.max(acc, voice.timeout || VOICE_MS), VOICE_MS);

  if (configType === 'council') {
    // Voice-centric: all voices run in parallel, then one consolidation step.
    const consolidation = councilConfig.consolidation?.timeout || CONSOL_MS;
    return slowestVoice(councilConfig.voices) + consolidation + MARGIN_MS;
  }

  // Level-centric (advanced): each level runs its voices in parallel followed
  // by a per-level consolidation; levels are summed, then orchestration added.
  let perLevelSum = 0;
  for (const level of Object.values(councilConfig.levels || {})) {
    perLevelSum += slowestVoice(level.voices) + (level.consolidation?.timeout || CONSOL_MS);
  }

  const orchestration =
    councilConfig.consolidation?.timeout
    || councilConfig.orchestration?.timeout
    || ORCH_MS;
  return perLevelSum + orchestration + MARGIN_MS;
}
|
|
100
|
+
|
|
101
|
+
/**
 * Poll the shared activeAnalyses map until the given analysisId reaches a
 * terminal state ('completed', 'failed', or 'cancelled'). A missing entry is
 * treated as already completed (it was cleaned up after finishing).
 *
 * @param {string} analysisId
 * @param {number} [timeoutMs=3600000] - Maximum wait time (default 60 min)
 * @returns {Promise<Object>} Terminal analysis status
 */
function waitForAnalysisCompletion(analysisId, timeoutMs = 3_600_000) {
  const POLL_INTERVAL_MS = 1000;
  const TERMINAL_STATES = ['completed', 'failed', 'cancelled'];

  return new Promise((resolve, reject) => {
    const deadline = Date.now() + timeoutMs;

    const poll = () => {
      const status = activeAnalyses.get(analysisId);

      // Entry was cleaned up — treat as completed.
      if (!status) {
        return resolve({ status: 'completed', id: analysisId });
      }
      if (TERMINAL_STATES.includes(status.status)) {
        return resolve(status);
      }
      if (Date.now() > deadline) {
        return reject(new Error(`Analysis ${analysisId} timed out after ${timeoutMs}ms`));
      }
      setTimeout(poll, POLL_INTERVAL_MS);
    };

    poll();
  });
}
|
|
130
|
+
|
|
131
|
+
// ============================================================================
|
|
132
|
+
// Helper: broadcast stack progress
|
|
133
|
+
// ============================================================================
|
|
134
|
+
|
|
135
|
+
/**
 * Broadcast a snapshot of stack-analysis progress on the stack's WS channel.
 * Flattens state.prStatuses (Map keyed by PR number) into an array and counts
 * PRs currently running / completed.
 */
function broadcastStackProgress(stackAnalysisId, state) {
  let runningCount = 0;
  let completedCount = 0;

  const prStatuses = [...state.prStatuses].map(([prNumber, prStatus]) => {
    if (prStatus.status === 'running') {
      runningCount += 1;
    } else if (prStatus.status === 'completed') {
      completedCount += 1;
    }
    return { prNumber, ...prStatus };
  });

  ws.broadcast(`stack-analysis:${stackAnalysisId}`, {
    type: 'stack-progress',
    stackAnalysisId,
    status: state.status,
    currentPRNumber: null,
    currentPRIndex: null,
    runningCount,
    completedCount,
    totalPRs: state.totalPRs,
    prStatuses
  });
}
|
|
157
|
+
|
|
158
|
+
// ============================================================================
|
|
159
|
+
// Core: execute stack analysis (runs in background)
|
|
160
|
+
// ============================================================================
|
|
161
|
+
|
|
162
|
+
// Default collaborator set for executeStackAnalysis / analyzeStackPR.
// Every external dependency is routed through this object so tests can
// override individual pieces via params._deps without module-level mocking.
const defaults = {
  execSync,                 // child_process: bulk `git fetch` of PR refs
  GitWorktreeManager,       // per-PR worktree creation / owning-repo resolution
  GitHubClient,             // GitHub API access for PR data
  getGitHubToken,           // token lookup from config
  setupStackPR,             // per-PR setup (diff generation, metadata persistence)
  Analyzer,                 // single-model analysis engine
  getProviderClass,         // provider class lookup (detects executable providers)
  createProvider,           // provider factory (not referenced in the visible launchers — TODO confirm use)
  launchCouncilAnalysis: analysesRouter.launchCouncilAnalysis, // council launcher shared with /analyses routes
  runExecutableAnalysis,    // executable-provider launcher
  waitForAnalysisCompletion // polls activeAnalyses until terminal
};
|
|
175
|
+
|
|
176
|
+
/**
 * Execute parallel stack analysis across multiple PRs.
 * Creates per-PR worktrees (serially) and runs analyses concurrently.
 *
 * Mutates the activeStackAnalyses entry for stackAnalysisId in place
 * (state.prStatuses, state.status, state.completedAt) and broadcasts a
 * progress snapshot after every transition; a final broadcast always fires
 * in the `finally` block.
 *
 * @param {Object} params
 * @param {Object} params.db - Database handle
 * @param {Object} params.config - Application config
 * @param {string} params.owner - Repository owner
 * @param {string} params.repo - Repository name
 * @param {string} params.repository - Normalized owner/repo
 * @param {number} params.triggerPRNumber - The PR that triggered the stack analysis
 * @param {string} params.worktreePath - Trigger PR worktree path (used to resolve repo)
 * @param {number[]} params.prNumbers - PR numbers to analyze (bottom-up order)
 * @param {Object} params.analysisConfig - Analysis configuration from request
 * @param {string} params.stackAnalysisId - Unique ID for this stack analysis
 * @param {Object} [params._deps] - Dependency overrides for testing
 * @returns {Promise<void>}
 */
async function executeStackAnalysis(params) {
  const {
    db, config, owner, repo, repository, triggerPRNumber,
    worktreePath: triggerWorktreePath, prNumbers, analysisConfig,
    stackAnalysisId, _deps
  } = params;

  // Merge test overrides over the real collaborators.
  const deps = { ...defaults, ..._deps };

  // If the tracking entry is gone (e.g. already cancelled and cleaned up),
  // there is nothing to do.
  const state = activeStackAnalyses.get(stackAnalysisId);
  if (!state) return;

  try {
    // 1. Resolve repositoryPath from trigger worktree
    const worktreeManager = new deps.GitWorktreeManager(db);
    let repositoryPath;
    try {
      const owningRepoGit = await worktreeManager.resolveOwningRepo(triggerWorktreePath);
      if (owningRepoGit) {
        repositoryPath = (await owningRepoGit.raw(['rev-parse', '--show-toplevel'])).trim();
      }
    } catch (e) {
      // Best-effort: fall back to using the trigger worktree itself as the repo root.
      logger.warn(`Failed to resolve owning repo for ${triggerWorktreePath}, falling back: ${e.message}`);
      repositoryPath = triggerWorktreePath;
    }
    if (!repositoryPath) repositoryPath = triggerWorktreePath;

    // 2. Bulk fetch all PR refs (runs against trigger worktree)
    // A single fetch of every refs/pull/N/head avoids N round-trips; failure
    // here is non-fatal because worktree creation can fetch per-PR later.
    const refspecs = prNumbers.map(n => `+refs/pull/${n}/head:refs/remotes/origin/pr-${n}`);
    try {
      deps.execSync(`git fetch origin ${refspecs.join(' ')}`, {
        cwd: triggerWorktreePath, encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'],
        timeout: 60000
      });
    } catch (fetchError) {
      logger.warn(`Bulk git fetch failed, will fetch per-PR: ${fetchError.message}`);
    }

    // 3. Fetch all PR data from GitHub in parallel
    // NOTE(review): without a GitHub token prDataMap stays empty, so every PR
    // below fails with 'Failed to fetch PR data from GitHub' — confirm intended.
    const githubToken = deps.getGitHubToken(config);
    const prDataMap = new Map();
    if (githubToken) {
      const githubClient = new deps.GitHubClient(githubToken);
      const fetchResults = await Promise.allSettled(
        prNumbers.map(async (prNum) => {
          const prData = await githubClient.fetchPullRequest(owner, repo, prNum);
          return { prNum, prData };
        })
      );
      for (const result of fetchResults) {
        if (result.status === 'fulfilled') {
          prDataMap.set(result.value.prNum, result.value.prData);
        } else {
          logger.warn(`Failed to fetch PR data: ${result.reason?.message}`);
        }
      }
    }

    // 4. Create per-PR worktrees serially (git worktree add locks .git/worktrees)
    const worktreePathMap = new Map();
    for (const prNum of prNumbers) {
      // Cancellation is cooperative: checked between worktree creations.
      if (state.cancelled) break;

      const prData = prDataMap.get(prNum);
      if (!prData) {
        state.prStatuses.set(prNum, { status: 'failed', error: 'Failed to fetch PR data from GitHub' });
        broadcastStackProgress(stackAnalysisId, state);
        continue;
      }

      try {
        state.prStatuses.set(prNum, { status: 'setting_up' });
        broadcastStackProgress(stackAnalysisId, state);

        const prInfo = { owner, repo, number: prNum };
        const perPRWorktreePath = await worktreeManager.createWorktreeForPR(
          prInfo, prData, repositoryPath
        );
        worktreePathMap.set(prNum, perPRWorktreePath);
      } catch (wtError) {
        // A single worktree failure marks only that PR failed; others continue.
        logger.error(`Stack analysis: failed to create worktree for PR #${prNum}: ${wtError.message}`);
        state.prStatuses.set(prNum, { status: 'failed', error: `Worktree creation failed: ${wtError.message}` });
        broadcastStackProgress(stackAnalysisId, state);
      }
    }

    // 5. Launch analyses in parallel for all PRs with worktrees
    const readyPRs = prNumbers.filter(prNum => worktreePathMap.has(prNum) && !state.cancelled);

    // allSettled: each PR's .then/.catch records its own outcome, so a failed
    // PR never aborts its siblings.
    await Promise.allSettled(
      readyPRs.map(prNum => {
        state.prStatuses.set(prNum, { status: 'running' });
        broadcastStackProgress(stackAnalysisId, state);

        // Surface analysisId as soon as the launcher creates it (before awaiting completion)
        const onAnalysisIdReady = (analysisId) => {
          const current = state.prStatuses.get(prNum);
          if (current) {
            current.analysisId = analysisId;
            broadcastStackProgress(stackAnalysisId, state);
          }
        };

        return analyzeStackPR(deps, db, config, {
          owner, repo, repository, prNum,
          worktreePath: worktreePathMap.get(prNum),
          analysisConfig, stackAnalysisId, state,
          githubToken, prData: prDataMap.get(prNum),
          onAnalysisIdReady
        }).then(result => {
          state.prStatuses.set(prNum, {
            status: result.status || 'failed',
            analysisId: result.analysisId,
            suggestionsCount: result.suggestionsCount || 0,
            error: result.error || null
          });
          broadcastStackProgress(stackAnalysisId, state);
        }).catch(error => {
          logger.error(`Stack analysis: PR #${prNum} failed: ${error.message}`);
          state.prStatuses.set(prNum, { status: 'failed', error: error.message });
          broadcastStackProgress(stackAnalysisId, state);
        });
      })
    );

    // 6. Set final status: 'completed' if at least one PR succeeded,
    // 'cancelled' wins over everything, otherwise 'failed'.
    const anySucceeded = [...state.prStatuses.values()].some(s => s.status === 'completed');
    state.status = state.cancelled ? 'cancelled' : (anySucceeded ? 'completed' : 'failed');
    state.completedAt = new Date().toISOString();

  } catch (outerError) {
    logger.error(`Stack analysis ${stackAnalysisId} failed: ${outerError.message}`);
    state.status = 'failed';
    state.error = outerError.message;
  } finally {
    // Persist the (mutated) state and always emit a final progress snapshot.
    activeStackAnalyses.set(stackAnalysisId, state);
    broadcastStackProgress(stackAnalysisId, state);
  }
}
|
|
332
|
+
|
|
333
|
+
/**
 * Run setup + analysis for a single PR in the stack. Invoked concurrently for
 * every PR that received a worktree.
 *
 * Flow: setupStackPR (diff + metadata) → load PR metadata / review rows →
 * resolve instruction sources and provider settings → dispatch to the
 * council, executable, or single-model launcher.
 *
 * @returns {Promise<Object>} Launcher result ({ analysisId, runId, status, ... })
 */
async function analyzeStackPR(deps, db, config, {
  owner, repo, repository, prNum, worktreePath,
  analysisConfig, stackAnalysisId, state, githubToken, prData,
  onAnalysisIdReady
}) {
  // Setup: generates the diff and persists PR metadata for this worktree.
  await deps.setupStackPR({
    db, owner, repo, prNumber: prNum,
    githubToken, worktreePath, worktreeManager: new deps.GitWorktreeManager(db), prData
  });

  // Setup must have produced a metadata row; without it nothing downstream works.
  const prMetadata = await new PRMetadataRepository(db).getByPR(prNum, repository);
  if (!prMetadata) {
    throw new Error(`PR metadata not found for PR #${prNum} after setup`);
  }

  const { review } = await new ReviewRepository(db).getOrCreate({ prNumber: prNum, repository });
  const reviewId = review.id;

  // Instruction sources merged downstream: global config, repo settings, request.
  const repoSettings = await new RepoSettingsRepository(db).getRepoSettings(repository);
  const repoInstructions = repoSettings?.default_instructions || null;
  const globalInstructions = config.globalInstructions || null;
  const requestInstructions = analysisConfig.customInstructions || null;

  const {
    configType = 'single', provider: reqProvider, model: reqModel,
    tier: reqTier, enabledLevels: reqEnabledLevels,
    isCouncil, councilId, councilConfig: rawCouncilConfig
  } = analysisConfig;

  // Shared argument bundle for all three launchers.
  const common = {
    reviewId, worktreePath, prMetadata, prNum, owner, repo, repository,
    onAnalysisIdReady
  };

  if (configType === 'council' || configType === 'advanced' || isCouncil) {
    return launchStackCouncilAnalysis(deps, db, config, {
      ...common,
      globalInstructions, repoInstructions, requestInstructions,
      councilId, rawCouncilConfig, configType
    });
  }

  // Provider/model fallback chain: request → repo settings → config → defaults.
  const selectedProvider = reqProvider || repoSettings?.default_provider || config.default_provider || config.provider || 'claude';
  const selectedModel = reqModel || repoSettings?.default_model || config.default_model || config.model || 'opus';

  if (deps.getProviderClass(selectedProvider)?.isExecutable) {
    return launchStackExecutableAnalysis(deps, db, config, {
      ...common,
      selectedProvider, selectedModel,
      repoInstructions, requestInstructions
    });
  }

  return launchStackSingleAnalysis(deps, db, config, {
    ...common,
    selectedProvider, selectedModel,
    globalInstructions, repoInstructions, requestInstructions,
    reqTier, reqEnabledLevels
  });
}
|
|
406
|
+
|
|
407
|
+
// ============================================================================
|
|
408
|
+
// Analysis launchers (per type)
|
|
409
|
+
// ============================================================================
|
|
410
|
+
|
|
411
|
+
/**
 * Launch single-model analysis for a stack PR and await completion.
 *
 * Creates the analysis_run row up front (runId doubles as analysisId),
 * registers a live status in activeAnalyses, runs analyzer.analyzeLevel1 to
 * completion, then finalizes the status and records last_ai_run_id on the
 * PR metadata.
 *
 * @returns {Promise<{analysisId: string, runId: string, status: string,
 *   suggestionsCount: number, error?: string}>} Terminal result for the
 *   stack orchestrator (never rejects except on analyzer setup failures
 *   thrown before the inner try).
 */
async function launchStackSingleAnalysis(deps, db, config, {
  reviewId, worktreePath, prMetadata, prNum, owner, repo, repository,
  selectedProvider, selectedModel,
  globalInstructions, repoInstructions, requestInstructions,
  reqTier, reqEnabledLevels, onAnalysisIdReady
}) {
  // runId and analysisId are intentionally the same UUID; the caller gets the
  // id immediately so the UI can subscribe before the run finishes.
  const runId = uuidv4();
  const analysisId = runId;
  if (onAnalysisIdReady) onAnalysisIdReady(analysisId);
  const tier = reqTier ? resolveTier(reqTier) : 'balanced';
  const levelsConfig = parseEnabledLevels(reqEnabledLevels);

  // Persist the run row first; the analyzer is told to skip its own creation
  // via skipRunCreation below.
  const analysisRunRepo = new AnalysisRunRepository(db);
  await analysisRunRepo.create({
    id: runId,
    reviewId,
    provider: selectedProvider,
    model: selectedModel,
    tier,
    globalInstructions,
    repoInstructions,
    requestInstructions,
    headSha: prMetadata.head_sha || null,
    configType: 'single',
    levelsConfig
  });

  // Live status: levels 1-3 start 'running' or 'skipped' per levelsConfig;
  // level 4 (finalization) starts 'pending'.
  const initialStatus = {
    id: analysisId,
    runId,
    reviewId,
    prNumber: prNum,
    repository,
    reviewType: 'pr',
    status: 'running',
    startedAt: new Date().toISOString(),
    progress: 'Starting analysis...',
    levels: {
      1: levelsConfig[1] ? { status: 'running', progress: 'Starting...' } : { status: 'skipped', progress: 'Skipped' },
      2: levelsConfig[2] ? { status: 'running', progress: 'Starting...' } : { status: 'skipped', progress: 'Skipped' },
      3: levelsConfig[3] ? { status: 'running', progress: 'Starting...' } : { status: 'skipped', progress: 'Skipped' },
      4: { status: 'pending', progress: 'Pending' }
    },
    filesAnalyzed: 0,
    filesRemaining: 0
  };
  activeAnalyses.set(analysisId, initialStatus);
  reviewToAnalysisId.set(reviewId, analysisId);
  broadcastProgress(analysisId, initialStatus);
  broadcastReviewEvent(reviewId, { type: 'review:analysis_started', analysisId });

  const analyzer = new deps.Analyzer(db, selectedModel, selectedProvider);
  const progressCallback = createProgressCallback(analysisId);

  logger.info(`Stack analysis: starting single-model analysis for PR #${prNum} (${selectedProvider}/${selectedModel})`);

  try {
    const result = await analyzer.analyzeLevel1(
      reviewId, worktreePath, prMetadata, progressCallback,
      { globalInstructions, repoInstructions, requestInstructions },
      null,
      { analysisId, runId, skipRunCreation: true, tier, enabledLevels: levelsConfig }
    );

    const completionInfo = determineCompletionInfo(result);

    // Finalize the live status unless a cancellation raced in while we ran.
    const currentStatus = activeAnalyses.get(analysisId);
    if (currentStatus && currentStatus.status !== 'cancelled') {
      for (let lvl = 1; lvl <= completionInfo.completedLevel; lvl++) {
        currentStatus.levels[lvl] = { status: 'completed', progress: `Level ${lvl} complete` };
      }
      currentStatus.levels[4] = { status: 'completed', progress: 'Results finalized' };
      const completedStatus = {
        ...currentStatus,
        status: 'completed',
        completedAt: new Date().toISOString(),
        progress: completionInfo.progressMessage,
        suggestionsCount: completionInfo.totalSuggestions
      };
      activeAnalyses.set(analysisId, completedStatus);
      broadcastProgress(analysisId, completedStatus);
      broadcastReviewEvent(reviewId, { type: 'review:analysis_completed' });
    }

    // Update pr_metadata with last_ai_run_id (best-effort; a failure here
    // does not fail the analysis).
    try {
      const prMetadataRepo = new PRMetadataRepository(db);
      await prMetadataRepo.updateLastAiRunId(prMetadata.id, runId);
    } catch (e) {
      logger.warn(`Failed to update pr_metadata: ${e.message}`);
    }

    // NOTE(review): if the status was flipped to 'cancelled' without the
    // analyzer throwing, this still reports 'completed' — confirm intended.
    return {
      analysisId, runId, status: 'completed',
      suggestionsCount: completionInfo.totalSuggestions
    };
  } catch (error) {
    // Cancellation surfaces as a flagged error from the analyzer.
    if (error.isCancellation) {
      return { analysisId, runId, status: 'cancelled', suggestionsCount: 0 };
    }

    logger.error(`Stack single analysis failed for PR #${prNum}: ${error.message}`);
    const currentStatus = activeAnalyses.get(analysisId);
    if (currentStatus) {
      const failedStatus = {
        ...currentStatus,
        status: 'failed',
        completedAt: new Date().toISOString(),
        error: error.message,
        progress: 'Analysis failed'
      };
      activeAnalyses.set(analysisId, failedStatus);
      broadcastProgress(analysisId, failedStatus);
    }

    return { analysisId, runId, status: 'failed', error: error.message, suggestionsCount: 0 };
  } finally {
    // Unmap the review so a subsequent analysis on it can register itself.
    reviewToAnalysisId.delete(reviewId);
  }
}
|
|
534
|
+
|
|
535
|
+
/**
 * Launch council analysis for a stack PR and await completion.
 *
 * Resolves the council configuration (from councilId or an inline config),
 * normalizes + validates it, delegates to the shared council launcher from
 * the /analyses routes, then polls until the analysis reaches a terminal
 * state using a timeout estimated from the council config.
 *
 * @returns {Promise<{analysisId: string, runId: string, status: string,
 *   suggestionsCount: number, error: ?string}>}
 * @throws {Error} If the council cannot be found or its config is invalid.
 */
async function launchStackCouncilAnalysis(deps, db, config, {
  reviewId, worktreePath, prMetadata, prNum, owner, repo, repository,
  globalInstructions, repoInstructions, requestInstructions,
  councilId, rawCouncilConfig, configType, onAnalysisIdReady
}) {
  let councilConfig;
  let resolvedConfigType = configType;

  // Config resolution: a stored council (by id) wins over an inline config;
  // having neither is a caller error.
  if (councilId) {
    const councilRepo = new CouncilRepository(db);
    const council = await councilRepo.getById(councilId);
    if (!council) {
      throw new Error(`Council ${councilId} not found`);
    }
    councilConfig = council.config;
    resolvedConfigType = configType || council.type || 'advanced';
  } else if (rawCouncilConfig) {
    councilConfig = rawCouncilConfig;
  } else {
    throw new Error('Council analysis requires councilId or councilConfig');
  }

  councilConfig = normalizeCouncilConfig(councilConfig, resolvedConfigType);
  const configError = validateCouncilConfig(councilConfig, resolvedConfigType);
  if (configError) {
    throw new Error(`Invalid council config: ${configError}`);
  }

  const reviewRepo = new ReviewRepository(db);
  const { review } = await reviewRepo.getOrCreate({ prNumber: prNum, repository });

  logger.info(`Stack analysis: starting council analysis for PR #${prNum}`);

  // Delegate to the shared launcher; it returns ids immediately and runs the
  // council in the background.
  const { analysisId, runId } = await deps.launchCouncilAnalysis(
    db,
    {
      reviewId: review.id,
      worktreePath,
      prMetadata,
      changedFiles: null,
      repository,
      headSha: prMetadata.head_sha,
      logLabel: `Stack PR #${prNum}`,
      initialStatusExtra: { prNumber: prNum, reviewType: 'pr' },
      config,
      // Hook payload identifying the PR for provider-side hooks.
      hookContext: {
        mode: 'pr',
        prContext: {
          number: prNum, owner, repo,
          author: prMetadata.author, baseBranch: prMetadata.base_branch,
          headBranch: prMetadata.head_branch,
          baseSha: prMetadata.base_sha || null, headSha: prMetadata.head_sha || null,
        },
      },
      // Persist the council's summary on the review when it produces one.
      onSuccess: async (result) => {
        if (result.summary) {
          await reviewRepo.upsertSummary(prNum, repository, result.summary);
        }
      }
    },
    councilConfig,
    councilId,
    { globalInstructions, repoInstructions, requestInstructions },
    resolvedConfigType
  );

  if (onAnalysisIdReady) onAnalysisIdReady(analysisId);

  // Wait for completion — use a timeout derived from the council config
  const timeoutMs = estimateCouncilTimeout(councilConfig, resolvedConfigType);
  logger.info(`Stack analysis: council timeout for PR #${prNum} estimated at ${Math.round(timeoutMs / 60000)}min`);
  const finalStatus = await deps.waitForAnalysisCompletion(analysisId, timeoutMs);

  return {
    analysisId,
    runId,
    status: finalStatus.status,
    suggestionsCount: finalStatus.suggestionsCount || 0,
    error: finalStatus.error || null
  };
}
|
|
619
|
+
|
|
620
|
+
/**
|
|
621
|
+
* Launch executable provider analysis for a stack PR and await completion.
|
|
622
|
+
*/
|
|
623
|
+
async function launchStackExecutableAnalysis(deps, db, config, {
|
|
624
|
+
reviewId, worktreePath, prMetadata, prNum, owner, repo, repository,
|
|
625
|
+
selectedProvider, selectedModel,
|
|
626
|
+
repoInstructions, requestInstructions, onAnalysisIdReady
|
|
627
|
+
}) {
|
|
628
|
+
const runId = uuidv4();
|
|
629
|
+
const analysisId = runId;
|
|
630
|
+
if (onAnalysisIdReady) onAnalysisIdReady(analysisId);
|
|
631
|
+
|
|
632
|
+
const reviewRepo = new ReviewRepository(db);
|
|
633
|
+
const { review } = await reviewRepo.getOrCreate({ prNumber: prNum, repository });
|
|
634
|
+
|
|
635
|
+
logger.info(`Stack analysis: starting executable analysis for PR #${prNum} (${selectedProvider})`);
|
|
636
|
+
|
|
637
|
+
// Create a minimal req/res adapter for runExecutableAnalysis
|
|
638
|
+
const fakeReq = {
|
|
639
|
+
app: {
|
|
640
|
+
get: (key) => {
|
|
641
|
+
if (key === 'db') return db;
|
|
642
|
+
if (key === 'config') return config;
|
|
643
|
+
if (key === 'githubToken') return deps.getGitHubToken(config);
|
|
644
|
+
return null;
|
|
645
|
+
}
|
|
646
|
+
}
|
|
647
|
+
};
|
|
648
|
+
// Capture the early response but don't actually send HTTP
|
|
649
|
+
let responded = false;
|
|
650
|
+
const fakeRes = {
|
|
651
|
+
json: () => { responded = true; },
|
|
652
|
+
status: () => ({ json: () => { responded = true; } })
|
|
653
|
+
};
|
|
654
|
+
|
|
655
|
+
const prContext = {
|
|
656
|
+
number: prNum, owner, repo,
|
|
657
|
+
author: prMetadata.author, baseBranch: prMetadata.base_branch,
|
|
658
|
+
headBranch: prMetadata.head_branch,
|
|
659
|
+
baseSha: prMetadata.base_sha || null, headSha: prMetadata.head_sha || null,
|
|
660
|
+
};
|
|
661
|
+
|
|
662
|
+
await deps.runExecutableAnalysis(fakeReq, fakeRes, {
|
|
663
|
+
reviewId: review.id,
|
|
664
|
+
review,
|
|
665
|
+
selectedProvider,
|
|
666
|
+
selectedModel,
|
|
667
|
+
repoInstructions,
|
|
668
|
+
requestInstructions,
|
|
669
|
+
runId,
|
|
670
|
+
analysisId,
|
|
671
|
+
repository,
|
|
672
|
+
reviewType: 'pr',
|
|
673
|
+
headSha: prMetadata.head_sha,
|
|
674
|
+
extraInitialStatus: { prNumber: prNum }
|
|
675
|
+
}, {
|
|
676
|
+
activeAnalyses,
|
|
677
|
+
reviewToAnalysisId,
|
|
678
|
+
broadcastProgress,
|
|
679
|
+
broadcastReviewEvent,
|
|
680
|
+
registerProcessForCancellation
|
|
681
|
+
}, {
|
|
682
|
+
logLabel: `Stack PR #${prNum}`,
|
|
683
|
+
buildContext: (_r, { selectedModel: model, requestInstructions: customInstructions }) => ({
|
|
684
|
+
title: prMetadata.title || `PR #${prNum}`,
|
|
685
|
+
description: prMetadata.description || '',
|
|
686
|
+
cwd: worktreePath,
|
|
687
|
+
model,
|
|
688
|
+
baseSha: prMetadata.base_sha || null,
|
|
689
|
+
headSha: prMetadata.head_sha || null,
|
|
690
|
+
baseBranch: prMetadata.base_branch || null,
|
|
691
|
+
headBranch: prMetadata.head_branch || null,
|
|
692
|
+
customInstructions: customInstructions || null
|
|
693
|
+
}),
|
|
694
|
+
buildHookPayload: () => ({ mode: 'pr', prContext }),
|
|
695
|
+
onSuccess: async (_db, _runId, { summary }) => {
|
|
696
|
+
const prMetadataRepo = new PRMetadataRepository(db);
|
|
697
|
+
try {
|
|
698
|
+
await prMetadataRepo.updateLastAiRunId(prMetadata.id, _runId);
|
|
699
|
+
} catch (e) {
|
|
700
|
+
logger.warn(`Failed to update pr_metadata: ${e.message}`);
|
|
701
|
+
}
|
|
702
|
+
if (summary) {
|
|
703
|
+
try {
|
|
704
|
+
await reviewRepo.upsertSummary(prNum, repository, summary);
|
|
705
|
+
} catch (e) {
|
|
706
|
+
logger.warn(`Failed to save summary: ${e.message}`);
|
|
707
|
+
}
|
|
708
|
+
}
|
|
709
|
+
}
|
|
710
|
+
});
|
|
711
|
+
|
|
712
|
+
// Wait for completion
|
|
713
|
+
const finalStatus = await deps.waitForAnalysisCompletion(analysisId);
|
|
714
|
+
|
|
715
|
+
return {
|
|
716
|
+
analysisId,
|
|
717
|
+
runId,
|
|
718
|
+
status: finalStatus.status,
|
|
719
|
+
suggestionsCount: finalStatus.suggestionsCount || 0,
|
|
720
|
+
error: finalStatus.error || null
|
|
721
|
+
};
|
|
722
|
+
}
|
|
723
|
+
|
|
724
|
+
// ============================================================================
|
|
725
|
+
// Endpoints
|
|
726
|
+
// ============================================================================
|
|
727
|
+
|
|
728
|
+
/**
 * Start a stack analysis across multiple PRs.
 *
 * Validates the request, registers in-memory tracking state, kicks off the
 * orchestrator in the background, and responds immediately with the new
 * stackAnalysisId (clients poll GET /api/analyses/stack/:id for progress).
 */
router.post('/api/pr/:owner/:repo/:number/analyses/stack', async (req, res) => {
  try {
    const { owner, repo, number } = req.params;
    // Always pass the radix; Number.isNaN avoids the coercing global isNaN.
    const prNumber = Number.parseInt(number, 10);

    if (Number.isNaN(prNumber) || prNumber <= 0) {
      return res.status(400).json({ error: 'Invalid pull request number' });
    }

    const { prNumbers, analysisConfig } = req.body || {};

    if (!Array.isArray(prNumbers) || prNumbers.length === 0) {
      return res.status(400).json({ error: 'prNumbers must be a non-empty array' });
    }

    if (!analysisConfig) {
      return res.status(400).json({ error: 'analysisConfig is required' });
    }

    // Validate all PR numbers before any work begins.
    for (const n of prNumbers) {
      if (!Number.isInteger(n) || n <= 0) {
        return res.status(400).json({ error: `Invalid PR number: ${n}` });
      }
    }

    const repository = normalizeRepository(owner, repo);
    const db = req.app.get('db');
    const config = req.app.get('config') || {};

    // Find worktree path from the triggering PR — the stack run reuses it.
    const worktreeManager = new GitWorktreeManager(db);
    const worktreePath = await worktreeManager.getWorktreePath({ owner, repo, number: prNumber });

    if (!worktreePath) {
      return res.status(404).json({ error: 'Worktree not found for this PR. Please load the PR first.' });
    }

    const stackAnalysisId = uuidv4();

    // Initialize per-PR tracking state; every PR starts as 'pending'.
    const prStatuses = new Map();
    for (const n of prNumbers) {
      prStatuses.set(n, { status: 'pending' });
    }

    const state = {
      id: stackAnalysisId,
      status: 'running',
      triggerWorktreePath: worktreePath,
      prStatuses,
      totalPRs: prNumbers.length,
      startedAt: new Date().toISOString(),
      cancelled: false,
      error: null,
      completedAt: null
    };
    activeStackAnalyses.set(stackAnalysisId, state);

    // Start execution in background (deliberately not awaited); the catch
    // prevents an unhandled rejection if the orchestrator itself throws.
    executeStackAnalysis({
      db, config, owner, repo, repository,
      triggerPRNumber: prNumber,
      worktreePath, prNumbers, analysisConfig, stackAnalysisId
    }).catch(error => {
      logger.error(`Stack analysis ${stackAnalysisId} uncaught error: ${error.message}`);
    });

    // Respond immediately; clients poll the status endpoint for progress.
    res.json({
      stackAnalysisId,
      status: 'started',
      prAnalyses: prNumbers.map(n => ({ prNumber: n, status: 'pending' }))
    });

  } catch (error) {
    logger.error('Error starting stack analysis:', error);
    res.status(500).json({ error: 'Failed to start stack analysis' });
  }
});
|
|
811
|
+
|
|
812
|
+
/**
 * Get current state of a stack analysis.
 *
 * Returns 404 once the analysis is no longer tracked in memory; otherwise a
 * snapshot of overall progress plus a flattened per-PR status array.
 */
router.get('/api/analyses/stack/:stackAnalysisId', (req, res) => {
  const state = activeStackAnalyses.get(req.params.stackAnalysisId);

  if (!state) {
    return res.status(404).json({ error: 'Stack analysis not found' });
  }

  // Flatten the Map<prNumber, statusObject> into [{ prNumber, ...status }].
  const prStatuses = Array.from(state.prStatuses, ([prNumber, info]) => ({
    prNumber,
    ...info
  }));

  res.json({
    id: state.id,
    status: state.status,
    currentPRNumber: null,
    currentPRIndex: null,
    totalPRs: state.totalPRs,
    startedAt: state.startedAt,
    completedAt: state.completedAt,
    error: state.error,
    prStatuses
  });
});
|
|
840
|
+
|
|
841
|
+
/**
 * Cancel an active stack analysis.
 *
 * Sets the cancellation flag (checked by the orchestrator between PRs) and
 * kills any provider processes for PRs that are currently running. Returns
 * success even if the analysis already finished.
 */
router.post('/api/analyses/stack/:stackAnalysisId/cancel', (req, res) => {
  const { stackAnalysisId } = req.params;
  const state = activeStackAnalyses.get(stackAnalysisId);

  if (!state) {
    return res.status(404).json({ error: 'Stack analysis not found' });
  }

  // Terminal states cannot be cancelled; report the current status instead.
  if (['completed', 'failed', 'cancelled'].includes(state.status)) {
    return res.json({
      success: true,
      message: `Stack analysis already ${state.status}`,
      status: state.status
    });
  }

  logger.info(`Cancelling stack analysis ${stackAnalysisId}`);

  // Set cancelled flag — the orchestrator checks this between PRs. `state`
  // is the live object held in activeStackAnalyses, so mutating it in place
  // is sufficient; no re-insertion into the Map is needed.
  state.cancelled = true;

  // Cancel all currently running analyses.
  for (const prStatus of state.prStatuses.values()) {
    if (prStatus.status === 'running' && prStatus.analysisId) {
      killProcesses(prStatus.analysisId);
    }
  }

  res.json({
    success: true,
    message: 'Stack analysis cancellation requested',
    status: 'cancelling'
  });
});
|
|
880
|
+
|
|
881
|
+
// Export the router for server mounting, plus selected internals for tests.
module.exports = router;
Object.assign(module.exports, {
  activeStackAnalyses,
  executeStackAnalysis,
  waitForAnalysisCompletion,
  estimateCouncilTimeout
});
|