newpr 0.6.5 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. package/package.json +1 -1
  2. package/src/history/store.ts +25 -0
  3. package/src/stack/balance.ts +128 -0
  4. package/src/stack/coupling.test.ts +158 -0
  5. package/src/stack/coupling.ts +135 -0
  6. package/src/stack/delta.test.ts +223 -0
  7. package/src/stack/delta.ts +264 -0
  8. package/src/stack/execute.test.ts +176 -0
  9. package/src/stack/execute.ts +194 -0
  10. package/src/stack/feasibility.test.ts +185 -0
  11. package/src/stack/feasibility.ts +286 -0
  12. package/src/stack/integration.test.ts +266 -0
  13. package/src/stack/merge-groups.test.ts +97 -0
  14. package/src/stack/merge-groups.ts +87 -0
  15. package/src/stack/partition.test.ts +233 -0
  16. package/src/stack/partition.ts +273 -0
  17. package/src/stack/plan.test.ts +154 -0
  18. package/src/stack/plan.ts +139 -0
  19. package/src/stack/pr-title.ts +64 -0
  20. package/src/stack/publish.ts +96 -0
  21. package/src/stack/split.ts +173 -0
  22. package/src/stack/types.ts +202 -0
  23. package/src/stack/verify.test.ts +137 -0
  24. package/src/stack/verify.ts +201 -0
  25. package/src/web/client/components/FeasibilityAlert.tsx +64 -0
  26. package/src/web/client/components/InputScreen.tsx +100 -89
  27. package/src/web/client/components/ResultsScreen.tsx +10 -2
  28. package/src/web/client/components/StackGroupCard.tsx +171 -0
  29. package/src/web/client/components/StackWarnings.tsx +135 -0
  30. package/src/web/client/hooks/useStack.ts +301 -0
  31. package/src/web/client/panels/StackPanel.tsx +289 -0
  32. package/src/web/server/routes.ts +114 -0
  33. package/src/web/server/stack-manager.ts +580 -0
  34. package/src/web/server.ts +15 -0
  35. package/src/web/styles/built.css +1 -1
@@ -0,0 +1,580 @@
1
+ import type { NewprConfig } from "../../types/config.ts";
2
+ import type { FileGroup } from "../../types/output.ts";
3
+ import type { StackWarning, FeasibilityResult, StackExecResult } from "../../stack/types.ts";
4
+ import type { StackPlan } from "../../stack/types.ts";
5
+ import { loadSession } from "../../history/store.ts";
6
+ import { saveStackSidecar, loadStackSidecar } from "../../history/store.ts";
7
+ import { parsePrInput } from "../../github/parse-pr.ts";
8
+ import { fetchPrData } from "../../github/fetch-pr.ts";
9
+ import { ensureRepo } from "../../workspace/repo-cache.ts";
10
+ import { extractDeltas, computeGroupStats } from "../../stack/delta.ts";
11
+ import { partitionGroups } from "../../stack/partition.ts";
12
+ import { applyCouplingRules } from "../../stack/coupling.ts";
13
+ import { splitOversizedGroups } from "../../stack/split.ts";
14
+ import { rebalanceGroups } from "../../stack/balance.ts";
15
+ import { mergeGroups } from "../../stack/merge-groups.ts";
16
+ import { checkFeasibility } from "../../stack/feasibility.ts";
17
+ import { createStackPlan } from "../../stack/plan.ts";
18
+ import { executeStack } from "../../stack/execute.ts";
19
+ import { verifyStack } from "../../stack/verify.ts";
20
+ import { generatePrTitles } from "../../stack/pr-title.ts";
21
+ import { createLlmClient } from "../../llm/client.ts";
22
+
23
+ // ---------------------------------------------------------------------------
24
+ // Types
25
+ // ---------------------------------------------------------------------------
26
+
27
/** Lifecycle state of a stack run for one analysis session. */
export type StackStatus = "running" | "done" | "error" | "canceled";
/** Pipeline stages, in the order runStackPipeline executes them. */
export type StackPhase = "partitioning" | "planning" | "executing" | "done";

/** One progress message emitted while the pipeline runs. */
export interface StackEvent {
  // Monotonically increasing per session — it is the index into the session's event log.
  id: number;
  // Epoch milliseconds (Date.now()) at emit time.
  timestamp: number;
  phase: StackPhase;
  message: string;
}

/** Resolved git/GitHub coordinates of the PR being stacked. */
export interface StackContext {
  repo_path: string;
  base_sha: string;
  head_sha: string;
  base_branch: string;
  head_branch: string;
  pr_number: number;
  owner: string;
  repo: string;
}

/** Output of the partitioning phase: file→group ownership plus diagnostics. */
export interface StackPartitionData {
  // Map of file path → owning group id (flattened from a Map for JSON persistence).
  ownership: Record<string, string>;
  // Files the AI moved between groups, with provenance and rationale.
  reattributed: Array<{ path: string; from_groups: string[]; to_group: string; reason: string }>;
  // Free-form warning strings (legacy format).
  warnings: string[];
  // Structured warnings rendered by the UI.
  structured_warnings: StackWarning[];
  // Merges forced by coupling rules (e.g. files that must travel together).
  forced_merges: Array<{ path: string; from_group: string; to_group: string }>;
  groups: FileGroup[];
}

/** Serializable subset of the stack plan stored on the session. */
export interface StackPlanData {
  base_sha: string;
  head_sha: string;
  groups: StackPlan["groups"];
  // Group id → expected git tree SHA (flattened from a Map for JSON persistence).
  expected_trees: Record<string, string>;
}

/** Result of the post-execution tree-equivalence verification. */
export interface StackVerifyData {
  verified: boolean;
  errors: string[];
  warnings: string[];
  structured_warnings: StackWarning[];
}

/**
 * Persistable snapshot of a stack session — everything except the live
 * runtime machinery (events, subscribers, abortController). This is the
 * shape written to / read from the sidecar file.
 */
export interface StackStateSnapshot {
  status: StackStatus;
  phase: StackPhase | null;
  error: string | null;
  maxGroups: number | null;
  context: StackContext | null;
  partition: StackPartitionData | null;
  feasibility: FeasibilityResult | null;
  plan: StackPlanData | null;
  execResult: StackExecResult | null;
  verifyResult: StackVerifyData | null;
  startedAt: number;
  finishedAt: number | null;
}

/** In-memory session record: snapshot fields plus live pub/sub and cancellation state. */
interface StackSession {
  analysisSessionId: string;
  status: StackStatus;
  phase: StackPhase | null;
  error: string | null;
  maxGroups: number | null;
  context: StackContext | null;
  partition: StackPartitionData | null;
  feasibility: FeasibilityResult | null;
  plan: StackPlanData | null;
  execResult: StackExecResult | null;
  verifyResult: StackVerifyData | null;
  // Full event log, replayed to late subscribers by subscribeStack.
  events: StackEvent[];
  // Live listeners; receive StackEvents plus a terminal "done"/"error" signal.
  subscribers: Set<(event: StackEvent | { type: "done" | "error"; data?: string }) => void>;
  startedAt: number;
  finishedAt: number | null;
  // Cooperative cancellation; checked between pipeline stages via checkAborted().
  abortController: AbortController;
}
104
+
105
+ // ---------------------------------------------------------------------------
106
+ // State
107
+ // ---------------------------------------------------------------------------
108
+
109
// In-memory registry of stack sessions, keyed by analysis session id. Lives
// for the server process lifetime; terminal runs are additionally persisted
// to disk via saveStackSidecar and rehydrated by restoreCompletedStacks.
const sessions = new Map<string, StackSession>();
110
+
111
+ function emit(session: StackSession, phase: StackPhase, message: string): void {
112
+ const event: StackEvent = {
113
+ id: session.events.length,
114
+ timestamp: Date.now(),
115
+ phase,
116
+ message,
117
+ };
118
+ session.events.push(event);
119
+ for (const sub of session.subscribers) sub(event);
120
+ }
121
+
122
+ function toSnapshot(session: StackSession): StackStateSnapshot {
123
+ return {
124
+ status: session.status,
125
+ phase: session.phase,
126
+ error: session.error,
127
+ maxGroups: session.maxGroups,
128
+ context: session.context,
129
+ partition: session.partition,
130
+ feasibility: session.feasibility,
131
+ plan: session.plan,
132
+ execResult: session.execResult,
133
+ verifyResult: session.verifyResult,
134
+ startedAt: session.startedAt,
135
+ finishedAt: session.finishedAt,
136
+ };
137
+ }
138
+
139
+ // ---------------------------------------------------------------------------
140
+ // Public API
141
+ // ---------------------------------------------------------------------------
142
+
143
/**
 * Look up the live in-memory stack session for an analysis session id.
 * Returns undefined when no stack has been started (or restored) for it.
 */
export function getStackSession(analysisSessionId: string): StackSession | undefined {
  return sessions.get(analysisSessionId);
}
146
+
147
+ export function getStackState(analysisSessionId: string): StackStateSnapshot | null {
148
+ const session = sessions.get(analysisSessionId);
149
+ if (!session) return null;
150
+ return toSnapshot(session);
151
+ }
152
+
153
+ export function startStack(
154
+ analysisSessionId: string,
155
+ maxGroups: number | null,
156
+ token: string,
157
+ config: NewprConfig,
158
+ ): { ok: true } | { error: string; status: number } {
159
+ const existing = sessions.get(analysisSessionId);
160
+ if (existing?.status === "running") {
161
+ return { ok: true };
162
+ }
163
+
164
+ const session: StackSession = {
165
+ analysisSessionId,
166
+ status: "running",
167
+ phase: null,
168
+ error: null,
169
+ maxGroups,
170
+ context: null,
171
+ partition: null,
172
+ feasibility: null,
173
+ plan: null,
174
+ execResult: null,
175
+ verifyResult: null,
176
+ events: [],
177
+ subscribers: new Set(),
178
+ startedAt: Date.now(),
179
+ finishedAt: null,
180
+ abortController: new AbortController(),
181
+ };
182
+ sessions.set(analysisSessionId, session);
183
+
184
+ runStackPipeline(session, token, config);
185
+
186
+ return { ok: true };
187
+ }
188
+
189
+ export function cancelStack(analysisSessionId: string): boolean {
190
+ const session = sessions.get(analysisSessionId);
191
+ if (!session || session.status !== "running") return false;
192
+ session.abortController.abort();
193
+ session.status = "canceled";
194
+ session.finishedAt = Date.now();
195
+ for (const sub of session.subscribers) sub({ type: "error", data: "Canceled" });
196
+ session.subscribers.clear();
197
+ return true;
198
+ }
199
+
200
+ export function subscribeStack(
201
+ analysisSessionId: string,
202
+ callback: (event: StackEvent | { type: "done" | "error"; data?: string }) => void,
203
+ ): (() => void) | null {
204
+ const session = sessions.get(analysisSessionId);
205
+ if (!session) return null;
206
+
207
+ for (const past of session.events) callback(past);
208
+
209
+ if (session.status === "done") {
210
+ callback({ type: "done" });
211
+ return () => {};
212
+ }
213
+ if (session.status === "error" || session.status === "canceled") {
214
+ callback({ type: "error", data: session.error ?? undefined });
215
+ return () => {};
216
+ }
217
+
218
+ session.subscribers.add(callback);
219
+ return () => { session.subscribers.delete(callback); };
220
+ }
221
+
222
+ export async function restoreCompletedStacks(sessionIds: string[]): Promise<void> {
223
+ for (const id of sessionIds) {
224
+ if (sessions.has(id)) continue;
225
+ const raw = await loadStackSidecar(id);
226
+ if (!raw) continue;
227
+ const snapshot = raw as unknown as StackStateSnapshot;
228
+
229
+ const session: StackSession = {
230
+ analysisSessionId: id,
231
+ status: snapshot.status === "running" ? "error" : snapshot.status,
232
+ phase: snapshot.phase,
233
+ error: snapshot.status === "running" ? "Server restarted during stack pipeline" : snapshot.error,
234
+ maxGroups: snapshot.maxGroups,
235
+ context: snapshot.context,
236
+ partition: snapshot.partition,
237
+ feasibility: snapshot.feasibility,
238
+ plan: snapshot.plan,
239
+ execResult: snapshot.execResult,
240
+ verifyResult: snapshot.verifyResult,
241
+ events: [],
242
+ subscribers: new Set(),
243
+ startedAt: snapshot.startedAt,
244
+ finishedAt: snapshot.finishedAt ?? Date.now(),
245
+ abortController: new AbortController(),
246
+ };
247
+ sessions.set(id, session);
248
+ }
249
+ }
250
+
251
+ // ---------------------------------------------------------------------------
252
+ // Pipeline
253
+ // ---------------------------------------------------------------------------
254
+
255
/**
 * The full stack pipeline: partition → plan → execute → verify.
 *
 * Runs detached from the HTTP request that started it (see startStack).
 * Progress is reported via emit(); the terminal outcome (done or error) is
 * written onto the session, broadcast to subscribers, and persisted as a
 * sidecar. This function never rejects — every failure is caught below and
 * recorded as session state.
 *
 * Cancellation is cooperative: checkAborted() is consulted between stages,
 * so whatever await is in flight completes before the abort takes effect.
 */
async function runStackPipeline(
  session: StackSession,
  token: string,
  config: NewprConfig,
): Promise<void> {
  try {
    const stored = await loadSession(session.analysisSessionId);
    if (!stored) throw new Error("Analysis session not found");

    const prUrl = stored.meta.pr_url;
    const parsed = parsePrInput(prUrl);
    if (!parsed) throw new Error("Invalid PR URL in session");

    // ---- Partition phase ----
    session.phase = "partitioning";
    emit(session, "partitioning", "Fetching PR data...");

    const ghHeaders: Record<string, string> = {
      Accept: "application/vnd.github.v3+json",
      "User-Agent": "newpr",
    };
    if (token) ghHeaders.Authorization = `token ${token}`;

    const prData = await fetchPrData(parsed, token);

    // Hit the GitHub pulls API directly to resolve base/head SHAs and branch
    // names (fields fetchPrData apparently does not surface — TODO confirm).
    const prApiUrl = `https://api.github.com/repos/${parsed.owner}/${parsed.repo}/pulls/${parsed.number}`;
    const prResp = await fetch(prApiUrl, { headers: ghHeaders });
    if (!prResp.ok) throw new Error("Failed to fetch PR data from GitHub");
    const prJson = await prResp.json() as Record<string, unknown>;
    const baseObj = prJson.base as Record<string, unknown>;
    const headObj = prJson.head as Record<string, unknown>;
    const baseSha = baseObj.sha as string;
    const headSha = headObj.sha as string;
    const baseBranch = baseObj.ref as string;
    const headBranch = headObj.ref as string;

    const repoPath = await ensureRepo(parsed.owner, parsed.repo, token);

    session.context = {
      repo_path: repoPath,
      base_sha: baseSha,
      head_sha: headSha,
      base_branch: baseBranch,
      head_branch: headBranch,
      pr_number: parsed.number,
      owner: parsed.owner,
      repo: parsed.repo,
    };

    checkAborted(session);

    emit(session, "partitioning", "Extracting deltas...");
    const deltas = await extractDeltas(repoPath, baseSha, headSha);

    const analysisFiles = stored.files.map((f) => f.path);
    const fileSummaries = stored.files.map((f) => ({
      path: f.path,
      status: f.status,
      summary: f.summary,
    }));
    // Collect every path touched by the git deltas (including rename sources)
    // so files missing from the analysis still get classified.
    const deltaFilePaths = new Set<string>();
    for (const delta of deltas) {
      for (const change of delta.changes) {
        deltaFilePaths.add(change.path);
        if (change.old_path) deltaFilePaths.add(change.old_path);
      }
    }
    const analysisSet = new Set(analysisFiles);
    const deltaOnlyFiles = [...deltaFilePaths].filter((p) => !analysisSet.has(p));
    const changedFiles = [...analysisFiles, ...deltaOnlyFiles];

    emit(session, "partitioning", "Classifying files into groups...");
    const llmClient = createLlmClient({
      api_key: config.openrouter_api_key,
      model: config.model,
      timeout: config.timeout,
    });
    const partition = await partitionGroups(
      llmClient,
      stored.groups,
      changedFiles,
      fileSummaries,
      prData.commits,
    );

    checkAborted(session);

    const groupOrder = stored.groups.map((g) => g.name);
    const coupled = applyCouplingRules(partition.ownership, changedFiles, groupOrder);
    const mergedOwnership = new Map(coupled.ownership);
    const allWarnings = [...partition.warnings, ...coupled.warnings];
    const allStructuredWarnings: StackWarning[] = [...partition.structured_warnings, ...coupled.structured_warnings];

    buildReattributionWarnings(partition, analysisSet, allStructuredWarnings);

    // Safety net: any delta file still unowned after AI classification falls
    // back into the last group so no change is silently dropped.
    const lastGroup = groupOrder[groupOrder.length - 1];
    if (lastGroup) {
      const backfilled: string[] = [];
      for (const path of deltaFilePaths) {
        if (!mergedOwnership.has(path)) {
          mergedOwnership.set(path, lastGroup);
          backfilled.push(path);
        }
      }
      if (backfilled.length > 0) {
        allWarnings.push(`Files still unassigned after AI classification, fallback to "${lastGroup}": ${backfilled.join(", ")}`);
        allStructuredWarnings.push({
          category: "assignment",
          severity: "warn",
          title: `${backfilled.length} file(s) fell back to last group`,
          message: `AI could not classify these files — assigned to "${lastGroup}" as fallback`,
          details: backfilled,
        });
      }
    }

    emit(session, "partitioning", "Splitting oversized groups...");
    const split = await splitOversizedGroups(llmClient, stored.groups, mergedOwnership);
    let currentGroups = split.groups;
    for (const [path, groupId] of split.ownership) {
      mergedOwnership.set(path, groupId);
    }
    allStructuredWarnings.push(...split.warnings);

    emit(session, "partitioning", "Rebalancing groups...");
    const balanced = await rebalanceGroups(llmClient, mergedOwnership, currentGroups);
    for (const [path, groupId] of balanced.ownership) {
      mergedOwnership.set(path, groupId);
    }
    allStructuredWarnings.push(...balanced.warnings);

    // If the user capped the PR count, merge the smallest groups down to fit.
    if (session.maxGroups && session.maxGroups > 0 && currentGroups.length > session.maxGroups) {
      const merged = mergeGroups(currentGroups, mergedOwnership, session.maxGroups);
      currentGroups = merged.groups;
      for (const [path, groupId] of merged.ownership) {
        mergedOwnership.set(path, groupId);
      }
      const mergeDetails: string[] = [];
      for (const m of merged.merges) {
        allWarnings.push(`Merged group "${m.absorbed}" into "${m.into}"`);
        mergeDetails.push(`"${m.absorbed}" → "${m.into}"`);
      }
      if (mergeDetails.length > 0) {
        allStructuredWarnings.push({
          category: "grouping",
          severity: "info",
          title: `${mergeDetails.length} group(s) merged to reduce PR count`,
          message: "Smaller groups were combined with adjacent ones to meet the max PRs limit",
          details: mergeDetails,
        });
      }
    }

    emit(session, "partitioning", "Checking feasibility...");
    const feasibility = checkFeasibility({ deltas, ownership: mergedOwnership });
    // Flatten the Map so the partition result is JSON-serializable.
    const ownershipObj = Object.fromEntries(mergedOwnership);

    session.partition = {
      ownership: ownershipObj,
      reattributed: partition.reattributed,
      warnings: allWarnings,
      structured_warnings: allStructuredWarnings,
      forced_merges: coupled.forced_merges,
      groups: currentGroups,
    };
    session.feasibility = feasibility;

    if (!feasibility.feasible) {
      throw new Error("Stacking is not feasible — dependency cycle detected");
    }

    checkAborted(session);

    // ---- Plan phase ----
    session.phase = "planning";
    emit(session, "planning", "Creating stack plan...");

    // Rebuild a Map for the plan/execute APIs (the flattened ownershipObj was
    // only for the snapshot above).
    const ownership = new Map(Object.entries(ownershipObj));
    const plan = await createStackPlan({
      repo_path: repoPath,
      base_sha: baseSha,
      head_sha: headSha,
      deltas,
      ownership,
      // Non-null assertion: guarded by the feasibility.feasible check above.
      group_order: feasibility.ordered_group_ids!,
      groups: currentGroups,
    });

    emit(session, "planning", "Computing group stats...");
    const groupStats = await computeGroupStats(
      repoPath,
      baseSha,
      feasibility.ordered_group_ids!,
      plan.expected_trees,
    );
    for (const group of plan.groups) {
      const s = groupStats.get(group.id);
      if (s) group.stats = s;
    }

    emit(session, "planning", "Generating PR titles...");
    const prTitles = await generatePrTitles(llmClient, plan.groups, stored.meta.pr_title);
    for (const group of plan.groups) {
      const title = prTitles.get(group.id);
      if (title) group.pr_title = title;
    }

    session.plan = {
      base_sha: plan.base_sha,
      head_sha: plan.head_sha,
      groups: plan.groups,
      expected_trees: Object.fromEntries(plan.expected_trees),
    };

    checkAborted(session);

    // ---- Execute phase ----
    session.phase = "executing";
    emit(session, "executing", "Building stack commits...");

    const execResult = await executeStack({
      repo_path: repoPath,
      plan,
      deltas,
      ownership,
      pr_author: {
        name: stored.meta.author,
        // Synthesizes the GitHub noreply address from the username.
        email: `${stored.meta.author}@users.noreply.github.com`,
      },
      pr_number: parsed.number,
      head_branch: headBranch,
    });

    emit(session, "executing", "Verifying tree equivalence...");
    const verifyResult = await verifyStack({
      repo_path: repoPath,
      base_sha: baseSha,
      head_sha: headSha,
      exec_result: execResult,
      ownership,
    });

    session.execResult = execResult;
    session.verifyResult = {
      verified: verifyResult.verified,
      errors: verifyResult.errors,
      warnings: verifyResult.warnings,
      structured_warnings: verifyResult.structured_warnings,
    };

    if (!verifyResult.verified) {
      throw new Error(`Verification failed: ${verifyResult.errors.join(", ")}`);
    }

    // ---- Done ----
    session.phase = "done";
    session.status = "done";
    session.finishedAt = Date.now();
    emit(session, "done", "Stack ready");

    for (const sub of session.subscribers) sub({ type: "done" });
    session.subscribers.clear();

    // Best-effort persistence of the terminal snapshot; failures are ignored.
    await saveStackSidecar(session.analysisSessionId, toSnapshot(session)).catch(() => {});
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    // cancelStack() already set status/notified subscribers — do not overwrite
    // the canceled state with an error (and no sidecar is written here).
    if (session.status === "canceled") return;
    session.status = "error";
    session.error = msg;
    session.finishedAt = Date.now();

    for (const sub of session.subscribers) sub({ type: "error", data: msg });
    session.subscribers.clear();

    await saveStackSidecar(session.analysisSessionId, toSnapshot(session)).catch(() => {});
  }
}
532
+
533
+ function checkAborted(session: StackSession): void {
534
+ if (session.abortController.signal.aborted) {
535
+ throw new Error("Stack pipeline canceled");
536
+ }
537
+ }
538
+
539
+ function buildReattributionWarnings(
540
+ partition: { reattributed: Array<{ path: string; from_groups: string[]; to_group: string }> },
541
+ analysisSet: Set<string>,
542
+ warnings: StackWarning[],
543
+ ): void {
544
+ if (partition.reattributed.length === 0) return;
545
+
546
+ const resolved = partition.reattributed.filter((r) => r.from_groups.length > 0);
547
+ const assigned = partition.reattributed.filter((r) => r.from_groups.length === 0);
548
+
549
+ if (resolved.length > 0) {
550
+ warnings.push({
551
+ category: "assignment",
552
+ severity: "info",
553
+ title: `${resolved.length} ambiguous file(s) resolved by AI`,
554
+ message: "These files appeared in multiple groups — AI chose the best fit",
555
+ details: resolved.map((r) => `${r.path} → ${r.to_group}`),
556
+ });
557
+ }
558
+ if (assigned.length > 0) {
559
+ const fromDelta = assigned.filter((r) => !analysisSet.has(r.path));
560
+ const fromAnalysis = assigned.filter((r) => analysisSet.has(r.path));
561
+ if (fromAnalysis.length > 0) {
562
+ warnings.push({
563
+ category: "assignment",
564
+ severity: "info",
565
+ title: `${fromAnalysis.length} unassigned file(s) placed by AI`,
566
+ message: "These files were not in any group — AI assigned them",
567
+ details: fromAnalysis.map((r) => `${r.path} → ${r.to_group}`),
568
+ });
569
+ }
570
+ if (fromDelta.length > 0) {
571
+ warnings.push({
572
+ category: "assignment",
573
+ severity: "info",
574
+ title: `${fromDelta.length} file(s) from git diff classified by AI`,
575
+ message: "These files were in the git diff but not in the analysis — AI assigned them to groups",
576
+ details: fromDelta.map((r) => `${r.path} → ${r.to_group}`),
577
+ });
578
+ }
579
+ }
580
+ }
package/src/web/server.ts CHANGED
@@ -185,6 +185,21 @@ export async function startWebServer(options: WebServerOptions): Promise<void> {
185
185
  if (path === "/api/review" && req.method === "POST") {
186
186
  return routes["POST /api/review"](req);
187
187
  }
188
+ if (path === "/api/stack/start" && req.method === "POST") {
189
+ return routes["POST /api/stack/start"](req);
190
+ }
191
+ if (path === "/api/stack/publish" && req.method === "POST") {
192
+ return routes["POST /api/stack/publish"](req);
193
+ }
194
+ if (path.match(/^\/api\/stack\/[^/]+\/events$/) && req.method === "GET") {
195
+ return routes["GET /api/stack/:id/events"](req);
196
+ }
197
+ if (path.match(/^\/api\/stack\/[^/]+\/cancel$/) && req.method === "POST") {
198
+ return routes["POST /api/stack/:id/cancel"](req);
199
+ }
200
+ if (path.match(/^\/api\/stack\/[^/]+$/) && req.method === "GET") {
201
+ return routes["GET /api/stack/:id"](req);
202
+ }
188
203
 
189
204
  return new Response("Not Found", { status: 404 });
190
205
  },