omegon 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.gitattributes +3 -0
- package/AGENTS.md +16 -0
- package/LICENSE +15 -0
- package/README.md +289 -0
- package/bin/pi.mjs +30 -0
- package/extensions/00-secrets/index.ts +1126 -0
- package/extensions/01-auth/auth.ts +401 -0
- package/extensions/01-auth/index.ts +289 -0
- package/extensions/auto-compact.ts +42 -0
- package/extensions/bootstrap/deps.ts +291 -0
- package/extensions/bootstrap/index.ts +811 -0
- package/extensions/chronos/chronos.sh +487 -0
- package/extensions/chronos/index.ts +148 -0
- package/extensions/cleave/assessment.ts +754 -0
- package/extensions/cleave/bridge.ts +31 -0
- package/extensions/cleave/conflicts.ts +250 -0
- package/extensions/cleave/dispatcher.ts +808 -0
- package/extensions/cleave/guardrails.ts +426 -0
- package/extensions/cleave/index.ts +3121 -0
- package/extensions/cleave/lifecycle-emitter.ts +20 -0
- package/extensions/cleave/openspec.ts +811 -0
- package/extensions/cleave/planner.ts +260 -0
- package/extensions/cleave/review.ts +579 -0
- package/extensions/cleave/skills.ts +355 -0
- package/extensions/cleave/types.ts +261 -0
- package/extensions/cleave/workspace.ts +861 -0
- package/extensions/cleave/worktree.ts +243 -0
- package/extensions/core-renderers.ts +253 -0
- package/extensions/dashboard/context-gauge.ts +58 -0
- package/extensions/dashboard/file-watch.ts +14 -0
- package/extensions/dashboard/footer.ts +1145 -0
- package/extensions/dashboard/git.ts +185 -0
- package/extensions/dashboard/index.ts +478 -0
- package/extensions/dashboard/memory-audit.ts +34 -0
- package/extensions/dashboard/overlay-data.ts +705 -0
- package/extensions/dashboard/overlay.ts +365 -0
- package/extensions/dashboard/render-utils.ts +54 -0
- package/extensions/dashboard/types.ts +191 -0
- package/extensions/dashboard/uri-helper.ts +45 -0
- package/extensions/debug.ts +69 -0
- package/extensions/defaults.ts +282 -0
- package/extensions/design-tree/dashboard-state.ts +161 -0
- package/extensions/design-tree/design-card.ts +362 -0
- package/extensions/design-tree/index.ts +2130 -0
- package/extensions/design-tree/lifecycle-emitter.ts +41 -0
- package/extensions/design-tree/tree.ts +1607 -0
- package/extensions/design-tree/types.ts +163 -0
- package/extensions/distill.ts +127 -0
- package/extensions/effort/index.ts +395 -0
- package/extensions/effort/tiers.ts +146 -0
- package/extensions/effort/types.ts +105 -0
- package/extensions/lib/git-state.ts +227 -0
- package/extensions/lib/local-models.ts +157 -0
- package/extensions/lib/model-preferences.ts +51 -0
- package/extensions/lib/model-routing.ts +720 -0
- package/extensions/lib/operator-fallback.ts +205 -0
- package/extensions/lib/operator-profile.ts +360 -0
- package/extensions/lib/slash-command-bridge.ts +253 -0
- package/extensions/lib/typebox-helpers.ts +16 -0
- package/extensions/local-inference/index.ts +727 -0
- package/extensions/mcp-bridge/README.md +220 -0
- package/extensions/mcp-bridge/index.ts +951 -0
- package/extensions/mcp-bridge/lib.ts +365 -0
- package/extensions/mcp-bridge/mcp.json +3 -0
- package/extensions/mcp-bridge/package.json +11 -0
- package/extensions/model-budget.ts +752 -0
- package/extensions/offline-driver.ts +403 -0
- package/extensions/openspec/archive-gate.ts +164 -0
- package/extensions/openspec/branch-cleanup.ts +64 -0
- package/extensions/openspec/dashboard-state.ts +50 -0
- package/extensions/openspec/index.ts +1917 -0
- package/extensions/openspec/lifecycle-emitter.ts +65 -0
- package/extensions/openspec/lifecycle-files.ts +70 -0
- package/extensions/openspec/lifecycle.ts +50 -0
- package/extensions/openspec/reconcile.ts +187 -0
- package/extensions/openspec/spec.ts +1385 -0
- package/extensions/openspec/types.ts +98 -0
- package/extensions/project-memory/DESIGN-global-mind.md +198 -0
- package/extensions/project-memory/README.md +202 -0
- package/extensions/project-memory/api-types.ts +382 -0
- package/extensions/project-memory/compaction-policy.ts +29 -0
- package/extensions/project-memory/core.ts +164 -0
- package/extensions/project-memory/embeddings.ts +230 -0
- package/extensions/project-memory/extraction-v2.ts +861 -0
- package/extensions/project-memory/factstore.ts +2177 -0
- package/extensions/project-memory/index.ts +3459 -0
- package/extensions/project-memory/injection-metrics.ts +91 -0
- package/extensions/project-memory/jsonl-io.ts +12 -0
- package/extensions/project-memory/lifecycle.ts +331 -0
- package/extensions/project-memory/migration.ts +293 -0
- package/extensions/project-memory/package.json +9 -0
- package/extensions/project-memory/sci-renderers.ts +7 -0
- package/extensions/project-memory/template.ts +103 -0
- package/extensions/project-memory/triggers.ts +52 -0
- package/extensions/project-memory/types.ts +102 -0
- package/extensions/render/composition/fonts/Inter-Bold.ttf +0 -0
- package/extensions/render/composition/fonts/Inter-Regular.ttf +0 -0
- package/extensions/render/composition/fonts/Tomorrow-Bold.ttf +0 -0
- package/extensions/render/composition/fonts/Tomorrow-Regular.ttf +0 -0
- package/extensions/render/composition/package-lock.json +534 -0
- package/extensions/render/composition/package.json +22 -0
- package/extensions/render/composition/render.mjs +246 -0
- package/extensions/render/composition/test-comp.tsx +87 -0
- package/extensions/render/composition/types.ts +24 -0
- package/extensions/render/excalidraw/UPSTREAM.md +81 -0
- package/extensions/render/excalidraw/elements.ts +764 -0
- package/extensions/render/excalidraw/index.ts +66 -0
- package/extensions/render/excalidraw/types.ts +223 -0
- package/extensions/render/excalidraw-renderer/pyproject.toml +8 -0
- package/extensions/render/excalidraw-renderer/render_excalidraw.py +182 -0
- package/extensions/render/excalidraw-renderer/render_template.html +59 -0
- package/extensions/render/index.ts +830 -0
- package/extensions/render/native-diagrams/index.ts +57 -0
- package/extensions/render/native-diagrams/motifs.ts +542 -0
- package/extensions/render/native-diagrams/raster.ts +8 -0
- package/extensions/render/native-diagrams/scene.ts +75 -0
- package/extensions/render/native-diagrams/spec.ts +204 -0
- package/extensions/render/native-diagrams/svg.ts +116 -0
- package/extensions/sci-ui.ts +304 -0
- package/extensions/session-log.ts +174 -0
- package/extensions/shared-state.ts +146 -0
- package/extensions/spinner-verbs.ts +91 -0
- package/extensions/style.ts +281 -0
- package/extensions/terminal-title.ts +191 -0
- package/extensions/tool-profile/index.ts +291 -0
- package/extensions/tool-profile/profiles.ts +290 -0
- package/extensions/types.d.ts +9 -0
- package/extensions/vault/index.ts +185 -0
- package/extensions/version-check.ts +90 -0
- package/extensions/view/index.ts +859 -0
- package/extensions/view/uri-resolver.ts +148 -0
- package/extensions/web-search/index.ts +182 -0
- package/extensions/web-search/providers.ts +121 -0
- package/extensions/web-ui/index.ts +110 -0
- package/extensions/web-ui/server.ts +265 -0
- package/extensions/web-ui/state.ts +462 -0
- package/extensions/web-ui/static/index.html +145 -0
- package/extensions/web-ui/types.ts +284 -0
- package/package.json +76 -0
- package/prompts/init.md +75 -0
- package/prompts/new-repo.md +54 -0
- package/prompts/oci-login.md +56 -0
- package/prompts/status.md +50 -0
- package/settings.json +4 -0
- package/skills/cleave/SKILL.md +218 -0
- package/skills/git/SKILL.md +209 -0
- package/skills/git/_reference/ci-validation.md +204 -0
- package/skills/oci/SKILL.md +338 -0
- package/skills/openspec/SKILL.md +346 -0
- package/skills/pi-extensions/SKILL.md +191 -0
- package/skills/pi-tui/SKILL.md +517 -0
- package/skills/python/SKILL.md +189 -0
- package/skills/rust/SKILL.md +268 -0
- package/skills/security/SKILL.md +206 -0
- package/skills/style/SKILL.md +264 -0
- package/skills/typescript/SKILL.md +225 -0
- package/skills/vault/SKILL.md +102 -0
- package/themes/alpharius-legacy.json +85 -0
- package/themes/alpharius.conf +59 -0
- package/themes/alpharius.json +88 -0
|
@@ -0,0 +1,3121 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* cleave — Recursive task decomposition extension for pi.
|
|
3
|
+
*
|
|
4
|
+
* Provides:
|
|
5
|
+
* - `cleave_assess` tool: Assess directive complexity (LLM-callable)
|
|
6
|
+
* - `/assess` command: Code assessment toolkit (cleave, diff, spec, complexity)
|
|
7
|
+
* - `/cleave` command: Full decomposition workflow
|
|
8
|
+
* - Session-start handler: Surfaces active OpenSpec changes with task progress
|
|
9
|
+
*
|
|
10
|
+
* State machine: ASSESS → PLAN → CONFIRM → DISPATCH → HARVEST → REPORT
|
|
11
|
+
*
|
|
12
|
+
* Ported from styrene-lab/cleave (Python) — the pattern library, complexity
|
|
13
|
+
* formula, conflict detection, and worktree management are preserved.
|
|
14
|
+
* The Claude Code SDK calls are replaced with pi's extension API.
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
import type { ExtensionAPI, ExtensionCommandContext, AgentToolUpdateCallback } from "@cwilson613/pi-coding-agent";
|
|
18
|
+
import { truncateTail, DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize } from "@cwilson613/pi-coding-agent";
|
|
19
|
+
|
|
20
|
+
import { Text } from "@cwilson613/pi-tui";
|
|
21
|
+
import { Type } from "@sinclair/typebox";
|
|
22
|
+
import { spawn, execFile } from "node:child_process";
|
|
23
|
+
import { promisify } from "node:util";
|
|
24
|
+
import { createHash } from "node:crypto";
|
|
25
|
+
|
|
26
|
+
import { sharedState, DASHBOARD_UPDATE_EVENT } from "../shared-state.ts";
|
|
27
|
+
import { sciCall, sciOk, sciErr, sciExpanded } from "../sci-ui.ts";
|
|
28
|
+
import { debug } from "../debug.ts";
|
|
29
|
+
import { emitOpenSpecState } from "../openspec/dashboard-state.ts";
|
|
30
|
+
import { getSharedBridge, buildSlashCommandResult } from "../lib/slash-command-bridge.ts";
|
|
31
|
+
import { buildAssessBridgeResult } from "./bridge.ts";
|
|
32
|
+
import {
|
|
33
|
+
assessDirective,
|
|
34
|
+
PATTERNS,
|
|
35
|
+
runDesignStructuralCheck,
|
|
36
|
+
buildDesignAssessmentPrompt,
|
|
37
|
+
parseDesignAssessmentFindings,
|
|
38
|
+
type AssessCompletion,
|
|
39
|
+
type AssessEffect,
|
|
40
|
+
type AssessLifecycleHint,
|
|
41
|
+
type AssessLifecycleOutcome,
|
|
42
|
+
type AssessLifecycleRecord,
|
|
43
|
+
type AssessSpecScenarioResult,
|
|
44
|
+
type AssessSpecSummary,
|
|
45
|
+
type AssessStructuredResult,
|
|
46
|
+
type DesignAssessmentResult,
|
|
47
|
+
type DesignAssessmentFinding,
|
|
48
|
+
} from "./assessment.ts";
|
|
49
|
+
import { detectConflicts, parseTaskResult } from "./conflicts.ts";
|
|
50
|
+
import { emitResolvedBugCandidate } from "./lifecycle-emitter.ts";
|
|
51
|
+
import { dispatchChildren, resolveExecuteModel } from "./dispatcher.ts";
|
|
52
|
+
import { DEFAULT_REVIEW_CONFIG, type ReviewConfig } from "./review.ts";
|
|
53
|
+
import {
|
|
54
|
+
detectOpenSpec,
|
|
55
|
+
findExecutableChanges,
|
|
56
|
+
openspecChangeToSplitPlanWithContext,
|
|
57
|
+
buildOpenSpecContext,
|
|
58
|
+
writeBackTaskCompletion,
|
|
59
|
+
getActiveChangesStatus,
|
|
60
|
+
type OpenSpecContext,
|
|
61
|
+
} from "./openspec.ts";
|
|
62
|
+
import { buildPlannerPrompt, getRepoTree, parsePlanResponse } from "./planner.ts";
|
|
63
|
+
import {
|
|
64
|
+
matchSkillsToAllChildren,
|
|
65
|
+
resolveSkillPaths,
|
|
66
|
+
getPreferredTier,
|
|
67
|
+
} from "./skills.ts";
|
|
68
|
+
import { discoverGuardrails, runGuardrails, formatGuardrailResults } from "./guardrails.ts";
|
|
69
|
+
import type { CleaveState, ChildState, SplitPlan } from "./types.ts";
|
|
70
|
+
import { DEFAULT_CONFIG } from "./types.ts";
|
|
71
|
+
import {
|
|
72
|
+
buildCheckpointPlan,
|
|
73
|
+
classifyDirtyPaths as classifyPreflightDirtyPaths,
|
|
74
|
+
findIncompleteRuns,
|
|
75
|
+
initWorkspace,
|
|
76
|
+
loadState,
|
|
77
|
+
readTaskFiles,
|
|
78
|
+
saveState,
|
|
79
|
+
type ClassifiedDirtyPath,
|
|
80
|
+
type DirtyTreeClassification as WorkspaceDirtyTreeClassification,
|
|
81
|
+
} from "./workspace.ts";
|
|
82
|
+
import type { SkillDirective } from "./workspace.ts";
|
|
83
|
+
import {
|
|
84
|
+
cleanupWorktrees,
|
|
85
|
+
createWorktree,
|
|
86
|
+
ensureCleanWorktree,
|
|
87
|
+
getCurrentBranch,
|
|
88
|
+
mergeBranch,
|
|
89
|
+
pruneWorktreeDirs,
|
|
90
|
+
} from "./worktree.ts";
|
|
91
|
+
import { inspectGitState } from "../lib/git-state.ts";
|
|
92
|
+
|
|
93
|
+
// ─── Dashboard state emitter ────────────────────────────────────────────────
|
|
94
|
+
|
|
95
|
+
/** Map internal ChildStatus to the dashboard's simplified status. */
|
|
96
|
+
function mapChildStatus(status: string): "pending" | "running" | "done" | "failed" {
|
|
97
|
+
if (status === "completed") return "done";
|
|
98
|
+
if (status === "running" || status === "failed") return status;
|
|
99
|
+
return "pending"; // pending, needs_decomposition → pending
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
/**
|
|
103
|
+
* Emit cleave dashboard state to sharedState.cleave and fire the
|
|
104
|
+
* dashboard update event so the footer re-renders immediately.
|
|
105
|
+
*
|
|
106
|
+
* Called at lifecycle transitions so the unified dashboard can
|
|
107
|
+
* render live progress without polling.
|
|
108
|
+
*/
|
|
109
|
+
function emitCleaveState(
|
|
110
|
+
pi: ExtensionAPI,
|
|
111
|
+
status: string,
|
|
112
|
+
runId?: string,
|
|
113
|
+
children?: Array<{ label: string; status: string; durationSec?: number }>,
|
|
114
|
+
): void {
|
|
115
|
+
(sharedState as any).cleave = {
|
|
116
|
+
status,
|
|
117
|
+
runId,
|
|
118
|
+
updatedAt: Date.now(),
|
|
119
|
+
children: children?.map((c) => ({
|
|
120
|
+
label: c.label,
|
|
121
|
+
status: mapChildStatus(c.status),
|
|
122
|
+
elapsed: c.durationSec,
|
|
123
|
+
})),
|
|
124
|
+
};
|
|
125
|
+
debug("cleave", "emitState", { status, runId, childCount: children?.length });
|
|
126
|
+
pi.events.emit(DASHBOARD_UPDATE_EVENT, { source: "cleave" });
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
// ─── Helpers ────────────────────────────────────────────────────────────────
|
|
130
|
+
|
|
131
|
+
function generateRunId(): string {
|
|
132
|
+
return `clv-${Date.now().toString(36)}-${Math.random().toString(36).slice(2, 6)}`;
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
function formatAssessment(a: ReturnType<typeof assessDirective>): string {
|
|
136
|
+
const lines = [
|
|
137
|
+
"**Assessment**",
|
|
138
|
+
"",
|
|
139
|
+
` Decision: **${a.decision}**`,
|
|
140
|
+
` Complexity: ${a.complexity}`,
|
|
141
|
+
` Systems: ${a.systems}`,
|
|
142
|
+
` Modifiers: ${a.modifiers.length > 0 ? a.modifiers.join(", ") : "none"}`,
|
|
143
|
+
` Method: ${a.method}`,
|
|
144
|
+
];
|
|
145
|
+
if (a.pattern) {
|
|
146
|
+
lines.push(` Pattern: ${a.pattern} (${(a.confidence * 100).toFixed(0)}%)`);
|
|
147
|
+
}
|
|
148
|
+
lines.push("", ` ${a.reasoning}`);
|
|
149
|
+
return lines.join("\n");
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
function formatConflicts(conflicts: ReturnType<typeof detectConflicts>): string {
|
|
153
|
+
if (conflicts.length === 0) return "No conflicts detected. ✓";
|
|
154
|
+
return conflicts
|
|
155
|
+
.map(
|
|
156
|
+
(c, i) =>
|
|
157
|
+
`**Conflict ${i + 1}:** ${c.type}\n` +
|
|
158
|
+
` ${c.description}\n` +
|
|
159
|
+
` Involved: tasks ${c.involved.join(", ")}\n` +
|
|
160
|
+
` Resolution: ${c.resolution}`,
|
|
161
|
+
)
|
|
162
|
+
.join("\n\n");
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
function formatSpecVerification(ctx: OpenSpecContext): string {
|
|
166
|
+
const lines = [
|
|
167
|
+
"### Spec Verification",
|
|
168
|
+
"",
|
|
169
|
+
"The following spec scenarios should now be satisfied. **Verify each one:**",
|
|
170
|
+
"",
|
|
171
|
+
];
|
|
172
|
+
|
|
173
|
+
for (const ss of ctx.specScenarios) {
|
|
174
|
+
lines.push(`**${ss.domain} → ${ss.requirement}**`);
|
|
175
|
+
for (const scenario of ss.scenarios) {
|
|
176
|
+
// Extract just the scenario name (first line) and the Given/When/Then
|
|
177
|
+
const scenarioLines = scenario.split("\n");
|
|
178
|
+
const name = scenarioLines[0];
|
|
179
|
+
lines.push(`- [ ] ${name}`);
|
|
180
|
+
// Include Given/When/Then as indented detail
|
|
181
|
+
const gwt = scenarioLines.slice(1).filter((l) => l.trim());
|
|
182
|
+
if (gwt.length > 0) {
|
|
183
|
+
for (const l of gwt) {
|
|
184
|
+
lines.push(` ${l.trim()}`);
|
|
185
|
+
}
|
|
186
|
+
}
|
|
187
|
+
}
|
|
188
|
+
lines.push("");
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
if (ctx.apiContract) {
|
|
192
|
+
lines.push(
|
|
193
|
+
"**API Contract Conformance (`api.yaml`)**",
|
|
194
|
+
"- [ ] All contract paths/methods are implemented",
|
|
195
|
+
"- [ ] Request/response schemas match the contract",
|
|
196
|
+
"- [ ] Status codes and error responses match the contract",
|
|
197
|
+
"- [ ] No undocumented endpoints exist outside the contract",
|
|
198
|
+
"",
|
|
199
|
+
);
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
lines.push(
|
|
203
|
+
"---",
|
|
204
|
+
"Run tests, inspect the code, or manually verify each scenario above.",
|
|
205
|
+
"If all pass, the change is ready for `/opsx:archive`.",
|
|
206
|
+
);
|
|
207
|
+
|
|
208
|
+
return lines.join("\n");
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
interface DirtyTreePreflightOptions {
|
|
212
|
+
repoPath: string;
|
|
213
|
+
openspecChangePath?: string;
|
|
214
|
+
onUpdate?: AgentToolUpdateCallback<Record<string, unknown>>;
|
|
215
|
+
ui?: {
|
|
216
|
+
/** Text input — used for commit-message approval in the checkpoint flow. */
|
|
217
|
+
input?: (prompt: string, initial?: string) => Promise<string | undefined>;
|
|
218
|
+
/** Modal select — used to choose the preflight action. Falls back to input when absent. */
|
|
219
|
+
select?: (title: string, options: string[]) => Promise<string | undefined>;
|
|
220
|
+
};
|
|
221
|
+
}
|
|
222
|
+
|
|
223
|
+
/** Labelled preflight actions shown in the modal select UI. */
|
|
224
|
+
const PREFLIGHT_ACTION_OPTIONS = [
|
|
225
|
+
"checkpoint — commit related changes and continue [use when work is ready to save before cleaving]",
|
|
226
|
+
"stash-unrelated — stash unrelated/unknown files and continue [use when dirty files are not part of this change]",
|
|
227
|
+
"stash-volatile — stash volatile artifacts only [use when only build/cache artifacts are dirty]",
|
|
228
|
+
"proceed-without-cleave — skip cleave and continue working [use when you want to defer parallel dispatch]",
|
|
229
|
+
"cancel — abort cleave [use when you want to exit without making any changes]",
|
|
230
|
+
] as const;
|
|
231
|
+
|
|
232
|
+
/** Extract the action keyword from a labelled option string (text before first whitespace run ending in ' — '). */
|
|
233
|
+
function parsePreflightAction(selected: string | undefined): string | undefined {
|
|
234
|
+
if (!selected) return undefined;
|
|
235
|
+
const normalized = normalizePreflightInput(selected);
|
|
236
|
+
if (!normalized) return undefined;
|
|
237
|
+
// Trim leading spaces then grab the first non-space token.
|
|
238
|
+
return normalized.trim().split(/\s+/)[0]?.toLowerCase();
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
const TRANSIENT_CLIPBOARD_ATTACHMENT_PATH =
|
|
242
|
+
/^\/var\/folders\/[A-Za-z0-9_-]+\/[A-Za-z0-9_-]+\/T\/pi-clipboard-[A-Fa-f0-9-]+\.(?:png|jpe?g|gif|webp)$/;
|
|
243
|
+
|
|
244
|
+
function normalizePreflightInput(response: string | undefined): string | undefined {
|
|
245
|
+
const trimmed = response?.trim();
|
|
246
|
+
if (!trimmed) return undefined;
|
|
247
|
+
if (TRANSIENT_CLIPBOARD_ATTACHMENT_PATH.test(trimmed)) return undefined;
|
|
248
|
+
return trimmed;
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
function formatDirtyTreeSummary(classification: WorkspaceDirtyTreeClassification, suggestedMessage: string | null): string {
|
|
252
|
+
const renderGroup = (title: string, entries: ClassifiedDirtyPath[], empty: string): string[] => [
|
|
253
|
+
title,
|
|
254
|
+
...(entries.length > 0
|
|
255
|
+
? entries.map((entry) => `- [${entry.confidence}] \`${entry.path}\` — ${entry.reason}`)
|
|
256
|
+
: [`- ${empty}`]),
|
|
257
|
+
"",
|
|
258
|
+
];
|
|
259
|
+
const unrelatedOrUnknown = [...classification.unrelated, ...classification.unknown];
|
|
260
|
+
const lines = [
|
|
261
|
+
"### Dirty Tree Preflight",
|
|
262
|
+
"",
|
|
263
|
+
"Cleave requires an explicit preflight decision before worktree creation.",
|
|
264
|
+
"",
|
|
265
|
+
...renderGroup("**Related changes**", classification.related, "none detected"),
|
|
266
|
+
...renderGroup("**Unrelated / unknown changes**", unrelatedOrUnknown, "none detected"),
|
|
267
|
+
...renderGroup("**Volatile artifacts**", classification.volatile, "none detected"),
|
|
268
|
+
"**Actions:** `checkpoint`, `stash-unrelated`, `stash-volatile`, `proceed-without-cleave`, `cancel`",
|
|
269
|
+
...(suggestedMessage ? ["", `Suggested checkpoint commit: \`${suggestedMessage}\``] : []),
|
|
270
|
+
];
|
|
271
|
+
return lines.join("\n");
|
|
272
|
+
}
|
|
273
|
+
|
|
274
|
+
async function stashPaths(pi: ExtensionAPI, repoPath: string, label: string, entries: ClassifiedDirtyPath[]): Promise<void> {
|
|
275
|
+
if (entries.length === 0) return;
|
|
276
|
+
const args = ["stash", "push", "-u", "-m", label, "--", ...entries.map((entry) => entry.path)];
|
|
277
|
+
const result = await pi.exec("git", args, { cwd: repoPath, timeout: 15_000 });
|
|
278
|
+
if (result.code !== 0) throw new Error(result.stderr.trim() || `Failed to stash ${label}`);
|
|
279
|
+
}
|
|
280
|
+
|
|
281
|
+
async function checkpointRelatedChanges(
|
|
282
|
+
pi: ExtensionAPI,
|
|
283
|
+
repoPath: string,
|
|
284
|
+
classification: WorkspaceDirtyTreeClassification,
|
|
285
|
+
checkpointMessage: string | null,
|
|
286
|
+
ui?: { input?: (prompt: string, initial?: string) => Promise<string | undefined> },
|
|
287
|
+
): Promise<void> {
|
|
288
|
+
// When the user explicitly chooses "checkpoint", commit all non-volatile dirty
|
|
289
|
+
// files — not just those confidently classified as related. The conservative
|
|
290
|
+
// classification is for automatic decisions; an explicit user choice overrides it.
|
|
291
|
+
const allNonVolatile = [
|
|
292
|
+
...classification.related,
|
|
293
|
+
...classification.unrelated,
|
|
294
|
+
...classification.unknown,
|
|
295
|
+
].map((f) => f.path);
|
|
296
|
+
|
|
297
|
+
const filesToCommit = classification.checkpointFiles.length > 0
|
|
298
|
+
? classification.checkpointFiles
|
|
299
|
+
: allNonVolatile;
|
|
300
|
+
|
|
301
|
+
if (filesToCommit.length === 0) {
|
|
302
|
+
throw new Error(
|
|
303
|
+
"Checkpoint scope is empty — no dirty files found to commit (only volatile artifacts are dirty). " +
|
|
304
|
+
"Choose a different preflight action.",
|
|
305
|
+
);
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
// Patch classification so the rest of the function uses the resolved file list.
|
|
309
|
+
classification = { ...classification, checkpointFiles: filesToCommit };
|
|
310
|
+
if (typeof ui?.input !== "function") {
|
|
311
|
+
throw new Error("Checkpoint requires interactive approval, but input is unavailable.");
|
|
312
|
+
}
|
|
313
|
+
const suggested = checkpointMessage ?? "chore(cleave): checkpoint before cleave";
|
|
314
|
+
const response = normalizePreflightInput(await ui.input(
|
|
315
|
+
[
|
|
316
|
+
`Checkpoint ${classification.checkpointFiles.length} related file(s).`,
|
|
317
|
+
`Press Enter to approve the suggested message, type a custom commit message to approve with edits, or type 'cancel' to decline.`,
|
|
318
|
+
`Suggested message: ${suggested}`,
|
|
319
|
+
].join("\n"),
|
|
320
|
+
suggested,
|
|
321
|
+
));
|
|
322
|
+
if (!response) {
|
|
323
|
+
// Accept the suggested message when the operator confirms with Enter.
|
|
324
|
+
} else if (response.toLowerCase() === "cancel") {
|
|
325
|
+
throw new Error("Checkpoint cancelled before commit approval.");
|
|
326
|
+
}
|
|
327
|
+
const commitMessage = response && response.length > 0 ? response : suggested;
|
|
328
|
+
const addResult = await pi.exec("git", ["add", "--", ...classification.checkpointFiles], { cwd: repoPath, timeout: 15_000 });
|
|
329
|
+
if (addResult.code !== 0) {
|
|
330
|
+
throw new Error(
|
|
331
|
+
`git add failed during checkpoint — ${addResult.stderr.trim() || "unknown error staging checkpoint files"}. ` +
|
|
332
|
+
"The checkpoint was not created. Choose a different preflight action or resolve the staging error first.",
|
|
333
|
+
);
|
|
334
|
+
}
|
|
335
|
+
const commitResult = await pi.exec("git", ["commit", "-m", commitMessage, "--", ...classification.checkpointFiles], {
|
|
336
|
+
cwd: repoPath,
|
|
337
|
+
timeout: 20_000,
|
|
338
|
+
});
|
|
339
|
+
if (commitResult.code !== 0) {
|
|
340
|
+
throw new Error(
|
|
341
|
+
`git commit failed during checkpoint — ${commitResult.stderr.trim() || "unknown error creating checkpoint commit"}. ` +
|
|
342
|
+
"The checkpoint was not created. Resolve the git error and try again.",
|
|
343
|
+
);
|
|
344
|
+
}
|
|
345
|
+
}
|
|
346
|
+
|
|
347
|
+
export async function runDirtyTreePreflight(pi: ExtensionAPI, options: DirtyTreePreflightOptions): Promise<"continue" | "skip_cleave" | "cancelled"> {
|
|
348
|
+
const status = await pi.exec("git", ["status", "--porcelain"], {
|
|
349
|
+
cwd: options.repoPath,
|
|
350
|
+
timeout: 5_000,
|
|
351
|
+
});
|
|
352
|
+
const gitState = inspectGitState(status.stdout);
|
|
353
|
+
if (gitState.entries.length === 0) return "continue";
|
|
354
|
+
|
|
355
|
+
const openspecContext = options.openspecChangePath
|
|
356
|
+
? (() => {
|
|
357
|
+
try {
|
|
358
|
+
return buildOpenSpecContext(options.openspecChangePath!);
|
|
359
|
+
} catch {
|
|
360
|
+
return null;
|
|
361
|
+
}
|
|
362
|
+
})()
|
|
363
|
+
: null;
|
|
364
|
+
const changeName = options.openspecChangePath?.replace(/\\/g, "/").split("/").pop() ?? null;
|
|
365
|
+
const classification = classifyPreflightDirtyPaths(gitState.entries.map((entry) => entry.path), {
|
|
366
|
+
changeName,
|
|
367
|
+
openspecContext,
|
|
368
|
+
});
|
|
369
|
+
// Compute initial checkpoint plan for the summary display only.
|
|
370
|
+
// The plan is rebuilt from currentClassification inside the loop on each attempt (C4).
|
|
371
|
+
const initialCheckpointPlan = buildCheckpointPlan(classification, { changeName, openspecContext });
|
|
372
|
+
const summary = formatDirtyTreeSummary(classification, initialCheckpointPlan.message);
|
|
373
|
+
options.onUpdate?.({ content: [{ type: "text", text: summary }], details: { phase: "preflight" } });
|
|
374
|
+
|
|
375
|
+
if (gitState.nonVolatile.length === 0) {
|
|
376
|
+
if (classification.volatile.length > 0) {
|
|
377
|
+
await stashPaths(pi, options.repoPath, "cleave-preflight-volatile", classification.volatile);
|
|
378
|
+
options.onUpdate?.({
|
|
379
|
+
content: [{ type: "text", text: "Volatile-only dirty tree detected — stashed volatile artifacts automatically before cleave." }],
|
|
380
|
+
details: { phase: "preflight", autoResolved: "volatile_only_stash" },
|
|
381
|
+
});
|
|
382
|
+
}
|
|
383
|
+
return "continue";
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
const hasSelect = typeof options.ui?.select === "function";
|
|
387
|
+
const hasInput = typeof options.ui?.input === "function";
|
|
388
|
+
if (!hasSelect && !hasInput) {
|
|
389
|
+
throw new Error(summary + "\n\nInteractive input is unavailable, so cleave cannot resolve the dirty tree automatically.");
|
|
390
|
+
}
|
|
391
|
+
|
|
392
|
+
// Mutable classification — refreshed after each checkpoint attempt (C1/W1).
|
|
393
|
+
let currentClassification = classification;
|
|
394
|
+
|
|
395
|
+
while (true) {
|
|
396
|
+
let answer: string | undefined;
|
|
397
|
+
if (hasSelect) {
|
|
398
|
+
// Preferred path: show a modal select with labelled options and descriptions.
|
|
399
|
+
const selected = await options.ui!.select!(
|
|
400
|
+
"Dirty tree detected — choose a preflight action to proceed:",
|
|
401
|
+
[...PREFLIGHT_ACTION_OPTIONS],
|
|
402
|
+
);
|
|
403
|
+
answer = parsePreflightAction(selected);
|
|
404
|
+
} else {
|
|
405
|
+
// Fallback: raw text input (headless / test environments).
|
|
406
|
+
answer = normalizePreflightInput(
|
|
407
|
+
await options.ui!.input!("Dirty tree action [checkpoint|stash-unrelated|stash-volatile|proceed-without-cleave|cancel]:"),
|
|
408
|
+
)?.toLowerCase();
|
|
409
|
+
}
|
|
410
|
+
try {
|
|
411
|
+
switch (answer) {
|
|
412
|
+
case "checkpoint": {
|
|
413
|
+
// Rebuild the checkpoint plan from the current (possibly refreshed) classification (C4).
|
|
414
|
+
const currentCheckpointPlan = buildCheckpointPlan(currentClassification, { changeName, openspecContext });
|
|
415
|
+
const committedFiles = new Set(currentClassification.checkpointFiles);
|
|
416
|
+
await checkpointRelatedChanges(pi, options.repoPath, currentClassification, currentCheckpointPlan.message, options.ui);
|
|
417
|
+
// Re-verify cleanliness after the checkpoint commit.
|
|
418
|
+
const postCheckpointStatus = await pi.exec("git", ["status", "--porcelain"], {
|
|
419
|
+
cwd: options.repoPath,
|
|
420
|
+
timeout: 5_000,
|
|
421
|
+
});
|
|
422
|
+
const postState = inspectGitState(postCheckpointStatus.stdout);
|
|
423
|
+
if (postState.entries.length === 0) {
|
|
424
|
+
// Tree is clean — checkpoint fully resolved the dirty tree.
|
|
425
|
+
return "continue";
|
|
426
|
+
}
|
|
427
|
+
|
|
428
|
+
// Re-derive classification from the post-checkpoint state (C1).
|
|
429
|
+
currentClassification = classifyPreflightDirtyPaths(
|
|
430
|
+
postState.entries.map((e) => e.path),
|
|
431
|
+
{ changeName, openspecContext },
|
|
432
|
+
);
|
|
433
|
+
|
|
434
|
+
// C2: If only volatile files remain, auto-stash and continue.
|
|
435
|
+
if (postState.nonVolatile.length === 0 && currentClassification.volatile.length > 0) {
|
|
436
|
+
await stashPaths(pi, options.repoPath, "cleave-preflight-volatile", currentClassification.volatile);
|
|
437
|
+
options.onUpdate?.({
|
|
438
|
+
content: [{ type: "text", text: "Checkpoint succeeded. Remaining volatile artifacts stashed automatically — cleave continuing." }],
|
|
439
|
+
details: { phase: "preflight", autoResolved: "volatile_only_stash" },
|
|
440
|
+
});
|
|
441
|
+
return "continue";
|
|
442
|
+
}
|
|
443
|
+
|
|
444
|
+
// Remaining dirty files — emit precise diagnosis (W1: distinguish committed-but-still-dirty vs excluded-from-scope).
|
|
445
|
+
const remainingPaths = postState.entries.map((e) => e.path);
|
|
446
|
+
const diagnosisLines = [
|
|
447
|
+
"Checkpoint committed successfully, but dirty files remain — cleave cannot continue yet:",
|
|
448
|
+
...currentClassification.related.map((f) =>
|
|
449
|
+
committedFiles.has(f.path)
|
|
450
|
+
? ` • ${f.path} [was committed but remains dirty — file may have been modified after staging or only partially staged]`
|
|
451
|
+
: ` • ${f.path} [related but excluded from checkpoint scope — confidence too low to commit automatically]`
|
|
452
|
+
),
|
|
453
|
+
...currentClassification.unrelated.map((f) => ` • ${f.path} [unrelated: ${f.reason}]`),
|
|
454
|
+
...currentClassification.unknown.map((f) => ` • ${f.path} [unknown — not in change scope, was not checkpointed]`),
|
|
455
|
+
...currentClassification.volatile.map((f) => ` • ${f.path} [volatile artifact — will be auto-stashed]`),
|
|
456
|
+
"",
|
|
457
|
+
"Choose another preflight action to resolve the remaining files.",
|
|
458
|
+
];
|
|
459
|
+
options.onUpdate?.({
|
|
460
|
+
content: [{ type: "text", text: diagnosisLines.join("\n") }],
|
|
461
|
+
details: { phase: "preflight", postCheckpointDirty: remainingPaths },
|
|
462
|
+
});
|
|
463
|
+
break;
|
|
464
|
+
}
|
|
465
|
+
case "stash-unrelated":
|
|
466
|
+
// C1: Use currentClassification (refreshed after checkpoint) not the stale original.
|
|
467
|
+
await stashPaths(pi, options.repoPath, "cleave-preflight-unrelated", [...currentClassification.unrelated, ...currentClassification.unknown]);
|
|
468
|
+
return "continue";
|
|
469
|
+
case "stash-volatile":
|
|
470
|
+
await stashPaths(pi, options.repoPath, "cleave-preflight-volatile", currentClassification.volatile);
|
|
471
|
+
return "continue";
|
|
472
|
+
case "proceed-without-cleave":
|
|
473
|
+
return "skip_cleave";
|
|
474
|
+
case "cancel":
|
|
475
|
+
case "":
|
|
476
|
+
return "cancelled";
|
|
477
|
+
default:
|
|
478
|
+
options.onUpdate?.({
|
|
479
|
+
content: [{ type: "text", text: "Invalid preflight action. Choose checkpoint, stash-unrelated, stash-volatile, proceed-without-cleave, or cancel." }],
|
|
480
|
+
details: { phase: "preflight" },
|
|
481
|
+
});
|
|
482
|
+
}
|
|
483
|
+
} catch (error) {
|
|
484
|
+
const message = error instanceof Error ? error.message : String(error);
|
|
485
|
+
options.onUpdate?.({
|
|
486
|
+
content: [{ type: "text", text: `Preflight action failed: ${message}` }],
|
|
487
|
+
details: { phase: "preflight" },
|
|
488
|
+
});
|
|
489
|
+
}
|
|
490
|
+
}
|
|
491
|
+
}
|
|
492
|
+
|
|
493
|
+
/**
 * Minimal execution context an /assess executor needs. A richer host context
 * (e.g. ExtensionCommandContext) may satisfy this structurally — see
 * isInteractiveAssessContext.
 */
interface AssessExecutionContext {
  /** Working directory for the assessment; used as cwd for git commands. */
  cwd: string;
  /** True when invoked via the bridge rather than interactively by a user. */
  bridgeInvocation?: boolean;
  /** True when a UI is attached (required for interactive flows). */
  hasUI?: boolean;
  /** Active model descriptor; only the id is consumed here. */
  model?: { id?: string };
  /** Resolves once the host session is idle; presence marks an interactive context. */
  waitForIdle?: (() => Promise<void>) | undefined;
}
|
|
500
|
+
|
|
501
|
+
/** Git diff material gathered for a review prompt (see collectAssessDiffContext). */
interface AssessDiffContext {
  /** Ref the diff was taken against, or the literal "unstaged" for a working-tree diff. */
  ref: string;
  /** Output of `git diff --stat`. */
  diffStat: string;
  /** Full diff text, truncated to 40KB by the collector. */
  diffContent: string;
  /** Output of `git log --oneline -10` (may be empty for unstaged fallback). */
  recentLog: string;
}
|
|
507
|
+
|
|
508
|
+
function makeAssessResult<TData>(input: {
|
|
509
|
+
subcommand: AssessStructuredResult<TData>["subcommand"];
|
|
510
|
+
args: string;
|
|
511
|
+
ok: boolean;
|
|
512
|
+
summary: string;
|
|
513
|
+
humanText: string;
|
|
514
|
+
data: TData;
|
|
515
|
+
effects?: AssessEffect[];
|
|
516
|
+
nextSteps?: string[];
|
|
517
|
+
completion?: AssessCompletion;
|
|
518
|
+
lifecycle?: AssessLifecycleHint;
|
|
519
|
+
lifecycleRecord?: AssessLifecycleRecord;
|
|
520
|
+
}): AssessStructuredResult<TData> {
|
|
521
|
+
return {
|
|
522
|
+
command: "assess",
|
|
523
|
+
subcommand: input.subcommand,
|
|
524
|
+
args: input.args,
|
|
525
|
+
ok: input.ok,
|
|
526
|
+
summary: input.summary,
|
|
527
|
+
humanText: input.humanText,
|
|
528
|
+
data: input.data,
|
|
529
|
+
effects: input.effects ?? [],
|
|
530
|
+
nextSteps: input.nextSteps ?? [],
|
|
531
|
+
completion: input.completion,
|
|
532
|
+
lifecycle: input.lifecycle,
|
|
533
|
+
lifecycleRecord: input.lifecycleRecord,
|
|
534
|
+
};
|
|
535
|
+
}
|
|
536
|
+
|
|
537
|
+
async function collectAssessmentSnapshot(pi: ExtensionAPI, cwd: string): Promise<{ gitHead: string | null; fingerprint: string }> {
|
|
538
|
+
let gitHead: string | null = null;
|
|
539
|
+
let status = "";
|
|
540
|
+
|
|
541
|
+
try {
|
|
542
|
+
const head = await pi.exec("git", ["rev-parse", "--short", "HEAD"], { cwd, timeout: 5_000 });
|
|
543
|
+
if (head.code === 0) gitHead = head.stdout.trim() || null;
|
|
544
|
+
} catch {
|
|
545
|
+
/* proceed with null gitHead */
|
|
546
|
+
}
|
|
547
|
+
|
|
548
|
+
try {
|
|
549
|
+
const diff = await pi.exec("git", ["status", "--short", "--untracked-files=all"], { cwd, timeout: 5_000 });
|
|
550
|
+
if (diff.code === 0) status = diff.stdout.trim();
|
|
551
|
+
} catch {
|
|
552
|
+
/* proceed with empty status */
|
|
553
|
+
}
|
|
554
|
+
|
|
555
|
+
const fingerprint = createHash("sha256")
|
|
556
|
+
.update(gitHead ?? "nogit")
|
|
557
|
+
.update("\n")
|
|
558
|
+
.update(status)
|
|
559
|
+
.digest("hex");
|
|
560
|
+
|
|
561
|
+
return { gitHead, fingerprint };
|
|
562
|
+
}
|
|
563
|
+
|
|
564
|
+
async function buildLifecycleRecord(
|
|
565
|
+
pi: ExtensionAPI,
|
|
566
|
+
cwd: string,
|
|
567
|
+
options: {
|
|
568
|
+
changeName: string;
|
|
569
|
+
assessmentKind: "spec" | "cleave";
|
|
570
|
+
outcome: AssessLifecycleOutcome;
|
|
571
|
+
recommendedAction: string | null;
|
|
572
|
+
changedFiles?: string[];
|
|
573
|
+
constraints?: string[];
|
|
574
|
+
snapshot?: { gitHead: string | null; fingerprint: string };
|
|
575
|
+
},
|
|
576
|
+
): Promise<AssessLifecycleRecord> {
|
|
577
|
+
const snapshot = options.snapshot ?? await collectAssessmentSnapshot(pi, cwd);
|
|
578
|
+
return {
|
|
579
|
+
changeName: options.changeName,
|
|
580
|
+
assessmentKind: options.assessmentKind,
|
|
581
|
+
outcome: options.outcome,
|
|
582
|
+
timestamp: new Date().toISOString(),
|
|
583
|
+
snapshot,
|
|
584
|
+
reconciliation: {
|
|
585
|
+
reopen: options.outcome === "reopen",
|
|
586
|
+
changedFiles: [...new Set((options.changedFiles ?? []).map((file) => file.trim()).filter(Boolean))],
|
|
587
|
+
constraints: [...new Set((options.constraints ?? []).map((constraint) => constraint.trim()).filter(Boolean))],
|
|
588
|
+
recommendedAction: options.recommendedAction,
|
|
589
|
+
},
|
|
590
|
+
};
|
|
591
|
+
}
|
|
592
|
+
|
|
593
|
+
/** Parsed JSON payload the spec-assessment agent is instructed to return. */
interface AssessSpecAgentResult {
  /** Aggregate pass/fail/unclear counts over all scenarios. */
  summary: AssessSpecSummary;
  /** One entry per assessed scenario, in the order the agent emitted them. */
  scenarios: AssessSpecScenarioResult[];
  /** Files needing modification if the result reopens work (optional in raw payload). */
  changedFiles?: string[];
  /** Newly discovered implementation constraints (optional in raw payload). */
  constraints?: string[];
  /** Free-form closing remarks from the agent. */
  overallNotes?: string;
}
|
|
600
|
+
|
|
601
|
+
/** Inputs needed to run one spec assessment (see runSpecAssessmentSubprocess). */
interface SpecAssessmentRunnerInput {
  /** Repository root; used as cwd for the child process. */
  repoPath: string;
  /** OpenSpec change being assessed; also used in error/timeout messages. */
  changeName: string;
  /** Rendered acceptance-criteria markdown for the prompt. */
  scenarioText: string;
  /** Pre-rendered design-decision prompt lines (may be empty). */
  designContext: string[];
  /** Pre-rendered API-contract prompt lines (may be empty). */
  apiContractContext: string[];
  /** Recent diff text to include in the prompt; empty string omits the section. */
  diffContent: string;
  /** Exact number of scenario entries the agent must emit; enforced on parse. */
  expectedScenarioCount: number;
  /** Optional model override passed through as `--model`. */
  modelId?: string;
}
|
|
611
|
+
|
|
612
|
+
/** Result of a spec-assessment run. */
interface SpecAssessmentRunnerOutput {
  /** Normalized agent payload (counts validated against the expected total). */
  assessed: AssessSpecAgentResult;
  /** Optional repo snapshot; the subprocess runner does not populate this itself. */
  snapshot?: { gitHead: string | null; fingerprint: string };
}
|
|
616
|
+
|
|
617
|
+
/** Injection seam for the spec-assessment runner — presumably used by tests to replace the subprocess; confirm at call sites. */
interface AssessExecutorOverrides {
  runSpecAssessment?: (input: SpecAssessmentRunnerInput) => Promise<SpecAssessmentRunnerOutput>;
}
|
|
620
|
+
|
|
621
|
+
function isInteractiveAssessContext(ctx: AssessExecutionContext): ctx is AssessExecutionContext & ExtensionCommandContext {
|
|
622
|
+
return ctx.bridgeInvocation !== true && ctx.hasUI === true && typeof ctx.waitForIdle === "function";
|
|
623
|
+
}
|
|
624
|
+
|
|
625
|
+
function countSpecScenarios(specCtx: OpenSpecContext): number {
|
|
626
|
+
return specCtx.specScenarios.reduce((total, scenarioSet) => total + scenarioSet.scenarios.length, 0);
|
|
627
|
+
}
|
|
628
|
+
|
|
629
|
+
function determineSpecOutcome(summary: AssessSpecSummary): AssessLifecycleOutcome {
|
|
630
|
+
if (summary.fail > 0) return "reopen";
|
|
631
|
+
if (summary.unclear > 0) return "ambiguous";
|
|
632
|
+
return "pass";
|
|
633
|
+
}
|
|
634
|
+
|
|
635
|
+
function normalizeSpecAssessment(payload: AssessSpecAgentResult, expectedTotal: number): AssessSpecAgentResult {
|
|
636
|
+
const scenarios = payload.scenarios.map((scenario) => ({
|
|
637
|
+
...scenario,
|
|
638
|
+
evidence: [...new Set((scenario.evidence ?? []).map((entry) => entry.trim()).filter(Boolean))],
|
|
639
|
+
notes: scenario.notes?.trim() || undefined,
|
|
640
|
+
}));
|
|
641
|
+
const summary: AssessSpecSummary = {
|
|
642
|
+
total: payload.summary.total,
|
|
643
|
+
pass: payload.summary.pass,
|
|
644
|
+
fail: payload.summary.fail,
|
|
645
|
+
unclear: payload.summary.unclear,
|
|
646
|
+
};
|
|
647
|
+
if (summary.total !== expectedTotal || scenarios.length !== expectedTotal) {
|
|
648
|
+
throw new Error(`Assessment returned ${scenarios.length}/${expectedTotal} scenarios.`);
|
|
649
|
+
}
|
|
650
|
+
return {
|
|
651
|
+
summary,
|
|
652
|
+
scenarios,
|
|
653
|
+
changedFiles: [...new Set((payload.changedFiles ?? []).map((entry) => entry.trim()).filter(Boolean))],
|
|
654
|
+
constraints: [...new Set((payload.constraints ?? []).map((entry) => entry.trim()).filter(Boolean))],
|
|
655
|
+
overallNotes: payload.overallNotes?.trim() || undefined,
|
|
656
|
+
};
|
|
657
|
+
}
|
|
658
|
+
|
|
659
|
+
function extractJsonObject(text: string): string | null {
|
|
660
|
+
const fenced = text.match(/```json\s*([\s\S]*?)```/i);
|
|
661
|
+
if (fenced?.[1]) return fenced[1].trim();
|
|
662
|
+
const firstBrace = text.indexOf("{");
|
|
663
|
+
const lastBrace = text.lastIndexOf("}");
|
|
664
|
+
if (firstBrace === -1 || lastBrace === -1 || lastBrace <= firstBrace) return null;
|
|
665
|
+
return text.slice(firstBrace, lastBrace + 1).trim();
|
|
666
|
+
}
|
|
667
|
+
|
|
668
|
+
function extractAssistantText(content: unknown): string {
|
|
669
|
+
if (typeof content === "string") return content.trim();
|
|
670
|
+
if (!Array.isArray(content)) return "";
|
|
671
|
+
return content
|
|
672
|
+
.map((item) => {
|
|
673
|
+
if (typeof item === "string") return item;
|
|
674
|
+
if (!item || typeof item !== "object") return "";
|
|
675
|
+
return typeof (item as { text?: unknown }).text === "string"
|
|
676
|
+
? (item as { text: string }).text
|
|
677
|
+
: "";
|
|
678
|
+
})
|
|
679
|
+
.join("\n")
|
|
680
|
+
.trim();
|
|
681
|
+
}
|
|
682
|
+
|
|
683
|
+
function formatSpecOutcomeLabel(outcome: AssessLifecycleOutcome): string {
|
|
684
|
+
switch (outcome) {
|
|
685
|
+
case "pass":
|
|
686
|
+
return "PASS";
|
|
687
|
+
case "reopen":
|
|
688
|
+
return "REOPEN";
|
|
689
|
+
case "ambiguous":
|
|
690
|
+
return "AMBIGUOUS";
|
|
691
|
+
}
|
|
692
|
+
}
|
|
693
|
+
|
|
694
|
+
// Renders a markdown summary of a completed spec assessment: header, outcome
// banner, pass/fail/unclear counts, one bullet per scenario (with evidence and
// notes), and optional overall notes. Pure string formatting — no I/O.
// NOTE(review): leading indentation inside the per-scenario template strings
// may have been collapsed by formatting — confirm against upstream source.
function buildSpecAssessmentHumanText(changeName: string, assessed: AssessSpecAgentResult, outcome: AssessLifecycleOutcome): string {
  const lines = [
    `**Spec Assessment Complete: \`${changeName}\`**`,
    "",
    `Outcome: **${formatSpecOutcomeLabel(outcome)}**`,
    // fail/unclear buckets are only mentioned when non-zero.
    `Scenarios: ${assessed.summary.pass}/${assessed.summary.total} pass` +
      (assessed.summary.fail > 0 ? `, ${assessed.summary.fail} fail` : "") +
      (assessed.summary.unclear > 0 ? `, ${assessed.summary.unclear} unclear` : ""),
  ];

  for (const scenario of assessed.scenarios) {
    lines.push(
      "",
      `- [${scenario.status}] ${scenario.domain} → ${scenario.requirement}`,
      // Scenario text is flattened to one line for the bullet list.
      ` ${scenario.scenario.replace(/\n/g, " ")}`,
      ...scenario.evidence.map((entry) => ` Evidence: ${entry}`),
      ...(scenario.notes ? [` Notes: ${scenario.notes}`] : []),
    );
  }

  if (assessed.overallNotes) {
    lines.push("", `Overall notes: ${assessed.overallNotes}`);
  }

  return lines.join("\n");
}
|
|
720
|
+
|
|
721
|
+
/**
 * Runs a spec-compliance assessment in a child `pi` CLI process.
 *
 * Builds a read-only "return JSON only" prompt from the runner input, pipes it
 * to `pi --mode json --plan -p --no-session` via stdin, and consumes the
 * newline-delimited JSON event stream on stdout. The last assistant
 * `message_end` event supplies the text expected to contain the result JSON.
 *
 * Rejects on: spawn error, non-zero exit, 120s timeout (SIGTERM, then SIGKILL
 * after a 5s grace period), unparseable output, or a scenario-count mismatch
 * raised by normalizeSpecAssessment.
 *
 * NOTE(review): the resolved output never includes `snapshot` — presumably a
 * caller attaches it separately; confirm at call sites.
 */
async function runSpecAssessmentSubprocess(
  input: SpecAssessmentRunnerInput,
): Promise<SpecAssessmentRunnerOutput> {
  // Prompt: strict JSON-shape contract first, then rules, then the change's
  // acceptance criteria and optional design/API-contract/diff context.
  const prompt = [
    "You are performing a read-only OpenSpec compliance assessment.",
    "Operate in read-only plan mode. Never call edit, write, or any workspace-mutating command.",
    "Inspect the repository and determine whether the implementation satisfies every OpenSpec scenario below.",
    "Return ONLY a JSON object with this exact shape:",
    "{",
    ' "summary": { "total": number, "pass": number, "fail": number, "unclear": number },',
    ' "scenarios": [',
    ' { "domain": string, "requirement": string, "scenario": string, "status": "PASS"|"FAIL"|"UNCLEAR", "evidence": string[], "notes"?: string }',
    " ],",
    ' "changedFiles": string[],',
    ' "constraints": string[],',
    ' "overallNotes"?: string',
    "}",
    "Rules:",
    `- Emit exactly ${input.expectedScenarioCount} scenario entries.`,
    "- Use FAIL when the code clearly contradicts or omits the scenario.",
    "- Use UNCLEAR only when code inspection cannot safely prove PASS or FAIL.",
    "- Evidence must cite concrete files, symbols, or line references when possible.",
    "- changedFiles should list files that would need modification if the result reopens work.",
    "- constraints should list newly discovered implementation constraints.",
    "- Do not wrap the JSON in explanatory prose.",
    "",
    `Change: ${input.changeName}`,
    "",
    "## Acceptance Criteria",
    "",
    input.scenarioText,
    "",
    ...input.designContext,
    ...input.apiContractContext,
    // Diff section is omitted entirely when there is no diff content.
    ...(input.diffContent ? ["### Recent Changes", "", "```diff", input.diffContent, "```", ""] : []),
  ].join("\n");

  const args = ["--mode", "json", "--plan", "-p", "--no-session"];
  if (input.modelId) args.push("--model", input.modelId);

  return await new Promise<SpecAssessmentRunnerOutput>((resolve, reject) => {
    const proc = spawn("pi", args, {
      cwd: input.repoPath,
      shell: false,
      stdio: ["pipe", "pipe", "pipe"],
      env: {
        ...process.env,
        // Marks the process as a child invocation; TERM defaults to "dumb"
        // so no interactive terminal features are assumed.
        PI_CHILD: "1",
        TERM: process.env.TERM ?? "dumb",
      },
    });
    let stdout = "";
    let stderr = "";
    let buffer = ""; // partial NDJSON line carried between stdout chunks
    let assistantText = ""; // text of the most recent assistant message_end
    let settled = false;
    // settleReject/settleResolve guard against double settlement (e.g. timeout
    // firing after close) and always clear the watchdog timer.
    const settleReject = (error: Error) => {
      if (settled) return;
      settled = true;
      clearTimeout(timer);
      reject(error);
    };
    const settleResolve = (value: SpecAssessmentRunnerOutput) => {
      if (settled) return;
      settled = true;
      clearTimeout(timer);
      resolve(value);
    };
    // Watchdog: SIGTERM at 120s, escalate to SIGKILL 5s later if still alive.
    const timer = setTimeout(() => {
      proc.kill("SIGTERM");
      setTimeout(() => {
        if (!proc.killed) proc.kill("SIGKILL");
      }, 5_000);
      settleReject(new Error(`Timed out after 120s while assessing ${input.changeName}.`));
    }, 120_000);

    // Parse one NDJSON line: accumulate raw stdout, skip non-JSON lines, and
    // capture the assistant's final message text.
    const processLine = (line: string) => {
      if (!line.trim()) return;
      stdout += line + "\n";
      let event: unknown;
      try {
        event = JSON.parse(line);
      } catch {
        return;
      }
      if (!event || typeof event !== "object") return;
      const typed = event as { type?: string; message?: { role?: string; content?: unknown } };
      if (typed.type === "message_end" && typed.message?.role === "assistant") {
        assistantText = extractAssistantText(typed.message.content);
      }
    };

    proc.stdout.on("data", (data) => {
      buffer += data.toString();
      const lines = buffer.split("\n");
      // Keep the trailing partial line for the next chunk.
      buffer = lines.pop() || "";
      for (const line of lines) processLine(line);
    });
    proc.stderr.on("data", (data) => {
      stderr += data.toString();
    });
    proc.on("error", (error) => {
      settleReject(error);
    });
    proc.on("close", (code) => {
      // Flush any final partial line before evaluating the result.
      if (buffer.trim()) processLine(buffer.trim());
      if ((code ?? 1) !== 0) {
        settleReject(new Error(stderr.trim() || `Assessment subprocess exited with code ${code ?? 1}.`));
        return;
      }
      // Prefer the assistant message; fall back to raw stdout if none was seen.
      const sourceText = assistantText || stdout;
      const jsonText = extractJsonObject(sourceText);
      if (!jsonText) {
        settleReject(new Error(`Assessment subprocess did not return parseable JSON.\n${stderr || stdout}`));
        return;
      }
      try {
        const parsed = JSON.parse(jsonText) as AssessSpecAgentResult;
        settleResolve({
          assessed: normalizeSpecAssessment(parsed, input.expectedScenarioCount),
        });
      } catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        settleReject(new Error(`Assessment JSON was invalid: ${message}`));
      }
    });

    // Deliver the prompt over stdin and close it so the child can finish.
    proc.stdin.write(prompt);
    proc.stdin.end();
  });
}
|
|
852
|
+
|
|
853
|
+
function applyAssessEffects(pi: ExtensionAPI, result: AssessStructuredResult): void {
|
|
854
|
+
for (const effect of result.effects) {
|
|
855
|
+
if (effect.type === "view") {
|
|
856
|
+
pi.sendMessage({
|
|
857
|
+
customType: "view",
|
|
858
|
+
content: effect.content,
|
|
859
|
+
display: effect.display ?? true,
|
|
860
|
+
});
|
|
861
|
+
continue;
|
|
862
|
+
}
|
|
863
|
+
if (effect.type === "follow_up") {
|
|
864
|
+
pi.sendUserMessage(effect.content, { deliverAs: "followUp" });
|
|
865
|
+
}
|
|
866
|
+
}
|
|
867
|
+
}
|
|
868
|
+
|
|
869
|
+
/**
 * Gathers git diff material (stat, content, recent log) relative to `ref`.
 *
 * When `ref` is empty and `fallbackToUnstaged` is true, probes HEAD~3, HEAD~2,
 * HEAD~1 in that (deepest-first) order and uses the first that verifies. If a
 * ref-based diff yields nothing and fallback is allowed, falls back to the
 * unstaged working-tree diff with the ref reported as the literal "unstaged".
 * Diff content is capped at 40KB for downstream prompt budgets. Returns null
 * when no diff output could be collected at all. All git failures are
 * swallowed and treated as "no output".
 */
async function collectAssessDiffContext(
  pi: ExtensionAPI,
  cwd: string,
  ref: string,
  fallbackToUnstaged: boolean,
): Promise<AssessDiffContext | null> {
  let effectiveRef = ref;
  let diffStat = "";
  let diffContent = "";
  let recentLog = "";

  // No explicit ref: pick the deepest reachable ancestor among HEAD~3..HEAD~1.
  if (!effectiveRef && fallbackToUnstaged) {
    for (const candidate of ["HEAD~3", "HEAD~2", "HEAD~1"]) {
      try {
        const test = await pi.exec("git", ["rev-parse", "--verify", candidate], { cwd, timeout: 3_000 });
        if (test.code === 0) {
          effectiveRef = candidate;
          break;
        }
      } catch {
        /* try next */
      }
    }
  }

  // Ref-based collection: stat, full diff (truncated), and a 10-entry log.
  if (effectiveRef) {
    try {
      const stat = await pi.exec("git", ["diff", "--stat", effectiveRef], { cwd, timeout: 5_000 });
      diffStat = stat.stdout.trim();
      const diff = await pi.exec("git", ["diff", effectiveRef], { cwd, timeout: 10_000 });
      diffContent = diff.stdout.slice(0, 40_000);
      const log = await pi.exec("git", ["log", "--oneline", "-10"], { cwd, timeout: 5_000 });
      recentLog = log.stdout.trim();
    } catch {
      /* fall through */
    }
  }

  // Nothing from the ref path: fall back to the unstaged working-tree diff.
  // recentLog stays empty in this branch.
  if (!diffStat && !diffContent && fallbackToUnstaged) {
    try {
      const stat = await pi.exec("git", ["diff", "--stat"], { cwd, timeout: 5_000 });
      diffStat = stat.stdout.trim();
      const diff = await pi.exec("git", ["diff"], { cwd, timeout: 10_000 });
      diffContent = diff.stdout.slice(0, 40_000);
      effectiveRef = "unstaged";
    } catch {
      /* proceed without git diff */
    }
  }

  if (!diffStat && !diffContent) return null;
  return { ref: effectiveRef, diffStat, diffContent, recentLog };
}
|
|
922
|
+
|
|
923
|
+
function buildGuardrailPreamble(cwd: string): string {
|
|
924
|
+
try {
|
|
925
|
+
const checks = discoverGuardrails(cwd);
|
|
926
|
+
if (checks.length === 0) return "";
|
|
927
|
+
const suite = runGuardrails(cwd, checks);
|
|
928
|
+
return formatGuardrailResults(suite);
|
|
929
|
+
} catch {
|
|
930
|
+
return "";
|
|
931
|
+
}
|
|
932
|
+
}
|
|
933
|
+
|
|
934
|
+
/**
 * `/assess cleave` — prepares an adversarial review → auto-fix pipeline.
 *
 * Collects a diff (explicit ref from `args`, else recent-ancestor or unstaged
 * fallback), picks the most recently modified active OpenSpec change (if any)
 * as the lifecycle reconciliation target, runs deterministic guardrails, and
 * emits: a short "starting" view, a follow-up prompt instructing the model to
 * review, categorize (C/W/N), and immediately fix Critical/Warning issues,
 * plus an optional reconcile hint.
 */
async function executeAssessCleave(
  pi: ExtensionAPI,
  ctx: AssessExecutionContext,
  args: string,
): Promise<AssessStructuredResult> {
  const diffContext = await collectAssessDiffContext(pi, ctx.cwd, args.trim(), true);
  // No diff at all: report and stop — there is nothing to review.
  if (!diffContext) {
    return makeAssessResult({
      subcommand: "cleave",
      args,
      ok: false,
      summary: "No recent changes found",
      humanText: "No recent changes found. Nothing to assess.",
      data: { reason: "no_changes" },
      effects: [{ type: "view", content: "No recent changes found. Nothing to assess." }],
    });
  }

  // Reconciliation target: the most recently touched active change with tasks.
  const activeOpenSpec = getActiveChangesStatus(ctx.cwd)
    .filter((status) => status.totalTasks > 0)
    .sort((a, b) => b.lastModifiedMs - a.lastModifiedMs);
  const targetChange = activeOpenSpec[0]?.name;
  const lifecycle = targetChange
    ? { changeName: targetChange, assessmentKind: "cleave" as const, outcomes: ["pass", "reopen", "ambiguous"] as const }
    : undefined;
  // Extra prompt tail telling the model how to reconcile lifecycle state
  // after the review; only emitted when a target change exists.
  const postAssessInstruction = targetChange
    ? [
        "",
        `After review/fixes/tests, call \`openspec_manage\` with action \`reconcile_after_assess\`, change_name \`${targetChange}\`, assessment_kind \`cleave\`, and outcome:`,
        "- `pass` if all Critical/Warning work is resolved cleanly",
        "- `reopen` if remaining work or follow-up fixes reopen implementation",
        "- `ambiguous` if you cannot safely map reviewer findings back to task state",
        "Include `changed_files` for any follow-up fix files and `constraints` for new implementation constraints discovered during review.",
      ]
    : [];
  const guardrailPreamble = buildGuardrailPreamble(ctx.cwd);
  // Review prompt: optional deterministic-analysis section, then the three-step
  // review → categorize → fix instructions with the diff material inlined.
  const prompt = [
    "## Adversarial Review → Auto-Fix Pipeline",
    "",
    "You are doing an adversarial code review of recent changes.",
    "Your job is to find real issues, then fix them automatically.",
    "",
    ...(guardrailPreamble ? [
      "### Deterministic Analysis",
      "",
      guardrailPreamble,
      "",
      "The above are compiler/linter findings — treat failures as Critical issues.",
      "",
    ] : []),
    "### Step 1: Review",
    "",
    "Analyze these recent changes for:",
    "- **Critical bugs**: logic errors, race conditions, missing error handling",
    "- **Warnings**: misleading names, missing edge cases, fragile patterns",
    "- **Nits**: dead code, style inconsistencies (low priority)",
    "",
    "Recent commits:",
    "```",
    diffContext.recentLog,
    "```",
    "",
    "Diff stat:",
    "```",
    diffContext.diffStat,
    "```",
    "",
    "Full diff (truncated to 40KB):",
    "```diff",
    diffContext.diffContent,
    "```",
    "",
    "### Step 2: Categorize",
    "",
    "Present findings as a numbered list grouped by severity:",
    "- **C1, C2...** for critical issues",
    "- **W1, W2...** for warnings",
    "- **N1, N2...** for nits",
    "",
    "### Step 3: Fix",
    "",
    "After presenting the list, **immediately fix all Critical and Warning issues**.",
    "Do NOT wait for confirmation — the user invoked `/assess cleave` which means",
    '"assess and fix in one shot". Work through C and W items systematically.',
    "Nits are optional — fix them if trivial, skip if not.",
    "",
    "After all fixes, run the test suite to verify nothing broke.",
    "Then commit with a conventional commit message summarizing all fixes.",
    ...postAssessInstruction,
  ].join("\n");
  // Placeholder lifecycle record with outcome "ambiguous"; the real outcome is
  // reconciled later via openspec_manage (see recommendedAction).
  const lifecycleRecord = targetChange
    ? await buildLifecycleRecord(pi, ctx.cwd, {
        changeName: targetChange,
        assessmentKind: "cleave",
        outcome: "ambiguous",
        recommendedAction: `Run openspec_manage reconcile_after_assess ${targetChange} with outcome pass, reopen, or ambiguous after review completes.`,
      })
    : undefined;
  const humanText = [
    "**Assess → Cleave pipeline starting...**",
    "",
    `Reviewing changes since \`${diffContext.ref}\`:`,
    "```",
    diffContext.diffStat,
    "```",
  ].join("\n");
  const nextSteps = ["Review findings", "Apply all Critical and Warning fixes", "Run verification and reconcile lifecycle state if needed"];
  return makeAssessResult({
    subcommand: "cleave",
    args,
    ok: true,
    summary: `Prepared adversarial review for ${diffContext.ref}`,
    humanText,
    data: {
      ref: diffContext.ref,
      diffStat: diffContext.diffStat,
      recentLog: diffContext.recentLog,
      hasGuardrails: Boolean(guardrailPreamble),
      reconcileChange: targetChange ?? null,
      snapshot: lifecycleRecord?.snapshot ?? null,
    },
    effects: [
      { type: "view", content: humanText },
      { type: "follow_up", content: prompt },
      ...(lifecycle ? [{ type: "reconcile_hint" as const, ...lifecycle }] : []),
    ],
    nextSteps,
    lifecycle,
    lifecycleRecord,
  });
}
|
|
1065
|
+
|
|
1066
|
+
/**
 * `/assess diff` — review-only variant of the assess pipeline.
 *
 * Diffs against the requested ref (default HEAD~1, no unstaged fallback),
 * runs deterministic guardrails, and emits a follow-up prompt asking for an
 * adversarial review with C/W/N categorization — explicitly WITHOUT applying
 * fixes (contrast with executeAssessCleave).
 */
async function executeAssessDiff(
  pi: ExtensionAPI,
  ctx: AssessExecutionContext,
  args: string,
): Promise<AssessStructuredResult> {
  const requestedRef = args.trim() || "HEAD~1";
  // fallbackToUnstaged=false: an explicit ref comparison or nothing.
  const diffContext = await collectAssessDiffContext(pi, ctx.cwd, requestedRef, false);
  if (!diffContext) {
    const humanText = `No changes found relative to \`${requestedRef}\`.`;
    return makeAssessResult({
      subcommand: "diff",
      args,
      ok: false,
      summary: `No diff found for ${requestedRef}`,
      humanText,
      data: { ref: requestedRef, reason: "no_changes" },
      effects: [{ type: "view", content: humanText }],
    });
  }

  const guardrailPreamble = buildGuardrailPreamble(ctx.cwd);
  // Short status shown to the user while the review prompt runs.
  const humanText = [
    `**Assessing diff since \`${diffContext.ref}\`...**`,
    "```",
    diffContext.diffStat,
    "```",
  ].join("\n");
  // Review prompt: optional guardrail findings, severity legend, diff material,
  // and an explicit "findings only, no fixes" instruction.
  const prompt = [
    `## Code Review: diff since \`${diffContext.ref}\``,
    "",
    "Do an adversarial code review of these changes.",
    "Find bugs, fragile patterns, missing edge cases, and style issues.",
    "",
    ...(guardrailPreamble ? [
      "### Deterministic Analysis",
      "",
      guardrailPreamble,
      "",
      "The above are compiler/linter findings — treat failures as Critical issues.",
      "",
    ] : []),
    "Categorize findings as:",
    "- **C1, C2...** Critical (logic errors, security, data loss)",
    "- **W1, W2...** Warning (fragile, misleading, missing cases)",
    "- **N1, N2...** Nit (style, dead code, minor)",
    "",
    "Diff stat:",
    "```",
    diffContext.diffStat,
    "```",
    "",
    "```diff",
    diffContext.diffContent,
    "```",
    "",
    "Present findings only — do NOT fix anything unless I ask.",
  ].join("\n");
  return makeAssessResult({
    subcommand: "diff",
    args,
    ok: true,
    summary: `Prepared review for ${diffContext.ref}`,
    humanText,
    data: {
      ref: diffContext.ref,
      diffStat: diffContext.diffStat,
      hasGuardrails: Boolean(guardrailPreamble),
    },
    effects: [
      { type: "view", content: humanText },
      { type: "follow_up", content: prompt },
    ],
    nextSteps: ["Read the review findings", "Decide whether to fix issues or continue implementation"],
  });
}
|
|
1141
|
+
|
|
1142
|
+
async function executeAssessSpec(
|
|
1143
|
+
pi: ExtensionAPI,
|
|
1144
|
+
ctx: AssessExecutionContext,
|
|
1145
|
+
args: string,
|
|
1146
|
+
overrides?: AssessExecutorOverrides,
|
|
1147
|
+
): Promise<AssessStructuredResult> {
|
|
1148
|
+
const repoPath = ctx.cwd;
|
|
1149
|
+
const openspecDir = detectOpenSpec(repoPath);
|
|
1150
|
+
if (!openspecDir) {
|
|
1151
|
+
const humanText = "No `openspec/` directory found. Nothing to assess against.";
|
|
1152
|
+
return makeAssessResult({
|
|
1153
|
+
subcommand: "spec",
|
|
1154
|
+
args,
|
|
1155
|
+
ok: false,
|
|
1156
|
+
summary: "OpenSpec directory not found",
|
|
1157
|
+
humanText,
|
|
1158
|
+
data: { reason: "openspec_missing" },
|
|
1159
|
+
effects: [{ type: "view", content: humanText }],
|
|
1160
|
+
});
|
|
1161
|
+
}
|
|
1162
|
+
|
|
1163
|
+
const changes = findExecutableChanges(openspecDir);
|
|
1164
|
+
if (changes.length === 0) {
|
|
1165
|
+
const humanText = "No OpenSpec changes with tasks.md found.";
|
|
1166
|
+
return makeAssessResult({
|
|
1167
|
+
subcommand: "spec",
|
|
1168
|
+
args,
|
|
1169
|
+
ok: false,
|
|
1170
|
+
summary: "No executable OpenSpec changes found",
|
|
1171
|
+
humanText,
|
|
1172
|
+
data: { reason: "no_executable_changes" },
|
|
1173
|
+
effects: [{ type: "view", content: humanText }],
|
|
1174
|
+
});
|
|
1175
|
+
}
|
|
1176
|
+
|
|
1177
|
+
const requestedChange = args.trim();
|
|
1178
|
+
let target = requestedChange
|
|
1179
|
+
? changes.find((change) => change.name === requestedChange || change.name.includes(requestedChange))
|
|
1180
|
+
: null;
|
|
1181
|
+
if (!target) {
|
|
1182
|
+
const status = getActiveChangesStatus(repoPath);
|
|
1183
|
+
const withTasks = status.filter((entry) => entry.totalTasks > 0);
|
|
1184
|
+
if (withTasks.length > 0) {
|
|
1185
|
+
const byRecency = [...withTasks].sort((a, b) => b.lastModifiedMs - a.lastModifiedMs);
|
|
1186
|
+
target = changes.find((change) => change.name === byRecency[0].name) ?? changes[0];
|
|
1187
|
+
} else {
|
|
1188
|
+
target = changes[0];
|
|
1189
|
+
}
|
|
1190
|
+
}
|
|
1191
|
+
|
|
1192
|
+
const specCtx = buildOpenSpecContext(target.path);
|
|
1193
|
+
if (specCtx.specScenarios.length === 0) {
|
|
1194
|
+
const humanText = `Change \`${target.name}\` has no delta spec scenarios to assess against.\n\nUse \`/assess diff\` for general code review instead.`;
|
|
1195
|
+
return makeAssessResult({
|
|
1196
|
+
subcommand: "spec",
|
|
1197
|
+
args,
|
|
1198
|
+
ok: false,
|
|
1199
|
+
summary: `No delta scenarios found for ${target.name}`,
|
|
1200
|
+
humanText,
|
|
1201
|
+
data: { changeName: target.name, reason: "no_scenarios" },
|
|
1202
|
+
effects: [{ type: "view", content: humanText }],
|
|
1203
|
+
});
|
|
1204
|
+
}
|
|
1205
|
+
|
|
1206
|
+
const scenarioText = specCtx.specScenarios.map((scenarioSet) => {
|
|
1207
|
+
const renderedScenarios = scenarioSet.scenarios.map((scenario) => {
|
|
1208
|
+
const lines = scenario.split("\n").map((line) => ` ${line}`).join("\n");
|
|
1209
|
+
return lines;
|
|
1210
|
+
}).join("\n");
|
|
1211
|
+
return `**${scenarioSet.domain} → ${scenarioSet.requirement}**\n${renderedScenarios}`;
|
|
1212
|
+
}).join("\n\n");
|
|
1213
|
+
|
|
1214
|
+
let diffContent = "";
|
|
1215
|
+
try {
|
|
1216
|
+
const diff = await pi.exec("git", ["diff", "HEAD~5", "--", "."], { cwd: repoPath, timeout: 10_000 });
|
|
1217
|
+
diffContent = diff.stdout.slice(0, 30_000);
|
|
1218
|
+
} catch {
|
|
1219
|
+
try {
|
|
1220
|
+
const diff = await pi.exec("git", ["diff", "--", "."], { cwd: repoPath, timeout: 10_000 });
|
|
1221
|
+
diffContent = diff.stdout.slice(0, 30_000);
|
|
1222
|
+
} catch {
|
|
1223
|
+
/* proceed without diff */
|
|
1224
|
+
}
|
|
1225
|
+
}
|
|
1226
|
+
|
|
1227
|
+
const designContext = specCtx.decisions.length > 0
|
|
1228
|
+
? [
|
|
1229
|
+
"### Design Decisions",
|
|
1230
|
+
"",
|
|
1231
|
+
"The implementation should also reflect these decisions from design.md:",
|
|
1232
|
+
"",
|
|
1233
|
+
...specCtx.decisions.map((decision) => `- ${decision}`),
|
|
1234
|
+
"",
|
|
1235
|
+
]
|
|
1236
|
+
: [];
|
|
1237
|
+
const apiContractContext = specCtx.apiContract
|
|
1238
|
+
? [
|
|
1239
|
+
"### API Contract",
|
|
1240
|
+
"",
|
|
1241
|
+
"The implementation must conform to this OpenAPI/AsyncAPI contract (`api.yaml`).",
|
|
1242
|
+
"Verify that:",
|
|
1243
|
+
"- All paths/methods defined in the contract are implemented",
|
|
1244
|
+
"- Request/response schemas match the contract exactly",
|
|
1245
|
+
"- Status codes and error responses match the contract",
|
|
1246
|
+
"- Security schemes are applied as specified",
|
|
1247
|
+
"- Any endpoint in the code but NOT in the contract is flagged as undocumented",
|
|
1248
|
+
"",
|
|
1249
|
+
"```yaml",
|
|
1250
|
+
specCtx.apiContract.length > 15_000
|
|
1251
|
+
? specCtx.apiContract.slice(0, 15_000) + "\n# ... (truncated)"
|
|
1252
|
+
: specCtx.apiContract,
|
|
1253
|
+
"```",
|
|
1254
|
+
"",
|
|
1255
|
+
]
|
|
1256
|
+
: [];
|
|
1257
|
+
const lifecycle: AssessLifecycleHint = {
|
|
1258
|
+
changeName: target.name,
|
|
1259
|
+
assessmentKind: "spec",
|
|
1260
|
+
outcomes: ["pass", "reopen", "ambiguous"],
|
|
1261
|
+
};
|
|
1262
|
+
const totalScenarioCount = countSpecScenarios(specCtx);
|
|
1263
|
+
const introText = [
|
|
1264
|
+
`**Spec Assessment: \`${target.name}\`**`,
|
|
1265
|
+
"",
|
|
1266
|
+
`Evaluating implementation against ${totalScenarioCount} spec scenarios`
|
|
1267
|
+
+ (specCtx.decisions.length > 0 ? ` and ${specCtx.decisions.length} design decisions` : "")
|
|
1268
|
+
+ (specCtx.apiContract ? " and API contract (`api.yaml`)" : "")
|
|
1269
|
+
+ "...",
|
|
1270
|
+
].join("\n");
|
|
1271
|
+
const prompt = [
|
|
1272
|
+
`## Spec-Driven Assessment: \`${target.name}\``,
|
|
1273
|
+
"",
|
|
1274
|
+
"Assess whether the current implementation satisfies these OpenSpec scenarios.",
|
|
1275
|
+
"For each scenario, determine: **PASS**, **FAIL**, or **UNCLEAR**.",
|
|
1276
|
+
...(specCtx.apiContract ? ["", "Also verify implementation conformance to the API contract."] : []),
|
|
1277
|
+
"",
|
|
1278
|
+
"### Acceptance Criteria",
|
|
1279
|
+
"",
|
|
1280
|
+
scenarioText,
|
|
1281
|
+
"",
|
|
1282
|
+
...designContext,
|
|
1283
|
+
...apiContractContext,
|
|
1284
|
+
"### Instructions",
|
|
1285
|
+
"",
|
|
1286
|
+
"1. Read the relevant source files to check each scenario",
|
|
1287
|
+
...(specCtx.apiContract ? [" - Also check route definitions, schemas, and status codes against the API contract"] : []),
|
|
1288
|
+
"2. For each scenario, report:",
|
|
1289
|
+
" - **PASS** — implementation clearly satisfies the Given/When/Then",
|
|
1290
|
+
" - **FAIL** — implementation contradicts or is missing",
|
|
1291
|
+
" - **UNCLEAR** — can't determine without running tests",
|
|
1292
|
+
"3. Summarize with a count: N/M scenarios passing",
|
|
1293
|
+
"4. For any FAIL items, explain what's wrong and suggest fixes",
|
|
1294
|
+
"5. Do NOT auto-fix — this is assessment only",
|
|
1295
|
+
`6. After the assessment, if the result reopens work or reveals new constraints/file-scope drift, call \`openspec_manage\` with action \`reconcile_after_assess\`, change_name \`${target.name}\`, assessment_kind \`spec\`, and outcome \`reopen\` or \`ambiguous\` as appropriate. If all scenarios pass cleanly, call it with outcome \`pass\` to refresh lifecycle state.`,
|
|
1296
|
+
...(diffContent ? ["", "### Recent Changes (for context)", "", "```diff", diffContent, "```"] : []),
|
|
1297
|
+
].join("\n");
|
|
1298
|
+
|
|
1299
|
+
if (isInteractiveAssessContext(ctx)) {
|
|
1300
|
+
const lifecycleRecord = await buildLifecycleRecord(pi, ctx.cwd, {
|
|
1301
|
+
changeName: target.name,
|
|
1302
|
+
assessmentKind: "spec",
|
|
1303
|
+
outcome: "ambiguous",
|
|
1304
|
+
recommendedAction: `Run openspec_manage reconcile_after_assess ${target.name} with outcome pass, reopen, or ambiguous after scenario evaluation completes.`,
|
|
1305
|
+
});
|
|
1306
|
+
return makeAssessResult({
|
|
1307
|
+
subcommand: "spec",
|
|
1308
|
+
args,
|
|
1309
|
+
ok: true,
|
|
1310
|
+
summary: `Prepared spec assessment for ${target.name}`,
|
|
1311
|
+
humanText: introText,
|
|
1312
|
+
data: {
|
|
1313
|
+
changeName: target.name,
|
|
1314
|
+
scenarioCount: totalScenarioCount,
|
|
1315
|
+
decisionCount: specCtx.decisions.length,
|
|
1316
|
+
hasApiContract: Boolean(specCtx.apiContract),
|
|
1317
|
+
snapshot: lifecycleRecord.snapshot,
|
|
1318
|
+
},
|
|
1319
|
+
effects: [
|
|
1320
|
+
{ type: "view", content: introText },
|
|
1321
|
+
{ type: "follow_up", content: prompt },
|
|
1322
|
+
{ type: "reconcile_hint" as const, ...lifecycle },
|
|
1323
|
+
],
|
|
1324
|
+
nextSteps: ["Assess each scenario", "Reconcile lifecycle state based on the assessment outcome"],
|
|
1325
|
+
completion: { completed: false, completedInBand: false, requiresFollowUp: true },
|
|
1326
|
+
lifecycle,
|
|
1327
|
+
lifecycleRecord,
|
|
1328
|
+
});
|
|
1329
|
+
}
|
|
1330
|
+
|
|
1331
|
+
const runSpecAssessment = overrides?.runSpecAssessment ?? runSpecAssessmentSubprocess;
|
|
1332
|
+
const completed = await runSpecAssessment({
|
|
1333
|
+
repoPath,
|
|
1334
|
+
changeName: target.name,
|
|
1335
|
+
scenarioText,
|
|
1336
|
+
designContext,
|
|
1337
|
+
apiContractContext,
|
|
1338
|
+
diffContent,
|
|
1339
|
+
expectedScenarioCount: totalScenarioCount,
|
|
1340
|
+
modelId: ctx.model?.id,
|
|
1341
|
+
});
|
|
1342
|
+
const assessed = normalizeSpecAssessment(completed.assessed, totalScenarioCount);
|
|
1343
|
+
const outcome = determineSpecOutcome(assessed.summary);
|
|
1344
|
+
const snapshot = completed.snapshot ?? await collectAssessmentSnapshot(pi, ctx.cwd);
|
|
1345
|
+
const recommendedAction = `Call openspec_manage reconcile_after_assess for ${target.name} with assessment_kind spec and outcome ${outcome}.`;
|
|
1346
|
+
const lifecycleRecord = await buildLifecycleRecord(pi, ctx.cwd, {
|
|
1347
|
+
changeName: target.name,
|
|
1348
|
+
assessmentKind: "spec",
|
|
1349
|
+
outcome,
|
|
1350
|
+
recommendedAction,
|
|
1351
|
+
changedFiles: assessed.changedFiles,
|
|
1352
|
+
constraints: assessed.constraints,
|
|
1353
|
+
snapshot,
|
|
1354
|
+
});
|
|
1355
|
+
const humanText = buildSpecAssessmentHumanText(target.name, assessed, outcome);
|
|
1356
|
+
const summary = `Completed spec assessment for ${target.name}: ${assessed.summary.pass}/${assessed.summary.total} pass, ${assessed.summary.fail} fail, ${assessed.summary.unclear} unclear`;
|
|
1357
|
+
const nextSteps = [
|
|
1358
|
+
`Call openspec_manage reconcile_after_assess for ${target.name} with outcome ${outcome}`,
|
|
1359
|
+
...(outcome === "pass" ? [`If archive gates clear, run /opsx:archive ${target.name}`] : ["Address findings before archive"]),
|
|
1360
|
+
];
|
|
1361
|
+
return makeAssessResult({
|
|
1362
|
+
subcommand: "spec",
|
|
1363
|
+
args,
|
|
1364
|
+
ok: true,
|
|
1365
|
+
summary,
|
|
1366
|
+
humanText,
|
|
1367
|
+
data: {
|
|
1368
|
+
changeName: target.name,
|
|
1369
|
+
outcome,
|
|
1370
|
+
scenarioSummary: assessed.summary,
|
|
1371
|
+
scenarios: assessed.scenarios,
|
|
1372
|
+
changedFiles: assessed.changedFiles ?? [],
|
|
1373
|
+
constraints: assessed.constraints ?? [],
|
|
1374
|
+
overallNotes: assessed.overallNotes ?? null,
|
|
1375
|
+
snapshot,
|
|
1376
|
+
recommendedReconcileOutcome: outcome,
|
|
1377
|
+
},
|
|
1378
|
+
effects: [{ type: "reconcile_hint" as const, ...lifecycle }],
|
|
1379
|
+
nextSteps,
|
|
1380
|
+
completion: { completed: true, completedInBand: true, requiresFollowUp: false, outcome },
|
|
1381
|
+
lifecycle,
|
|
1382
|
+
lifecycleRecord,
|
|
1383
|
+
});
|
|
1384
|
+
}
|
|
1385
|
+
|
|
1386
|
+
async function executeAssessComplexity(args: string): Promise<AssessStructuredResult> {
|
|
1387
|
+
const directive = args.trim();
|
|
1388
|
+
if (!directive) {
|
|
1389
|
+
const humanText = "Usage: `/assess complexity <directive>`\n\nAssess whether a task should be decomposed or executed directly.";
|
|
1390
|
+
return makeAssessResult({
|
|
1391
|
+
subcommand: "complexity",
|
|
1392
|
+
args,
|
|
1393
|
+
ok: false,
|
|
1394
|
+
summary: "Missing directive for complexity assessment",
|
|
1395
|
+
humanText,
|
|
1396
|
+
data: { reason: "missing_directive" },
|
|
1397
|
+
effects: [{ type: "view", content: humanText }],
|
|
1398
|
+
});
|
|
1399
|
+
}
|
|
1400
|
+
|
|
1401
|
+
const assessment = assessDirective(directive);
|
|
1402
|
+
const humanText = [
|
|
1403
|
+
formatAssessment(assessment),
|
|
1404
|
+
"",
|
|
1405
|
+
assessment.decision === "cleave"
|
|
1406
|
+
? `**→ Decomposition recommended.** Use \`/cleave ${directive}\` to proceed.`
|
|
1407
|
+
: assessment.decision === "execute"
|
|
1408
|
+
? "**→ Execute directly.** Task is below complexity threshold."
|
|
1409
|
+
: "**→ Manual assessment needed.** No pattern matched.",
|
|
1410
|
+
].join("\n");
|
|
1411
|
+
return makeAssessResult({
|
|
1412
|
+
subcommand: "complexity",
|
|
1413
|
+
args,
|
|
1414
|
+
ok: true,
|
|
1415
|
+
summary: `Complexity decision: ${assessment.decision}`,
|
|
1416
|
+
humanText,
|
|
1417
|
+
data: assessment,
|
|
1418
|
+
effects: [{ type: "view", content: humanText }],
|
|
1419
|
+
nextSteps: assessment.decision === "cleave" ? [`Run /cleave ${directive}`] : ["Execute directly"],
|
|
1420
|
+
});
|
|
1421
|
+
}
|
|
1422
|
+
|
|
1423
|
+
async function runDesignAssessmentSubprocess(
|
|
1424
|
+
repoPath: string,
|
|
1425
|
+
nodeId: string,
|
|
1426
|
+
modelId?: string,
|
|
1427
|
+
): Promise<{ findings: DesignAssessmentFinding[]; nodeTitle: string; structuralPass: boolean }> {
|
|
1428
|
+
const prompt = [
|
|
1429
|
+
"You are performing a read-only design-tree node assessment.",
|
|
1430
|
+
"Operate in read-only plan mode. Never call edit, write, or any workspace-mutating command.",
|
|
1431
|
+
"",
|
|
1432
|
+
`## Task`,
|
|
1433
|
+
"",
|
|
1434
|
+
`1. Call design_tree with action='node', node_id='${nodeId}' to load the node.`,
|
|
1435
|
+
"2. Run the structural pre-check:",
|
|
1436
|
+
" - open_questions must be empty — if not, emit a structural finding for each",
|
|
1437
|
+
" - decisions must have at least one entry — if not, emit a structural finding",
|
|
1438
|
+
" - acceptanceCriteria must have at least one scenario, falsifiability, or constraint — if not, emit a structural finding",
|
|
1439
|
+
"3. If structural pre-check fails, output ONLY the JSON result below and stop.",
|
|
1440
|
+
"4. Otherwise, evaluate each acceptance criterion against the document body:",
|
|
1441
|
+
" - For each Scenario (Given/When/Then): does the document body address the Then clause?",
|
|
1442
|
+
" - For each Falsifiability condition: is it addressed, ruled out, or acknowledged as a known risk?",
|
|
1443
|
+
" - For each Constraint: is it satisfied by the document content?",
|
|
1444
|
+
"",
|
|
1445
|
+
"## Output Format",
|
|
1446
|
+
"",
|
|
1447
|
+
"Output ONLY a single JSON object (no prose, no markdown, no code blocks):",
|
|
1448
|
+
"{",
|
|
1449
|
+
' "nodeTitle": "<title from node>",',
|
|
1450
|
+
' "structuralPass": true|false,',
|
|
1451
|
+
' "findings": [',
|
|
1452
|
+
' {"type":"scenario"|"falsifiability"|"constraint"|"structural","index":N,"pass":true|false,"finding":"<reason>"}',
|
|
1453
|
+
" ]",
|
|
1454
|
+
"}",
|
|
1455
|
+
].join("\n");
|
|
1456
|
+
|
|
1457
|
+
const args = ["--mode", "json", "--plan", "-p", "--no-session"];
|
|
1458
|
+
if (modelId) args.push("--model", modelId);
|
|
1459
|
+
|
|
1460
|
+
return await new Promise<{ findings: DesignAssessmentFinding[]; nodeTitle: string; structuralPass: boolean }>(
|
|
1461
|
+
(resolve, reject) => {
|
|
1462
|
+
const proc = spawn("pi", args, {
|
|
1463
|
+
cwd: repoPath,
|
|
1464
|
+
shell: false,
|
|
1465
|
+
stdio: ["pipe", "pipe", "pipe"],
|
|
1466
|
+
env: { ...process.env, PI_CHILD: "1", TERM: process.env.TERM ?? "dumb" },
|
|
1467
|
+
});
|
|
1468
|
+
let buffer = "";
|
|
1469
|
+
let assistantText = "";
|
|
1470
|
+
let settled = false;
|
|
1471
|
+
const settleReject = (error: Error) => {
|
|
1472
|
+
if (settled) return;
|
|
1473
|
+
settled = true;
|
|
1474
|
+
clearTimeout(timer);
|
|
1475
|
+
reject(error);
|
|
1476
|
+
};
|
|
1477
|
+
const settleResolve = (value: { findings: DesignAssessmentFinding[]; nodeTitle: string; structuralPass: boolean }) => {
|
|
1478
|
+
if (settled) return;
|
|
1479
|
+
settled = true;
|
|
1480
|
+
clearTimeout(timer);
|
|
1481
|
+
resolve(value);
|
|
1482
|
+
};
|
|
1483
|
+
const timer = setTimeout(() => {
|
|
1484
|
+
proc.kill("SIGTERM");
|
|
1485
|
+
setTimeout(() => { if (!proc.killed) proc.kill("SIGKILL"); }, 5_000);
|
|
1486
|
+
settleReject(new Error(`Timed out after 120s while assessing design node ${nodeId}.`));
|
|
1487
|
+
}, 120_000);
|
|
1488
|
+
const processLine = (line: string) => {
|
|
1489
|
+
if (!line.trim()) return;
|
|
1490
|
+
let event: unknown;
|
|
1491
|
+
try { event = JSON.parse(line); } catch { return; }
|
|
1492
|
+
if (!event || typeof event !== "object") return;
|
|
1493
|
+
const typed = event as { type?: string; message?: { role?: string; content?: unknown } };
|
|
1494
|
+
if (typed.type === "message_end" && typed.message?.role === "assistant") {
|
|
1495
|
+
assistantText = extractAssistantText(typed.message.content);
|
|
1496
|
+
}
|
|
1497
|
+
};
|
|
1498
|
+
proc.stdout.on("data", (data) => {
|
|
1499
|
+
buffer += (data as Buffer).toString();
|
|
1500
|
+
const lines = buffer.split("\n");
|
|
1501
|
+
buffer = lines.pop() || "";
|
|
1502
|
+
for (const line of lines) processLine(line);
|
|
1503
|
+
});
|
|
1504
|
+
let stderr = "";
|
|
1505
|
+
proc.stderr.on("data", (data) => { stderr += (data as Buffer).toString(); });
|
|
1506
|
+
proc.on("error", (error) => settleReject(error));
|
|
1507
|
+
proc.on("close", (code) => {
|
|
1508
|
+
if (buffer.trim()) processLine(buffer.trim());
|
|
1509
|
+
if ((code ?? 1) !== 0) {
|
|
1510
|
+
settleReject(new Error(stderr.trim() || `Design assessment subprocess exited with code ${code ?? 1}.`));
|
|
1511
|
+
return;
|
|
1512
|
+
}
|
|
1513
|
+
const jsonText = extractJsonObject(assistantText || buffer);
|
|
1514
|
+
if (!jsonText) {
|
|
1515
|
+
settleReject(new Error(`Design assessment subprocess did not return parseable JSON.\n${stderr}`));
|
|
1516
|
+
return;
|
|
1517
|
+
}
|
|
1518
|
+
try {
|
|
1519
|
+
const parsed = JSON.parse(jsonText) as { nodeTitle?: string; structuralPass?: boolean; findings?: DesignAssessmentFinding[] };
|
|
1520
|
+
settleResolve({
|
|
1521
|
+
nodeTitle: parsed.nodeTitle ?? nodeId,
|
|
1522
|
+
structuralPass: parsed.structuralPass ?? true,
|
|
1523
|
+
findings: Array.isArray(parsed.findings) ? parsed.findings : [],
|
|
1524
|
+
});
|
|
1525
|
+
} catch (err) {
|
|
1526
|
+
settleReject(new Error(`Design assessment JSON was invalid: ${String(err)}`));
|
|
1527
|
+
}
|
|
1528
|
+
});
|
|
1529
|
+
proc.stdin.write(prompt + "\n");
|
|
1530
|
+
proc.stdin.end();
|
|
1531
|
+
},
|
|
1532
|
+
);
|
|
1533
|
+
}
|
|
1534
|
+
|
|
1535
|
+
async function executeAssessDesign(
|
|
1536
|
+
pi: ExtensionAPI,
|
|
1537
|
+
ctx: AssessExecutionContext,
|
|
1538
|
+
args: string,
|
|
1539
|
+
): Promise<AssessStructuredResult> {
|
|
1540
|
+
const cwd = ctx.cwd;
|
|
1541
|
+
// Resolve: explicit arg → focused node → error
|
|
1542
|
+
const nodeId = args.trim() || sharedState.designTree?.focusedNode?.id || null;
|
|
1543
|
+
|
|
1544
|
+
if (!nodeId) {
|
|
1545
|
+
const humanText = "Usage: `/assess design <node-id>`\n\nProvide a design-tree node ID, or set a focused node via `design_tree_update` with action 'focus' and run `/assess design` without arguments.";
|
|
1546
|
+
return makeAssessResult({
|
|
1547
|
+
subcommand: "design",
|
|
1548
|
+
args,
|
|
1549
|
+
ok: false,
|
|
1550
|
+
summary: "Missing node-id for design assessment",
|
|
1551
|
+
humanText,
|
|
1552
|
+
data: { reason: "missing_node_id" },
|
|
1553
|
+
effects: [{ type: "view", content: humanText }],
|
|
1554
|
+
});
|
|
1555
|
+
}
|
|
1556
|
+
|
|
1557
|
+
// Build the interactive follow-up prompt
|
|
1558
|
+
const interactivePrompt = [
|
|
1559
|
+
`## Design Assessment: \`${nodeId}\``,
|
|
1560
|
+
"",
|
|
1561
|
+
"Assess this design-tree node for readiness to be marked as 'decided'.",
|
|
1562
|
+
"",
|
|
1563
|
+
"### Steps",
|
|
1564
|
+
"",
|
|
1565
|
+
`1. Call \`design_tree\` with \`action='node'\`, \`node_id='${nodeId}'\` to load the node and its document body.`,
|
|
1566
|
+
"2. **Structural pre-check** (fail fast with specific finding per gap):",
|
|
1567
|
+
" - If `open_questions.length > 0`: FAIL — list each unresolved question",
|
|
1568
|
+
" - If `decisions.length === 0`: FAIL — no decisions recorded",
|
|
1569
|
+
" - If `acceptanceCriteria` has no scenarios, falsifiability, or constraints: FAIL — empty acceptance criteria",
|
|
1570
|
+
" - If any structural check fails, stop here and report findings.",
|
|
1571
|
+
"3. **Acceptance criteria evaluation** (against the document body):",
|
|
1572
|
+
" - For each **Scenario** (Given/When/Then): does the document body address the Then clause?",
|
|
1573
|
+
" - For each **Falsifiability** condition: is it addressed, ruled out, or noted as a known risk?",
|
|
1574
|
+
" - For each **Constraint**: is it satisfied by the document content?",
|
|
1575
|
+
"4. **Write `assessment.json`** to `openspec/design/${nodeId}/assessment.json` with structure:",
|
|
1576
|
+
" ```json",
|
|
1577
|
+
` {"nodeId":"${nodeId}","pass":true|false,"structuralPass":true|false,"findings":[...]}`,
|
|
1578
|
+
" ```",
|
|
1579
|
+
" Each finding: `{\"type\":\"scenario\"|\"falsifiability\"|\"constraint\"|\"structural\",\"index\":N,\"pass\":true|false,\"finding\":\"<reason>\"}`",
|
|
1580
|
+
"5. **Report** overall PASS/FAIL with per-finding details.",
|
|
1581
|
+
" - If PASS: suggest `design_tree_update` with `set_status(decided)` for this node.",
|
|
1582
|
+
" - If FAIL: list each failing finding with an actionable fix.",
|
|
1583
|
+
].join("\n");
|
|
1584
|
+
|
|
1585
|
+
if (isInteractiveAssessContext(ctx)) {
|
|
1586
|
+
const introText = `Running design assessment for node \`${nodeId}\`…`;
|
|
1587
|
+
return makeAssessResult({
|
|
1588
|
+
subcommand: "design",
|
|
1589
|
+
args,
|
|
1590
|
+
ok: true,
|
|
1591
|
+
summary: `Prepared design assessment for ${nodeId}`,
|
|
1592
|
+
humanText: introText,
|
|
1593
|
+
data: { nodeId },
|
|
1594
|
+
effects: [
|
|
1595
|
+
{ type: "view", content: introText },
|
|
1596
|
+
{ type: "follow_up", content: interactivePrompt },
|
|
1597
|
+
],
|
|
1598
|
+
nextSteps: ["Evaluate acceptance criteria", "Write assessment.json", "Set status to decided if pass"],
|
|
1599
|
+
completion: { completed: false, completedInBand: false, requiresFollowUp: true },
|
|
1600
|
+
});
|
|
1601
|
+
}
|
|
1602
|
+
|
|
1603
|
+
// Bridged / subprocess mode
|
|
1604
|
+
let subResult: { findings: DesignAssessmentFinding[]; nodeTitle: string; structuralPass: boolean };
|
|
1605
|
+
try {
|
|
1606
|
+
subResult = await runDesignAssessmentSubprocess(cwd, nodeId, ctx.model?.id);
|
|
1607
|
+
} catch (err) {
|
|
1608
|
+
const msg = `Design assessment subprocess failed: ${String(err)}`;
|
|
1609
|
+
return makeAssessResult({
|
|
1610
|
+
subcommand: "design",
|
|
1611
|
+
args,
|
|
1612
|
+
ok: false,
|
|
1613
|
+
summary: msg,
|
|
1614
|
+
humanText: msg,
|
|
1615
|
+
data: { reason: "subprocess_failed", nodeId },
|
|
1616
|
+
effects: [{ type: "view", content: msg }],
|
|
1617
|
+
});
|
|
1618
|
+
}
|
|
1619
|
+
|
|
1620
|
+
const { findings, nodeTitle, structuralPass } = subResult;
|
|
1621
|
+
const overallPass = structuralPass && findings.length > 0 && findings.every((f) => f.pass);
|
|
1622
|
+
|
|
1623
|
+
const result: DesignAssessmentResult = { nodeId, pass: overallPass, structuralPass, findings };
|
|
1624
|
+
|
|
1625
|
+
// Write assessment.json
|
|
1626
|
+
await writeDesignAssessment(cwd, nodeId, result);
|
|
1627
|
+
|
|
1628
|
+
// Build human text
|
|
1629
|
+
const failFindings = findings.filter((f) => !f.pass);
|
|
1630
|
+
const passFindings = findings.filter((f) => f.pass);
|
|
1631
|
+
const humanLines: string[] = [
|
|
1632
|
+
`## Design Assessment: ${nodeTitle} (${nodeId})`,
|
|
1633
|
+
"",
|
|
1634
|
+
overallPass
|
|
1635
|
+
? `**✅ PASS** — ${passFindings.length}/${findings.length} criteria satisfied. Ready to set status → decided.`
|
|
1636
|
+
: structuralPass
|
|
1637
|
+
? `**❌ FAIL** — ${failFindings.length}/${findings.length} criteria not satisfied.`
|
|
1638
|
+
: "**❌ Structural pre-check failed** — resolve these issues before assessing.",
|
|
1639
|
+
"",
|
|
1640
|
+
];
|
|
1641
|
+
if (failFindings.length > 0) {
|
|
1642
|
+
humanLines.push("### Issues to Resolve");
|
|
1643
|
+
for (const f of failFindings) humanLines.push(`- [${f.type}#${f.index}] ${f.finding}`);
|
|
1644
|
+
humanLines.push("");
|
|
1645
|
+
}
|
|
1646
|
+
if (passFindings.length > 0) {
|
|
1647
|
+
humanLines.push("### Satisfied");
|
|
1648
|
+
for (const f of passFindings) humanLines.push(`- ✓ [${f.type}#${f.index}] ${f.finding}`);
|
|
1649
|
+
}
|
|
1650
|
+
|
|
1651
|
+
const humanText = humanLines.join("\n");
|
|
1652
|
+
const nextSteps = overallPass
|
|
1653
|
+
? [`Run design_tree_update with action 'set_status', node_id '${nodeId}', status 'decided'`]
|
|
1654
|
+
: failFindings.map((f) => f.finding.split(".")[0] ?? f.finding);
|
|
1655
|
+
|
|
1656
|
+
return makeAssessResult({
|
|
1657
|
+
subcommand: "design",
|
|
1658
|
+
args,
|
|
1659
|
+
ok: overallPass,
|
|
1660
|
+
summary: overallPass
|
|
1661
|
+
? `Design node '${nodeId}' passed — ready to decide`
|
|
1662
|
+
: `Design node '${nodeId}' failed — ${failFindings.length} issue(s) to resolve`,
|
|
1663
|
+
humanText,
|
|
1664
|
+
data: result,
|
|
1665
|
+
effects: [{ type: "view", content: humanText }],
|
|
1666
|
+
nextSteps,
|
|
1667
|
+
});
|
|
1668
|
+
}
|
|
1669
|
+
|
|
1670
|
+
async function writeDesignAssessment(cwd: string, nodeId: string, result: DesignAssessmentResult): Promise<void> {
|
|
1671
|
+
try {
|
|
1672
|
+
const { writeFile } = await import("node:fs/promises");
|
|
1673
|
+
const { join } = await import("node:path");
|
|
1674
|
+
const { existsSync } = await import("node:fs");
|
|
1675
|
+
const dir = join(cwd, "openspec", "design", nodeId);
|
|
1676
|
+
// Do NOT create the directory — if it doesn't exist the node has no design change
|
|
1677
|
+
// scaffolded yet, and creating assessment.json here would trigger the "active not
|
|
1678
|
+
// archived" gate on set_status(decided) and implement. Write only if already scaffolded.
|
|
1679
|
+
if (!existsSync(dir)) return;
|
|
1680
|
+
await writeFile(join(dir, "assessment.json"), JSON.stringify(result, null, 2), "utf8");
|
|
1681
|
+
} catch {
|
|
1682
|
+
// non-fatal — assessment result still returned to caller
|
|
1683
|
+
}
|
|
1684
|
+
}
|
|
1685
|
+
|
|
1686
|
+
export function createAssessStructuredExecutors(pi: ExtensionAPI, overrides?: AssessExecutorOverrides) {
|
|
1687
|
+
return {
|
|
1688
|
+
cleave: (args: string, ctx: AssessExecutionContext) => executeAssessCleave(pi, ctx, args),
|
|
1689
|
+
diff: (args: string, ctx: AssessExecutionContext) => executeAssessDiff(pi, ctx, args),
|
|
1690
|
+
spec: (args: string, ctx: AssessExecutionContext) => executeAssessSpec(pi, ctx, args, overrides),
|
|
1691
|
+
complexity: (args: string) => executeAssessComplexity(args),
|
|
1692
|
+
design: (args: string, ctx: AssessExecutionContext) => executeAssessDesign(pi, ctx, args),
|
|
1693
|
+
} as const;
|
|
1694
|
+
}
|
|
1695
|
+
|
|
1696
|
+
// ─── Extension ──────────────────────────────────────────────────────────────
|
|
1697
|
+
|
|
1698
|
+
export default function cleaveExtension(pi: ExtensionAPI) {
|
|
1699
|
+
// ── Initialize dashboard state ──────────────────────────────────
|
|
1700
|
+
emitCleaveState(pi, "idle");
|
|
1701
|
+
|
|
1702
|
+
// ── Agent start: inject OpenSpec status into context ─────────────
|
|
1703
|
+
// Uses before_agent_start (not session_start) so the status message
|
|
1704
|
+
// enters the agent's conversation context, not just the TUI display.
|
|
1705
|
+
let openspecFirstTurn = true;
|
|
1706
|
+
|
|
1707
|
+
pi.on("before_agent_start", (_event, ctx) => {
|
|
1708
|
+
if (!openspecFirstTurn) return;
|
|
1709
|
+
openspecFirstTurn = false;
|
|
1710
|
+
|
|
1711
|
+
try {
|
|
1712
|
+
const status = getActiveChangesStatus(ctx.cwd);
|
|
1713
|
+
if (status.length === 0) return;
|
|
1714
|
+
|
|
1715
|
+
const lines = ["**OpenSpec Changes**", ""];
|
|
1716
|
+
for (const s of status) {
|
|
1717
|
+
const progress = s.totalTasks > 0
|
|
1718
|
+
? `${s.doneTasks}/${s.totalTasks} tasks`
|
|
1719
|
+
: "no tasks";
|
|
1720
|
+
const artifacts: string[] = [];
|
|
1721
|
+
if (s.hasProposal) artifacts.push("proposal");
|
|
1722
|
+
if (s.hasDesign) artifacts.push("design");
|
|
1723
|
+
if (s.hasSpecs) artifacts.push("specs");
|
|
1724
|
+
const artStr = artifacts.length > 0 ? ` [${artifacts.join(", ")}]` : "";
|
|
1725
|
+
|
|
1726
|
+
const icon = s.totalTasks > 0 && s.doneTasks >= s.totalTasks ? "✓" : "◦";
|
|
1727
|
+
lines.push(` ${icon} **${s.name}** — ${progress}${artStr}`);
|
|
1728
|
+
}
|
|
1729
|
+
|
|
1730
|
+
const incomplete = status.filter((s) => s.totalTasks > 0 && s.doneTasks < s.totalTasks);
|
|
1731
|
+
if (incomplete.length > 0) {
|
|
1732
|
+
lines.push("", `Use \`/opsx:apply\` to continue or \`/cleave\` to parallelize.`);
|
|
1733
|
+
}
|
|
1734
|
+
|
|
1735
|
+
const withTasks = status.filter((s) => s.totalTasks > 0);
|
|
1736
|
+
const allDone = withTasks.length > 0 && withTasks.every((s) => s.doneTasks >= s.totalTasks);
|
|
1737
|
+
if (allDone) {
|
|
1738
|
+
lines.push("", `All tasks complete. Run \`/opsx:verify\` → \`/opsx:archive\` to finalize.`);
|
|
1739
|
+
}
|
|
1740
|
+
|
|
1741
|
+
const content = lines.join("\n");
|
|
1742
|
+
return {
|
|
1743
|
+
message: {
|
|
1744
|
+
customType: "openspec-status",
|
|
1745
|
+
content,
|
|
1746
|
+
display: true,
|
|
1747
|
+
},
|
|
1748
|
+
};
|
|
1749
|
+
} catch {
|
|
1750
|
+
// Non-fatal — don't block agent start
|
|
1751
|
+
}
|
|
1752
|
+
});
|
|
1753
|
+
|
|
1754
|
+
|
|
1755
|
+
// ── cleave_assess tool ───────────────────────────────────────────────
|
|
1756
|
+
pi.registerTool({
|
|
1757
|
+
name: "cleave_assess",
|
|
1758
|
+
label: "Cleave Assess",
|
|
1759
|
+
description:
|
|
1760
|
+
"Assess the complexity of a task directive to determine if it should be " +
|
|
1761
|
+
"decomposed (cleaved) into subtasks or executed directly. Returns complexity " +
|
|
1762
|
+
"score, matched pattern, confidence, and decision (execute/cleave).\n\n" +
|
|
1763
|
+
"Use before attempting complex multi-system tasks to decide whether decomposition is warranted.",
|
|
1764
|
+
promptSnippet:
|
|
1765
|
+
"Assess task complexity for decomposition — returns pattern match, complexity score, and execute/cleave decision",
|
|
1766
|
+
promptGuidelines: [
|
|
1767
|
+
"Call cleave_assess before starting any multi-system or cross-cutting task to determine if decomposition is needed",
|
|
1768
|
+
"If decision is 'execute', proceed directly. If 'cleave', use /cleave to decompose. If 'needs_assessment', proceed directly — it means no pattern matched but the task is likely simple enough for in-session execution.",
|
|
1769
|
+
"Complexity formula: (1 + systems) × (1 + 0.5 × modifiers). Threshold default: 2.0.",
|
|
1770
|
+
"The /assess command provides code assessment: `/assess cleave` (adversarial review + auto-fix), `/assess diff [ref]` (review only), `/assess spec [change]` (validate against OpenSpec scenarios), `/assess design [node-id]` (evaluate design-tree node readiness before set_status(decided)).",
|
|
1771
|
+
"When the repo has openspec/ with active changes, suggest `/assess spec` after implementation and before `/opsx:archive`.",
|
|
1772
|
+
"Run `/assess design <node-id>` before calling design_tree_update with set_status(decided) to verify acceptance criteria are satisfied.",
|
|
1773
|
+
],
|
|
1774
|
+
|
|
1775
|
+
parameters: Type.Object({
|
|
1776
|
+
directive: Type.String({ description: "The task directive to assess" }),
|
|
1777
|
+
threshold: Type.Optional(Type.Number({ description: "Complexity threshold (default: 2.0)" })),
|
|
1778
|
+
}),
|
|
1779
|
+
|
|
1780
|
+
renderCall(args, theme) {
|
|
1781
|
+
const dir = args.directive.length > 55
|
|
1782
|
+
? args.directive.slice(0, 52) + "…"
|
|
1783
|
+
: args.directive;
|
|
1784
|
+
return sciCall("cleave_assess", dir, theme);
|
|
1785
|
+
},
|
|
1786
|
+
|
|
1787
|
+
renderResult(result, { expanded }, theme) {
|
|
1788
|
+
const d = result.details as {
|
|
1789
|
+
score?: number; complexity?: number; decision?: string;
|
|
1790
|
+
systems?: number; modifiers?: string[]; pattern?: string; method?: string;
|
|
1791
|
+
} | undefined;
|
|
1792
|
+
const score = d?.score ?? d?.complexity;
|
|
1793
|
+
const decision = d?.decision;
|
|
1794
|
+
const scoreStr = score != null ? `complexity ${score.toFixed(1)}` : "";
|
|
1795
|
+
const decisionStr = decision ? `→ ${decision}` : "";
|
|
1796
|
+
const summary = [scoreStr, decisionStr].filter(Boolean).join(" ");
|
|
1797
|
+
|
|
1798
|
+
if (expanded) {
|
|
1799
|
+
const lines: string[] = [];
|
|
1800
|
+
// Structured breakdown
|
|
1801
|
+
if (d?.pattern) lines.push(`${theme.fg("accent", "Pattern")} ${theme.fg("muted", d.pattern)}`);
|
|
1802
|
+
if (d?.method) lines.push(`${theme.fg("accent", "Method")} ${theme.fg("muted", d.method)}`);
|
|
1803
|
+
if (d?.systems != null) lines.push(`${theme.fg("accent", "Systems")} ${theme.fg("muted", String(d.systems))}`);
|
|
1804
|
+
if (d?.modifiers?.length) lines.push(`${theme.fg("accent", "Modifiers")} ${theme.fg("muted", d.modifiers.join(", "))}`);
|
|
1805
|
+
if (score != null) {
|
|
1806
|
+
const color = score >= 2.0 ? "warning" : "success";
|
|
1807
|
+
lines.push(`${theme.fg("accent", "Score")} ${theme.fg(color as any, score.toFixed(1))}`);
|
|
1808
|
+
}
|
|
1809
|
+
if (decision) {
|
|
1810
|
+
const color = decision === "cleave" ? "warning" : "success";
|
|
1811
|
+
lines.push(`${theme.fg("accent", "Decision")} ${theme.fg(color as any, decision)}`);
|
|
1812
|
+
}
|
|
1813
|
+
if (lines.length === 0) {
|
|
1814
|
+
// Fallback to raw text
|
|
1815
|
+
const text = (result.content?.[0] && "text" in result.content[0] ? result.content[0].text : "") ?? "";
|
|
1816
|
+
lines.push(...text.split("\n"));
|
|
1817
|
+
}
|
|
1818
|
+
return sciExpanded(lines, summary, theme);
|
|
1819
|
+
}
|
|
1820
|
+
|
|
1821
|
+
if (!summary) {
|
|
1822
|
+
const first = result.content?.[0];
|
|
1823
|
+
return sciOk(((first && "text" in first ? first.text : null) ?? "").split("\n")[0].slice(0, 80), theme);
|
|
1824
|
+
}
|
|
1825
|
+
|
|
1826
|
+
return sciOk(summary, theme);
|
|
1827
|
+
},
|
|
1828
|
+
|
|
1829
|
+
async execute(_toolCallId, params, _signal, _onUpdate, _ctx) {
|
|
1830
|
+
const assessment = assessDirective(params.directive, params.threshold ?? DEFAULT_CONFIG.threshold);
|
|
1831
|
+
const text = formatAssessment(assessment);
|
|
1832
|
+
|
|
1833
|
+
return {
|
|
1834
|
+
content: [{ type: "text", text }],
|
|
1835
|
+
details: {
|
|
1836
|
+
...assessment,
|
|
1837
|
+
availablePatterns: Object.values(PATTERNS).map((p) => p.name),
|
|
1838
|
+
},
|
|
1839
|
+
};
|
|
1840
|
+
},
|
|
1841
|
+
});
|
|
1842
|
+
|
|
1843
|
+
// ── /assess command ──────────────────────────────────────────────────
|
|
1844
|
+
const ASSESS_SUBS = [
|
|
1845
|
+
{ value: "cleave", label: "cleave", description: "Adversarial review → auto-fix (optional: ref)" },
|
|
1846
|
+
{ value: "diff", label: "diff", description: "Assess uncommitted or recent changes for issues" },
|
|
1847
|
+
{ value: "spec", label: "spec", description: "Assess implementation against OpenSpec scenarios" },
|
|
1848
|
+
{ value: "complexity", label: "complexity", description: "Assess directive complexity (cleave_assess)" },
|
|
1849
|
+
{ value: "design", label: "design", description: "Assess design-tree node readiness before set_status(decided)" },
|
|
1850
|
+
];
|
|
1851
|
+
const assessExecutors = createAssessStructuredExecutors(pi);
|
|
1852
|
+
const slashCommandBridge = getSharedBridge();
|
|
1853
|
+
const toBridgeAssessResult = (
|
|
1854
|
+
bridgedArgs: string[],
|
|
1855
|
+
result: AssessStructuredResult,
|
|
1856
|
+
): ReturnType<typeof buildSlashCommandResult> => buildAssessBridgeResult(bridgedArgs, result);
|
|
1857
|
+
|
|
1858
|
+
slashCommandBridge.register(pi, {
|
|
1859
|
+
name: "assess",
|
|
1860
|
+
description: "Adversarial review + auto-fix (default), or: /assess <diff|spec|complexity> [args]",
|
|
1861
|
+
bridge: {
|
|
1862
|
+
agentCallable: true,
|
|
1863
|
+
sideEffectClass: "workspace-write",
|
|
1864
|
+
resultContract: "cleave.assess.v1",
|
|
1865
|
+
summary: "Lifecycle-safe assessment commands for spec, diff, cleave, and complexity",
|
|
1866
|
+
},
|
|
1867
|
+
// Offer subcommand completions only while the first word is being typed;
// once a space separates further arguments there is nothing to complete.
getArgumentCompletions: (prefix: string) => {
  const words = prefix.split(" ");
  if (words.length > 1) return null;
  const typed = words[0] || "";
  const matches = ASSESS_SUBS.filter((sub) => sub.value.startsWith(typed));
  return matches.length > 0 ? matches : null;
},
|
|
1876
|
+
/**
 * Bridged (non-interactive) execution of /assess subcommands.
 *
 * Routes a recognised subcommand to its structured executor; bare or
 * unknown targets return a structured failure that lists the supported
 * subcommands. Help text is derived from ASSESS_SUBS so newly added
 * targets (e.g. "design") are never omitted from the hint.
 */
structuredExecutor: async (args, ctx) => {
  const trimmed = (args || "").trim();
  const supported = ASSESS_SUBS.map((sub) => sub.value);
  if (!trimmed) {
    // Fix: the previous help text hardcoded "spec, diff, cleave, or
    // complexity" and silently omitted "design"; generate it instead.
    const hint = supported.map((value) => `/assess ${value}`).join(", ");
    return buildSlashCommandResult("assess", [], {
      ok: false,
      summary: "/assess requires an explicit bridged subcommand",
      humanText: `Bare /assess remains interactive-only in v1. Use one of: ${hint}.`,
      data: { supportedSubcommands: supported },
      effects: { sideEffectClass: "workspace-write" },
      nextSteps: ASSESS_SUBS.map((sub) => ({ label: `Run /assess ${sub.value}` })),
    });
  }

  const parts = trimmed.split(/\s+/);
  const sub = parts[0] || "";
  const rest = parts.slice(1).join(" ");
  // Narrow the generic command context to what the assess executors need.
  const assessCtx: AssessExecutionContext = {
    cwd: ctx.cwd,
    bridgeInvocation: (ctx as { bridgeInvocation?: boolean }).bridgeInvocation,
    hasUI: ctx.hasUI,
    model: ctx.model ? { id: ctx.model.id } : undefined,
    // waitForIdle is optional on the bridge context; bind it when present
    // so executors can await session quiescence.
    waitForIdle: "waitForIdle" in ctx && typeof ctx.waitForIdle === "function"
      ? ctx.waitForIdle.bind(ctx)
      : undefined,
  };
  switch (sub) {
    case "cleave":
      return toBridgeAssessResult(parts, await assessExecutors.cleave(rest, assessCtx));
    case "diff":
      return toBridgeAssessResult(parts, await assessExecutors.diff(rest, assessCtx));
    case "spec":
      return toBridgeAssessResult(parts, await assessExecutors.spec(rest, assessCtx));
    case "complexity":
      return toBridgeAssessResult(parts, await assessExecutors.complexity(rest));
    case "design":
      return toBridgeAssessResult(parts, await assessExecutors.design(rest, assessCtx));
    default:
      return buildSlashCommandResult("assess", parts, {
        ok: false,
        summary: `Unsupported bridged /assess target: ${sub}`,
        humanText: `Bridged /assess currently supports only: ${supported.join(", ")}. Freeform adversarial review remains interactive-only in v1.`,
        data: { supportedSubcommands: supported },
        effects: { sideEffectClass: "workspace-write" },
        nextSteps: ASSESS_SUBS.map((item) => ({ label: `Run /assess ${item.value}` })),
      });
  }
},
|
|
1923
|
+
// Interactive path: a bare or freeform /assess flips the session into a
// hostile-reviewer persona; a recognised subcommand forwards the bridged
// result into applyAssessEffects.
interactiveHandler: async (result, args) => {
  const trimmed = (args || "").trim();
  const firstWord = trimmed.split(/\s+/)[0] || "";
  const isBridgedSub = ASSESS_SUBS.some((item) => item.value === firstWord);

  if (!trimmed || !isBridgedSub) {
    const intro = trimmed
      ? "You are now operating as a hostile reviewer. Your job is to find everything wrong with the work completed in this session."
      : "You are now operating as a hostile reviewer. Your job is to find everything wrong with the work completed in this session. Do not be polite. Do not hedge. If something is broken, say it's broken.";
    const promptLines: string[] = ["# Adversarial Assessment", "", intro];
    if (trimmed) {
      promptLines.push("", "**User instructions:** " + trimmed);
    }
    promptLines.push(
      "",
      "Follow the user's instructions above for tone and scope, but still perform a thorough review.",
      "Read every file that was changed. Be specific. Cite line numbers.",
      "",
      "## Output Format",
      "",
      "### Verdict",
      "One of: `PASS` | `PASS WITH CONCERNS` | `NEEDS REWORK` | `REJECT`",
      "",
      "### Critical Issues",
      "### Warnings",
      "### Nitpicks",
      "### Omissions",
      "### What Actually Worked",
    );
    pi.sendUserMessage(promptLines.join("\n"));
    return;
  }

  // Recognised subcommand: lift the bridge payload into the structured
  // assess-result shape and apply its effects.
  const payload = result.data as any;
  const assessResult: AssessStructuredResult = {
    command: "assess",
    subcommand: payload?.subcommand ?? "diff",
    args: trimmed,
    ok: result.ok,
    summary: result.summary,
    humanText: result.humanText,
    data: payload?.data,
    effects: payload?.assessEffects ?? [],
    nextSteps: (result.nextSteps ?? []).map((step) => step.label),
    completion: payload?.completion,
    lifecycle: payload?.lifecycleHint,
    lifecycleRecord: result.lifecycle as AssessLifecycleRecord | undefined,
  };
  applyAssessEffects(pi, assessResult);
},
|
|
1967
|
+
// Agent path: translate the bridged slash-command result into the
// AssessStructuredResult shape and apply its effects.
agentHandler: async (result, args) => {
  const payload = result.data as any;
  const assessResult: AssessStructuredResult = {
    command: "assess",
    subcommand: payload?.subcommand ?? "diff",
    args: (args || "").trim(),
    ok: result.ok,
    summary: result.summary,
    humanText: result.humanText,
    data: payload?.data,
    effects: payload?.assessEffects ?? [],
    nextSteps: (result.nextSteps ?? []).map((step) => step.label),
    completion: payload?.completion,
    lifecycle: payload?.lifecycleHint,
    lifecycleRecord: result.lifecycle as AssessLifecycleRecord | undefined,
  };
  applyAssessEffects(pi, assessResult);
},
|
|
1985
|
+
});
|
|
1986
|
+
pi.registerTool(slashCommandBridge.createToolDefinition());
|
|
1987
|
+
|
|
1988
|
+
// ── /cleave command ──────────────────────────────────────────────────
|
|
1989
|
+
pi.registerCommand("cleave", {
|
|
1990
|
+
description: "Recursive task decomposition (usage: /cleave <directive>)",
|
|
1991
|
+
handler: async (args, ctx) => {
|
|
1992
|
+
const directive = (args || "").trim();
|
|
1993
|
+
|
|
1994
|
+
if (!directive) {
|
|
1995
|
+
pi.sendMessage({
|
|
1996
|
+
customType: "view",
|
|
1997
|
+
content: [
|
|
1998
|
+
"**Cleave — Recursive Task Decomposition**",
|
|
1999
|
+
"",
|
|
2000
|
+
"Usage: `/cleave <directive>`",
|
|
2001
|
+
"",
|
|
2002
|
+
"Example: `/cleave Implement JWT authentication with refresh tokens`",
|
|
2003
|
+
"",
|
|
2004
|
+
"The directive will be assessed for complexity. If it exceeds the",
|
|
2005
|
+
"threshold, it will be decomposed into 2-4 child tasks executed",
|
|
2006
|
+
"in parallel via git worktrees.",
|
|
2007
|
+
"",
|
|
2008
|
+
"Available patterns: " + Object.values(PATTERNS).map((p) => p.name).join(", "),
|
|
2009
|
+
].join("\n"),
|
|
2010
|
+
display: true,
|
|
2011
|
+
});
|
|
2012
|
+
return;
|
|
2013
|
+
}
|
|
2014
|
+
|
|
2015
|
+
// Delegate the full workflow to the LLM via a structured prompt.
|
|
2016
|
+
// This allows the LLM to handle the interactive confirm gates
|
|
2017
|
+
// and adapt to user feedback, while we provide all the mechanical
|
|
2018
|
+
// infrastructure via tools.
|
|
2019
|
+
|
|
2020
|
+
const assessment = assessDirective(directive);
|
|
2021
|
+
const assessmentText = formatAssessment(assessment);
|
|
2022
|
+
|
|
2023
|
+
if (assessment.decision === "execute" || assessment.decision === "needs_assessment") {
|
|
2024
|
+
pi.sendMessage({
|
|
2025
|
+
customType: "view",
|
|
2026
|
+
content: [
|
|
2027
|
+
assessmentText,
|
|
2028
|
+
"",
|
|
2029
|
+
assessment.decision === "needs_assessment"
|
|
2030
|
+
? "**→ Execute directly** — no pattern matched; heuristic suggests in-session execution."
|
|
2031
|
+
: "**→ Execute directly** — complexity is below threshold.",
|
|
2032
|
+
"Proceeding with the task in-session.",
|
|
2033
|
+
].join("\n"),
|
|
2034
|
+
display: true,
|
|
2035
|
+
});
|
|
2036
|
+
|
|
2037
|
+
// Hand off to the LLM to execute directly
|
|
2038
|
+
pi.sendUserMessage(
|
|
2039
|
+
`Execute this task directly (cleave assessment says it's simple enough):\n\n${directive}`,
|
|
2040
|
+
{ deliverAs: "followUp" },
|
|
2041
|
+
);
|
|
2042
|
+
return;
|
|
2043
|
+
}
|
|
2044
|
+
|
|
2045
|
+
// Task needs cleaving — check for OpenSpec first, then fall back to LLM
|
|
2046
|
+
const repoPath = ctx.cwd;
|
|
2047
|
+
|
|
2048
|
+
// ── OpenSpec fast path ─────────────────────────────────────
|
|
2049
|
+
const openspecDir = detectOpenSpec(repoPath);
|
|
2050
|
+
if (openspecDir) {
|
|
2051
|
+
const executableChanges = findExecutableChanges(openspecDir);
|
|
2052
|
+
if (executableChanges.length > 0) {
|
|
2053
|
+
// Try to find a change whose name matches the directive.
|
|
2054
|
+
// Three strategies: exact slug containment, word overlap, partial prefix.
|
|
2055
|
+
const directiveSlug = directive.toLowerCase().replace(/[^\w]+/g, "-");
|
|
2056
|
+
const directiveWords = new Set(
|
|
2057
|
+
directive.toLowerCase().replace(/[^\w\s]/g, "").split(/\s+/).filter((w) => w.length > 2),
|
|
2058
|
+
);
|
|
2059
|
+
|
|
2060
|
+
const matched = executableChanges.find((c) => {
|
|
2061
|
+
// Strategy 1: slug containment (either direction)
|
|
2062
|
+
if (directiveSlug.includes(c.name) || c.name.includes(directiveSlug.slice(0, 20))) return true;
|
|
2063
|
+
// Strategy 2: word overlap — change name words appear in directive
|
|
2064
|
+
const changeWords = c.name.split("-").filter((w) => w.length > 2);
|
|
2065
|
+
const overlap = changeWords.filter((w) => directiveWords.has(w)).length;
|
|
2066
|
+
if (changeWords.length > 0 && overlap >= Math.ceil(changeWords.length * 0.5)) return true;
|
|
2067
|
+
return false;
|
|
2068
|
+
});
|
|
2069
|
+
|
|
2070
|
+
// Only use OpenSpec if we found a matching change — never silently
|
|
2071
|
+
// pick an unrelated change
|
|
2072
|
+
if (!matched) {
|
|
2073
|
+
// No match — mention available changes but fall through to LLM planner
|
|
2074
|
+
pi.sendMessage({
|
|
2075
|
+
customType: "view",
|
|
2076
|
+
content: [
|
|
2077
|
+
`OpenSpec changes found but none matched the directive.`,
|
|
2078
|
+
`Available: ${executableChanges.map((c) => c.name).join(", ")}`,
|
|
2079
|
+
`Falling back to LLM planner.`,
|
|
2080
|
+
].join("\n"),
|
|
2081
|
+
display: true,
|
|
2082
|
+
});
|
|
2083
|
+
}
|
|
2084
|
+
const change = matched!;
|
|
2085
|
+
const result = change ? openspecChangeToSplitPlanWithContext(change.path) : null;
|
|
2086
|
+
|
|
2087
|
+
if (result) {
|
|
2088
|
+
const { plan, context } = result;
|
|
2089
|
+
const planJson = JSON.stringify(plan, null, 2);
|
|
2090
|
+
|
|
2091
|
+
// Report what OpenSpec artifacts we found
|
|
2092
|
+
const artifactNotes: string[] = [];
|
|
2093
|
+
if (context.designContent) artifactNotes.push(`design.md (${context.decisions.length} decisions, ${context.fileChanges.length} file changes)`);
|
|
2094
|
+
if (context.specScenarios.length > 0) artifactNotes.push(`specs (${context.specScenarios.length} scenarios for post-merge verification)`);
|
|
2095
|
+
|
|
2096
|
+
pi.sendMessage({
|
|
2097
|
+
customType: "view",
|
|
2098
|
+
content: [
|
|
2099
|
+
assessmentText,
|
|
2100
|
+
"",
|
|
2101
|
+
`**→ OpenSpec plan detected** from \`${change.name}/tasks.md\``,
|
|
2102
|
+
...(artifactNotes.length > 0 ? [`**Artifacts:** ${artifactNotes.join("; ")}`] : []),
|
|
2103
|
+
"",
|
|
2104
|
+
`**Rationale:** ${plan.rationale}`,
|
|
2105
|
+
`**Children:** ${plan.children.map((c) => c.label).join(", ")}`,
|
|
2106
|
+
"",
|
|
2107
|
+
"Review the plan and confirm to execute via `cleave_run`.",
|
|
2108
|
+
].join("\n"),
|
|
2109
|
+
display: true,
|
|
2110
|
+
});
|
|
2111
|
+
|
|
2112
|
+
pi.sendUserMessage(
|
|
2113
|
+
[
|
|
2114
|
+
"## Cleave Decomposition (OpenSpec)",
|
|
2115
|
+
"",
|
|
2116
|
+
`OpenSpec change \`${change.name}\` provides a pre-built split plan.`,
|
|
2117
|
+
"",
|
|
2118
|
+
"### Split Plan",
|
|
2119
|
+
"",
|
|
2120
|
+
"```json",
|
|
2121
|
+
planJson,
|
|
2122
|
+
"```",
|
|
2123
|
+
"",
|
|
2124
|
+
"Present this plan to the user for review. After confirmation,",
|
|
2125
|
+
`use the \`cleave_run\` tool with this plan_json, the original directive,`,
|
|
2126
|
+
`and \`openspec_change_path\` set to \`${change.path}\`.`,
|
|
2127
|
+
"",
|
|
2128
|
+
"### Original Directive",
|
|
2129
|
+
"",
|
|
2130
|
+
directive,
|
|
2131
|
+
].join("\n"),
|
|
2132
|
+
{ deliverAs: "followUp" },
|
|
2133
|
+
);
|
|
2134
|
+
return;
|
|
2135
|
+
}
|
|
2136
|
+
}
|
|
2137
|
+
}
|
|
2138
|
+
|
|
2139
|
+
// ── LLM planning fallback ──────────────────────────────────
|
|
2140
|
+
let repoTree: string;
|
|
2141
|
+
try {
|
|
2142
|
+
repoTree = await getRepoTree(pi, repoPath);
|
|
2143
|
+
} catch {
|
|
2144
|
+
repoTree = "(unable to read repo structure)";
|
|
2145
|
+
}
|
|
2146
|
+
|
|
2147
|
+
const plannerPrompt = buildPlannerPrompt(directive, repoTree, []);
|
|
2148
|
+
|
|
2149
|
+
pi.sendMessage({
|
|
2150
|
+
customType: "view",
|
|
2151
|
+
content: [
|
|
2152
|
+
assessmentText,
|
|
2153
|
+
"",
|
|
2154
|
+
"**→ Decomposition needed.** Generating split plan...",
|
|
2155
|
+
].join("\n"),
|
|
2156
|
+
display: true,
|
|
2157
|
+
});
|
|
2158
|
+
|
|
2159
|
+
// Delegate to the LLM to:
|
|
2160
|
+
// 1. Generate a split plan (can use ask_local_model or think about it)
|
|
2161
|
+
// 2. Present the plan for confirmation
|
|
2162
|
+
// 3. Execute via cleave_run tool
|
|
2163
|
+
pi.sendUserMessage(
|
|
2164
|
+
[
|
|
2165
|
+
`## Cleave Decomposition`,
|
|
2166
|
+
"",
|
|
2167
|
+
`The directive needs decomposition (complexity ${assessment.complexity}, pattern: ${assessment.pattern || "none"}).`,
|
|
2168
|
+
"",
|
|
2169
|
+
"### Step 1: Generate a split plan",
|
|
2170
|
+
"",
|
|
2171
|
+
"Use `ask_local_model` with this planning prompt to generate a JSON split plan:",
|
|
2172
|
+
"",
|
|
2173
|
+
"```",
|
|
2174
|
+
plannerPrompt,
|
|
2175
|
+
"```",
|
|
2176
|
+
"",
|
|
2177
|
+
"Parse the JSON response and present the plan to me for review.",
|
|
2178
|
+
"",
|
|
2179
|
+
"### Step 2: After I confirm",
|
|
2180
|
+
"",
|
|
2181
|
+
"Use the `cleave_run` tool with the plan to execute the decomposition.",
|
|
2182
|
+
"",
|
|
2183
|
+
"### Original Directive",
|
|
2184
|
+
"",
|
|
2185
|
+
directive,
|
|
2186
|
+
].join("\n"),
|
|
2187
|
+
{ deliverAs: "followUp" },
|
|
2188
|
+
);
|
|
2189
|
+
},
|
|
2190
|
+
});
|
|
2191
|
+
|
|
2192
|
+
// ── /cleave resume command ────────────────────────────────────────
/**
 * Resume an interrupted cleave run.
 *
 * When a cleave session is killed before the harvest/merge phase the
 * workspace state file is left with `phase: "dispatch"` and some children
 * still `pending`. This command:
 *
 * 1. Finds the most recent interrupted run for the current repo.
 * 2. Re-dispatches pending children (dispatchChildren skips completed ones).
 * 3. Runs the harvest/merge phase and cleans up worktrees.
 *
 * The full harvest is intentionally the same code path as cleave_run
 * (minus OpenSpec write-back which requires the original change path).
 */
pi.registerCommand("cleave resume", {
  description: "Resume an interrupted cleave run — re-dispatch pending children and complete the harvest/merge phase",
  handler: async (_args, ctx) => {
    const repoPath = ctx.cwd;
    // The command context may expose an abort signal; forwarded to dispatch.
    const signal: AbortSignal | undefined = (ctx as any).signal;

    const incomplete = findIncompleteRuns(repoPath);
    if (incomplete.length === 0) {
      pi.sendMessage({
        customType: "view",
        content: "**Cleave Resume** — no interrupted runs found for this repository.",
        display: true,
      });
      return;
    }

    // findIncompleteRuns returned a non-empty list; take the first entry.
    // NOTE(review): presumably ordered most-recent-first — confirm in
    // findIncompleteRuns.
    const state = incomplete[0]!;

    // Small helper: display a markdown view message in the session.
    const emit = (text: string) => pi.sendMessage({ customType: "view", content: text, display: true });

    const completedBefore = state.children.filter((c) => c.status === "completed").length;
    // "Pending" here means anything not yet terminal (neither completed
    // nor failed) — those are the children we re-dispatch.
    const pendingChildren = state.children.filter(
      (c) => c.status !== "completed" && c.status !== "failed",
    );

    // Status header: run id, directive, base branch, per-child status table.
    const header = [
      `**Cleave Resume** — \`${state.runId}\``,
      `Directive: ${state.directive}`,
      `Base branch: \`${state.baseBranch}\``,
      "",
      ...state.children.map((c) => {
        const icon = c.status === "completed" ? "✅" : c.status === "failed" ? "❌" : "⏳";
        return ` ${icon} [${c.childId}] \`${c.label}\` — ${c.status}`;
      }),
      "",
      `${completedBefore} already completed, ${pendingChildren.length} to dispatch`,
    ].join("\n");
    emit(header);

    // ── Re-dispatch any pending children ──────────────────────
    if (pendingChildren.length > 0) {
      emit(`Resuming dispatch for ${pendingChildren.length} pending child(ren)…`);
      emitCleaveState(pi, "dispatching", state.runId, state.children);
      await dispatchChildren(
        pi,
        state,
        4, // maxParallel
        120 * 60 * 1000, // per-child timeout: 120 minutes in ms
        undefined,
        signal,
        (msg) => emit(msg),
        DEFAULT_REVIEW_CONFIG,
      );
      // Persist child status updates made during dispatch.
      saveState(state);
    }

    // ── Harvest + merge ────────────────────────────────────────
    emitCleaveState(pi, "merging", state.runId, state.children);
    state.phase = "harvest";
    saveState(state);

    // Parse each child's <id>-task.md result file and detect cross-child
    // conflicts before merging.
    const taskContents = readTaskFiles(state.workspacePath);
    const taskResults = [...taskContents.entries()].map(([id, content]) =>
      parseTaskResult(content, `${id}-task.md`),
    );
    const conflicts = detectConflicts(taskResults);

    state.phase = "reunify";
    saveState(state);

    const completedChildren2 = state.children.filter((c) => c.status === "completed");
    const mergeResults: Array<{ label: string; branch: string; success: boolean; conflicts: string[] }> = [];

    // Merge completed child branches into the base branch sequentially;
    // stop at the first failed merge so later branches are not merged on
    // top of a conflicted tree.
    for (const child of completedChildren2) {
      const result = await mergeBranch(pi, repoPath, child.branch, state.baseBranch);
      mergeResults.push({ label: child.label, branch: child.branch, success: result.success, conflicts: result.conflictFiles });
      if (!result.success) break;
    }

    const mergeFailures = mergeResults.filter((m) => !m.success);

    // Full cleanup only when every attempted merge succeeded; otherwise
    // just prune stale worktree directories so failures stay inspectable.
    if (mergeResults.length > 0 && mergeFailures.length === 0) {
      await cleanupWorktrees(pi, repoPath);
    } else {
      await pruneWorktreeDirs(pi, repoPath);
    }

    // ── Finalise state ─────────────────────────────────────────
    // The run is complete only if all children finished, all merges
    // succeeded, and no cross-child conflicts were detected.
    const allOk =
      state.children.every((c) => c.status === "completed") &&
      mergeResults.every((m) => m.success) &&
      conflicts.length === 0;

    state.phase = allOk ? "complete" : "failed";
    state.completedAt = new Date().toISOString();
    state.totalDurationSec = Math.round(
      (new Date(state.completedAt).getTime() - new Date(state.createdAt).getTime()) / 1000,
    );
    emitCleaveState(pi, allOk ? "done" : "failed", state.runId, state.children);
    saveState(state);

    const completedCount = state.children.filter((c) => c.status === "completed").length;
    const failedCount = state.children.filter((c) => c.status === "failed").length;

    // Final summary report; empty sections are filtered out.
    const report = [
      `## Cleave Resume Report: ${state.runId}`,
      "",
      `**Status:** ${allOk ? "✓ COMPLETE" : "✗ ISSUES"}`,
      `**Children:** ${completedCount} completed, ${failedCount} failed of ${state.children.length}`,
      `**Merges:** ${mergeResults.filter((m) => m.success).length} succeeded, ${mergeFailures.length} failed`,
      conflicts.length > 0 ? `\n${formatConflicts(conflicts)}` : "",
      mergeFailures.length > 0
        ? `\n**Merge failures:**\n${mergeFailures.map((m) => ` • \`${m.branch}\`: ${m.conflicts.join(", ") || "unknown error"}`).join("\n")}`
        : "",
    ].filter(Boolean).join("\n");
    emit(report);
  },
});
|
|
2325
|
+
|
|
2326
|
+
// ── cleave_run tool ──────────────────────────────────────────────────
|
|
2327
|
+
pi.registerTool({
|
|
2328
|
+
name: "cleave_run",
|
|
2329
|
+
label: "Cleave Run",
|
|
2330
|
+
description:
|
|
2331
|
+
"Execute a cleave decomposition plan. Creates git worktrees for each child, " +
|
|
2332
|
+
"dispatches child pi processes, harvests results, detects conflicts, and " +
|
|
2333
|
+
"merges branches back. Requires a split plan (from cleave_assess + planning).\n\n" +
|
|
2334
|
+
"Each child runs in an isolated git worktree on its own branch.",
|
|
2335
|
+
promptSnippet:
|
|
2336
|
+
"Execute a cleave decomposition plan — parallel child dispatch in git worktrees, conflict detection, merge, and report",
|
|
2337
|
+
promptGuidelines: [
|
|
2338
|
+
"When an OpenSpec change was used to generate the plan, ALWAYS pass `openspec_change_path` so child tasks get design context and tasks.md is reconciled on completion.",
|
|
2339
|
+
"Treat lifecycle reconciliation as required: after cleave_run, ensure tasks.md, design-tree status, and dashboard-facing progress reflect the merged reality before archive.",
|
|
2340
|
+
"After cleave_run completes with OpenSpec, follow the Next Steps in the report (typically `/assess spec` → `/opsx:verify` → `/opsx:archive`).",
|
|
2341
|
+
],
|
|
2342
|
+
parameters: Type.Object({
|
|
2343
|
+
directive: Type.String({ description: "The original task directive" }),
|
|
2344
|
+
plan_json: Type.String({
|
|
2345
|
+
description:
|
|
2346
|
+
'JSON string of the split plan: {"children": [{"label": "...", "description": "...", "scope": [...], "depends_on": [...]}], "rationale": "..."}',
|
|
2347
|
+
}),
|
|
2348
|
+
prefer_local: Type.Optional(
|
|
2349
|
+
Type.Boolean({ description: "Use local model for leaf tasks when possible (default: true)" }),
|
|
2350
|
+
),
|
|
2351
|
+
max_parallel: Type.Optional(
|
|
2352
|
+
Type.Number({ description: "Maximum parallel children (default: 4)" }),
|
|
2353
|
+
),
|
|
2354
|
+
openspec_change_path: Type.Optional(
|
|
2355
|
+
Type.String({
|
|
2356
|
+
description:
|
|
2357
|
+
"Path to an OpenSpec change directory. When provided, child task files are " +
|
|
2358
|
+
"enriched with design.md context (architecture decisions, file scope) and " +
|
|
2359
|
+
"post-merge verification checks specs against implementation.",
|
|
2360
|
+
}),
|
|
2361
|
+
),
|
|
2362
|
+
review: Type.Optional(
  Type.Boolean({
    description:
      // Fix: "an gloriana-tier" → "a gloriana-tier" (article agreement).
      "Enable adversarial review loop after each child completes. " +
      "Runs a gloriana-tier reviewer that checks for bugs, security issues, " +
      "and spec compliance. Severity-gated fix iterations with churn detection. " +
      "Default: false.",
  }),
),
|
|
2371
|
+
review_max_warning_fixes: Type.Optional(
|
|
2372
|
+
Type.Number({
|
|
2373
|
+
description: "Maximum fix iterations for warning-level issues (default: 1)",
|
|
2374
|
+
}),
|
|
2375
|
+
),
|
|
2376
|
+
review_max_critical_fixes: Type.Optional(
|
|
2377
|
+
Type.Number({
|
|
2378
|
+
description: "Maximum fix iterations for critical issues before escalation (default: 2)",
|
|
2379
|
+
}),
|
|
2380
|
+
),
|
|
2381
|
+
review_churn_threshold: Type.Optional(
|
|
2382
|
+
Type.Number({
|
|
2383
|
+
description: "Fraction of reappearing issues that triggers churn bail (default: 0.5)",
|
|
2384
|
+
}),
|
|
2385
|
+
),
|
|
2386
|
+
}),
|
|
2387
|
+
|
|
2388
|
+
// One-line call rendering: child count parsed from the plan JSON plus a
// truncated preview of the directive.
renderCall(args, theme) {
  let parsedPlan: any = null;
  try {
    parsedPlan = JSON.parse(args.plan_json);
  } catch {
    // Malformed plan JSON — child count falls back to "?" below.
  }
  const childCount = Array.isArray(parsedPlan?.children) ? parsedPlan.children.length : "?";
  const preview = args.directive.length > 50
    ? args.directive.slice(0, 47) + "…"
    : args.directive;
  return sciCall("cleave_run", `${childCount} children · ${preview}`, theme);
},
|
|
2398
|
+
|
|
2399
|
+
/**
 * Render the cleave_run tool result in three modes:
 * - partial (run in progress): a phase-aware per-child status table;
 * - final + expanded: child list (or raw text fallback) with merge footer;
 * - final + collapsed: a single status line (error / conflict / ok).
 */
renderResult(result, { expanded, isPartial }, theme) {
  if (isPartial) {
    // Phase-aware child table from details
    const d = result?.details as {
      children?: Array<{ label: string; status: string; branch?: string }>;
      phase?: string;
    } | undefined;
    const children = d?.children ?? [];
    if (children.length === 0) {
      // No structured children yet — show the first line of raw text.
      const msg = result.content?.[0];
      const txt = (msg && "text" in msg ? msg.text : null) ?? "running…";
      const phase = d?.phase ? theme.fg("dim", ` [${d.phase}]`) : "";
      return sciOk(txt.split("\n")[0].slice(0, 60) + phase, theme);
    }
    const done = children.filter((c) => c.status === "completed").length;
    const failed = children.filter((c) => c.status === "failed").length;
    const running = children.filter((c) => c.status === "running").length;
    const total = children.length;
    const phase = d?.phase ?? "running";

    // Footer: "x/y done [n running] [m failed] · phase"
    const parts = [`${done}/${total} done`];
    if (running > 0) parts.push(`${running} running`);
    if (failed > 0) parts.push(theme.fg("error", `${failed} failed`));
    const footer = parts.join(" ") + theme.fg("dim", ` · ${phase}`);

    // One row per child (capped at 10), icon + colorized label by status.
    const rows = children.slice(0, 10).map((c) => {
      const icon =
        c.status === "completed" ? theme.fg("success", "✓")
        : c.status === "running" ? theme.fg("warning", "⟳")
        : c.status === "failed" ? theme.fg("error", "✕")
        : theme.fg("muted", "○");
      const label = c.status === "running"
        ? theme.fg("accent", c.label)
        : c.status === "failed"
          ? theme.fg("error", c.label)
          : theme.fg("dim", c.label);
      return `${icon} ${label}`;
    });
    if (children.length > 10) {
      rows.push(theme.fg("muted", `… ${children.length - 10} more`));
    }
    return sciExpanded(rows, footer, theme);
  }

  // Final result
  const first = result.content?.[0];
  const text = (first && "text" in first ? first.text : null) ?? "";
  const isError = (result as any).isError;
  // Heuristic: flag conflicts from the raw report text.
  const hasConflicts = text.toLowerCase().includes("conflict");

  // Extract structured info from details for expanded view
  const d = result?.details as {
    children?: Array<{ label: string; status: string }>;
    merged?: number; failed?: number; conflicts?: number;
    duration?: number; filesChanged?: number;
  } | undefined;

  if (expanded) {
    const lines: string[] = [];
    const children = d?.children ?? [];
    if (children.length > 0) {
      // Child list capped at 12 rows.
      for (const c of children.slice(0, 12)) {
        const icon =
          c.status === "completed" ? theme.fg("success", "✓")
          : c.status === "failed" ? theme.fg("error", "✕")
          : theme.fg("muted", "○");
        const label = c.status === "failed"
          ? theme.fg("error", c.label)
          : theme.fg("muted", c.label);
        lines.push(`${icon} ${label}`);
      }
      if (children.length > 12) {
        lines.push(theme.fg("muted", `… ${children.length - 12} more`));
      }
    } else {
      // Fall back to raw text
      lines.push(...text.split("\n").slice(0, 15));
    }

    // Prefer explicit counters from details; recompute from children
    // statuses when absent. duration is in milliseconds.
    const merged = d?.merged ?? children.filter(c => c.status === "completed").length;
    const failed = d?.failed ?? children.filter(c => c.status === "failed").length;
    const total = children.length || "?";
    const dur = d?.duration != null ? ` · ${(d.duration / 1000).toFixed(0)}s` : "";
    const footer = failed > 0
      ? `${merged}/${total} merged ${theme.fg("error", `${failed} failed`)}${dur}`
      : `${merged}/${total} merged${dur}`;

    return sciExpanded(lines, footer, theme);
  }

  // Collapsed
  const firstLine = text.split("\n")[0];
  if (isError) {
    return sciErr("✕ " + firstLine.slice(0, 70), theme);
  } else if (hasConflicts) {
    return sciOk("⚠ " + firstLine.slice(0, 70), theme);
  } else {
    return sciOk("✓ " + firstLine.slice(0, 70), theme);
  }
},
|
|
2499
|
+
|
|
2500
|
+
async execute(_toolCallId, params, signal, onUpdate, ctx) {
|
|
2501
|
+
// Parse the plan
|
|
2502
|
+
emitCleaveState(pi, "assessing");
|
|
2503
|
+
|
|
2504
|
+
let plan: SplitPlan;
|
|
2505
|
+
try {
|
|
2506
|
+
plan = parsePlanResponse(params.plan_json);
|
|
2507
|
+
} catch (e: any) {
|
|
2508
|
+
emitCleaveState(pi, "failed");
|
|
2509
|
+
throw new Error(`Invalid split plan: ${e.message}`);
|
|
2510
|
+
}
|
|
2511
|
+
|
|
2512
|
+
const repoPath = ctx.cwd;
|
|
2513
|
+
const maxParallel = params.max_parallel ?? DEFAULT_CONFIG.maxParallel;
|
|
2514
|
+
const preferLocal = params.prefer_local ?? DEFAULT_CONFIG.preferLocal;
|
|
2515
|
+
|
|
2516
|
+
emitCleaveState(pi, "planning");
|
|
2517
|
+
|
|
2518
|
+
// ── OPENSPEC CONTEXT ───────────────────────────────────────
|
|
2519
|
+
let openspecCtx: OpenSpecContext | null = null;
|
|
2520
|
+
if (params.openspec_change_path) {
|
|
2521
|
+
try {
|
|
2522
|
+
openspecCtx = buildOpenSpecContext(params.openspec_change_path);
|
|
2523
|
+
} catch {
|
|
2524
|
+
// Non-fatal — proceed without enrichment
|
|
2525
|
+
}
|
|
2526
|
+
}
|
|
2527
|
+
|
|
2528
|
+
// ── SKILL MATCHING ─────────────────────────────────────────
|
|
2529
|
+
// Initialize skills on children (parsePlanResponse may not set them)
|
|
2530
|
+
for (const child of plan.children) {
|
|
2531
|
+
child.skills = child.skills ?? [];
|
|
2532
|
+
}
|
|
2533
|
+
|
|
2534
|
+
// Auto-match skills from scope patterns for children without annotations
|
|
2535
|
+
matchSkillsToAllChildren(plan.children);
|
|
2536
|
+
|
|
2537
|
+
// Resolve skill names to absolute SKILL.md paths
|
|
2538
|
+
const allSkillNames = new Set(plan.children.flatMap((c) => c.skills));
|
|
2539
|
+
const { resolved: resolvedPaths } = resolveSkillPaths([...allSkillNames]);
|
|
2540
|
+
|
|
2541
|
+
// Build per-child skill directive map
|
|
2542
|
+
const resolvedSkillMap = new Map<number, SkillDirective[]>();
|
|
2543
|
+
for (let i = 0; i < plan.children.length; i++) {
|
|
2544
|
+
const child = plan.children[i];
|
|
2545
|
+
const directives: SkillDirective[] = [];
|
|
2546
|
+
for (const skillName of child.skills) {
|
|
2547
|
+
const found = resolvedPaths.find((r) => r.skill === skillName);
|
|
2548
|
+
if (found) {
|
|
2549
|
+
directives.push({ skill: found.skill, path: found.path });
|
|
2550
|
+
}
|
|
2551
|
+
}
|
|
2552
|
+
resolvedSkillMap.set(i, directives);
|
|
2553
|
+
}
|
|
2554
|
+
|
|
2555
|
+
// ── PREFLIGHT ──────────────────────────────────────────────
|
|
2556
|
+
const toolUi = (ctx as {
|
|
2557
|
+
ui?: {
|
|
2558
|
+
input?: (prompt: string, initial?: string) => Promise<string | undefined>;
|
|
2559
|
+
select?: (title: string, options: string[]) => Promise<string | undefined>;
|
|
2560
|
+
};
|
|
2561
|
+
}).ui;
|
|
2562
|
+
const preflightOutcome = await runDirtyTreePreflight(pi, {
|
|
2563
|
+
repoPath,
|
|
2564
|
+
openspecChangePath: params.openspec_change_path,
|
|
2565
|
+
onUpdate,
|
|
2566
|
+
ui: (toolUi?.input || toolUi?.select) ? {
|
|
2567
|
+
...(typeof toolUi.input === "function" ? { input: toolUi.input.bind(toolUi) } : {}),
|
|
2568
|
+
...(typeof toolUi.select === "function" ? { select: toolUi.select.bind(toolUi) } : {}),
|
|
2569
|
+
} : undefined,
|
|
2570
|
+
});
|
|
2571
|
+
if (preflightOutcome === "skip_cleave") {
|
|
2572
|
+
const message = "Dirty-tree preflight resolved to proceed without cleave. Worktree creation and dispatch were skipped.";
|
|
2573
|
+
emitCleaveState(pi, "idle");
|
|
2574
|
+
return {
|
|
2575
|
+
content: [{ type: "text", text: message }],
|
|
2576
|
+
details: { phase: "preflight", skipped: true, reason: "proceed_without_cleave" },
|
|
2577
|
+
};
|
|
2578
|
+
}
|
|
2579
|
+
if (preflightOutcome === "cancelled") {
|
|
2580
|
+
emitCleaveState(pi, "idle");
|
|
2581
|
+
return {
|
|
2582
|
+
content: [{ type: "text", text: "Cleave cancelled during dirty-tree preflight." }],
|
|
2583
|
+
details: { phase: "preflight", cancelled: true },
|
|
2584
|
+
};
|
|
2585
|
+
}
|
|
2586
|
+
await ensureCleanWorktree(pi, repoPath);
|
|
2587
|
+
const baseBranch = await getCurrentBranch(pi, repoPath);
|
|
2588
|
+
|
|
2589
|
+
// ── MODEL RESOLUTION ───────────────────────────────────────
|
|
2590
|
+
// Determine local model availability (needed for model resolution)
|
|
2591
|
+
let localModelAvailable = false;
|
|
2592
|
+
let localModel: string | undefined;
|
|
2593
|
+
if (preferLocal) {
|
|
2594
|
+
try {
|
|
2595
|
+
const ollamaResult = await pi.exec("ollama", ["list", "--json"], { timeout: 5_000 });
|
|
2596
|
+
if (ollamaResult.code === 0) {
|
|
2597
|
+
const models = JSON.parse(ollamaResult.stdout);
|
|
2598
|
+
if (Array.isArray(models?.models) && models.models.length > 0) {
|
|
2599
|
+
// Prefer code-optimised models for leaf tasks; fall back in order
|
|
2600
|
+
const available = models.models.map((m: { name: string }) => m.name);
|
|
2601
|
+
// Code-biased preference from shared registry (extensions/lib/local-models.ts)
|
|
2602
|
+
const { PREFERRED_ORDER_CODE: preferredCodeModels } = await import("../lib/local-models.ts");
|
|
2603
|
+
localModel =
|
|
2604
|
+
preferredCodeModels.find((id) => available.includes(id)) ?? available[0];
|
|
2605
|
+
localModelAvailable = true;
|
|
2606
|
+
}
|
|
2607
|
+
}
|
|
2608
|
+
} catch {
|
|
2609
|
+
// No local model available
|
|
2610
|
+
}
|
|
2611
|
+
}
|
|
2612
|
+
|
|
2613
|
+
// Resolve execute model for each child
|
|
2614
|
+
for (const child of plan.children) {
|
|
2615
|
+
child.executeModel = resolveExecuteModel(
|
|
2616
|
+
child,
|
|
2617
|
+
preferLocal,
|
|
2618
|
+
localModelAvailable,
|
|
2619
|
+
getPreferredTier,
|
|
2620
|
+
);
|
|
2621
|
+
}
|
|
2622
|
+
|
|
2623
|
+
// ── INITIALIZE STATE ───────────────────────────────────────
|
|
2624
|
+
const state: CleaveState = {
|
|
2625
|
+
runId: generateRunId(),
|
|
2626
|
+
phase: "dispatch",
|
|
2627
|
+
directive: params.directive,
|
|
2628
|
+
repoPath,
|
|
2629
|
+
baseBranch,
|
|
2630
|
+
assessment: assessDirective(params.directive),
|
|
2631
|
+
plan,
|
|
2632
|
+
children: plan.children.map((c, i) => ({
|
|
2633
|
+
childId: i,
|
|
2634
|
+
label: c.label,
|
|
2635
|
+
dependsOn: c.dependsOn,
|
|
2636
|
+
status: "pending" as const,
|
|
2637
|
+
branch: `cleave/${i}-${c.label}`,
|
|
2638
|
+
backend: c.executeModel === "local" ? "local" as const : "cloud" as const,
|
|
2639
|
+
executeModel: c.executeModel,
|
|
2640
|
+
})),
|
|
2641
|
+
workspacePath: "",
|
|
2642
|
+
totalDurationSec: 0,
|
|
2643
|
+
createdAt: new Date().toISOString(),
|
|
2644
|
+
};
|
|
2645
|
+
|
|
2646
|
+
// Create workspace — pass OpenSpec context and resolved skills to enrich child task files
|
|
2647
|
+
const wsPath = initWorkspace(state, plan, repoPath, openspecCtx, resolvedSkillMap);
|
|
2648
|
+
state.workspacePath = wsPath;
|
|
2649
|
+
|
|
2650
|
+
// ── CREATE WORKTREES ───────────────────────────────────────
|
|
2651
|
+
onUpdate?.({
|
|
2652
|
+
content: [{ type: "text", text: "Creating git worktrees..." }],
|
|
2653
|
+
details: { phase: "dispatch", children: state.children },
|
|
2654
|
+
});
|
|
2655
|
+
|
|
2656
|
+
for (const child of state.children) {
|
|
2657
|
+
try {
|
|
2658
|
+
const wt = await createWorktree(pi, repoPath, child.label, child.childId, baseBranch);
|
|
2659
|
+
child.worktreePath = wt.path;
|
|
2660
|
+
child.branch = wt.branch;
|
|
2661
|
+
} catch (e: any) {
|
|
2662
|
+
child.status = "failed";
|
|
2663
|
+
child.error = `Worktree creation failed: ${e.message}`;
|
|
2664
|
+
}
|
|
2665
|
+
}
|
|
2666
|
+
|
|
2667
|
+
saveState(state);
|
|
2668
|
+
|
|
2669
|
+
// ── DISPATCH ───────────────────────────────────────────────
|
|
2670
|
+
// localModel was already resolved in MODEL RESOLUTION section above
|
|
2671
|
+
|
|
2672
|
+
emitCleaveState(pi, "dispatching", state.runId, state.children);
|
|
2673
|
+
|
|
2674
|
+
onUpdate?.({
|
|
2675
|
+
content: [{ type: "text", text: `Dispatching ${state.children.length} children...` }],
|
|
2676
|
+
details: { phase: "dispatch", children: state.children },
|
|
2677
|
+
});
|
|
2678
|
+
|
|
2679
|
+
// ── REVIEW CONFIG ──────────────────────────────────────
|
|
2680
|
+
const reviewConfig: ReviewConfig = {
|
|
2681
|
+
enabled: params.review ?? DEFAULT_REVIEW_CONFIG.enabled,
|
|
2682
|
+
maxWarningFixes: params.review_max_warning_fixes ?? DEFAULT_REVIEW_CONFIG.maxWarningFixes,
|
|
2683
|
+
maxCriticalFixes: params.review_max_critical_fixes ?? DEFAULT_REVIEW_CONFIG.maxCriticalFixes,
|
|
2684
|
+
churnThreshold: params.review_churn_threshold ?? DEFAULT_REVIEW_CONFIG.churnThreshold,
|
|
2685
|
+
};
|
|
2686
|
+
|
|
2687
|
+
await dispatchChildren(
|
|
2688
|
+
pi,
|
|
2689
|
+
state,
|
|
2690
|
+
maxParallel,
|
|
2691
|
+
120 * 60 * 1000, // 2 hour timeout per child
|
|
2692
|
+
localModel,
|
|
2693
|
+
signal ?? undefined,
|
|
2694
|
+
(msg) => {
|
|
2695
|
+
onUpdate?.({
|
|
2696
|
+
content: [{ type: "text", text: msg }],
|
|
2697
|
+
details: { phase: "dispatch", children: state.children },
|
|
2698
|
+
});
|
|
2699
|
+
},
|
|
2700
|
+
reviewConfig,
|
|
2701
|
+
);
|
|
2702
|
+
|
|
2703
|
+
// ── HARVEST + CONFLICTS ────────────────────────────────────
|
|
2704
|
+
emitCleaveState(pi, "merging", state.runId, state.children);
|
|
2705
|
+
|
|
2706
|
+
state.phase = "harvest";
|
|
2707
|
+
saveState(state);
|
|
2708
|
+
|
|
2709
|
+
const taskContents = readTaskFiles(wsPath);
|
|
2710
|
+
const taskResults = [...taskContents.entries()].map(([id, content]) =>
|
|
2711
|
+
parseTaskResult(content, `${id}-task.md`),
|
|
2712
|
+
);
|
|
2713
|
+
const conflicts = detectConflicts(taskResults);
|
|
2714
|
+
const cleaveCandidates = taskResults.flatMap((result) =>
|
|
2715
|
+
result.summary
|
|
2716
|
+
? emitResolvedBugCandidate(result.summary, result.path)
|
|
2717
|
+
: [],
|
|
2718
|
+
);
|
|
2719
|
+
|
|
2720
|
+
// ── MERGE ──────────────────────────────────────────────────
|
|
2721
|
+
state.phase = "reunify";
|
|
2722
|
+
saveState(state);
|
|
2723
|
+
|
|
2724
|
+
const completedChildren = state.children.filter((c) => c.status === "completed");
|
|
2725
|
+
const mergeResults: Array<{ label: string; branch: string; success: boolean; conflicts: string[] }> = [];
|
|
2726
|
+
|
|
2727
|
+
for (const child of completedChildren) {
|
|
2728
|
+
const result = await mergeBranch(pi, repoPath, child.branch, baseBranch);
|
|
2729
|
+
mergeResults.push({
|
|
2730
|
+
label: child.label,
|
|
2731
|
+
branch: child.branch,
|
|
2732
|
+
success: result.success,
|
|
2733
|
+
conflicts: result.conflictFiles,
|
|
2734
|
+
});
|
|
2735
|
+
// On merge failure, stop merging further children to avoid
|
|
2736
|
+
// compounding a partially-merged state
|
|
2737
|
+
if (!result.success) break;
|
|
2738
|
+
}
|
|
2739
|
+
|
|
2740
|
+
// ── CLEANUP ────────────────────────────────────────────────
|
|
2741
|
+
// Only clean up worktrees if all merges succeeded. On merge
|
|
2742
|
+
// failure, preserve branches so the user can manually resolve.
|
|
2743
|
+
const mergeFailures = mergeResults.filter((m) => !m.success);
|
|
2744
|
+
|
|
2745
|
+
// ── TASK WRITE-BACK ────────────────────────────────────────
|
|
2746
|
+
// Mark completed child tasks as [x] done in OpenSpec tasks.md
|
|
2747
|
+
let writeBackResult: { updated: number; totalTasks: number; allDone: boolean; unmatchedLabels: string[] } | null = null;
|
|
2748
|
+
if (params.openspec_change_path && mergeFailures.length === 0) {
|
|
2749
|
+
const completedLabels = state.children
|
|
2750
|
+
.filter((c) => c.status === "completed")
|
|
2751
|
+
.map((c) => c.label);
|
|
2752
|
+
try {
|
|
2753
|
+
writeBackResult = writeBackTaskCompletion(params.openspec_change_path, completedLabels);
|
|
2754
|
+
emitOpenSpecState(repoPath, pi);
|
|
2755
|
+
} catch {
|
|
2756
|
+
// Non-fatal — report will note write-back wasn't possible
|
|
2757
|
+
}
|
|
2758
|
+
}
|
|
2759
|
+
|
|
2760
|
+
// ── SPEC VERIFICATION ──────────────────────────────────────
|
|
2761
|
+
// If OpenSpec specs exist, check implementation against scenarios
|
|
2762
|
+
let specVerification: string | null = null;
|
|
2763
|
+
if (openspecCtx && openspecCtx.specScenarios.length > 0 && mergeFailures.length === 0) {
|
|
2764
|
+
specVerification = formatSpecVerification(openspecCtx);
|
|
2765
|
+
}
|
|
2766
|
+
// ── POST-MERGE GUARDRAILS ──────────────────────────────────
|
|
2767
|
+
let guardrailReport: string | null = null;
|
|
2768
|
+
if (mergeFailures.length === 0) {
|
|
2769
|
+
try {
|
|
2770
|
+
const checks = discoverGuardrails(repoPath);
|
|
2771
|
+
if (checks.length > 0) {
|
|
2772
|
+
const suite = runGuardrails(repoPath, checks);
|
|
2773
|
+
if (suite.allPassed) {
|
|
2774
|
+
guardrailReport = "### Static Analysis\n\n✅ All deterministic checks passed after merge";
|
|
2775
|
+
} else {
|
|
2776
|
+
const failures = suite.results.filter((r) => !r.passed);
|
|
2777
|
+
const lines = ["### Static Analysis", "", "⚠ **Post-merge regressions detected**", ""];
|
|
2778
|
+
for (const f of failures) {
|
|
2779
|
+
const capped = f.output.split("\n").slice(0, 20).join("\n");
|
|
2780
|
+
lines.push(`**${f.check.name}** (exit ${f.exitCode}, ${f.durationMs}ms):`);
|
|
2781
|
+
lines.push("```", capped, "```", "");
|
|
2782
|
+
}
|
|
2783
|
+
guardrailReport = lines.join("\n");
|
|
2784
|
+
}
|
|
2785
|
+
}
|
|
2786
|
+
} catch { /* non-fatal */ }
|
|
2787
|
+
}
|
|
2788
|
+
|
|
2789
|
+
if (cleaveCandidates.length > 0) {
|
|
2790
|
+
(sharedState.lifecycleCandidateQueue ??= []).push({
|
|
2791
|
+
source: "cleave",
|
|
2792
|
+
context: `cleave run ${state.runId} final outcomes`,
|
|
2793
|
+
candidates: cleaveCandidates,
|
|
2794
|
+
});
|
|
2795
|
+
}
|
|
2796
|
+
|
|
2797
|
+
if (mergeResults.length > 0 && mergeFailures.length === 0) {
|
|
2798
|
+
// All merges succeeded — safe to clean up worktrees and branches
|
|
2799
|
+
await cleanupWorktrees(pi, repoPath);
|
|
2800
|
+
} else if (mergeResults.length === 0) {
|
|
2801
|
+
// No merges attempted (e.g., all children misclassified or failed).
|
|
2802
|
+
// Preserve branches — they may contain committed work.
|
|
2803
|
+
// Only prune worktree directories to reclaim disk space.
|
|
2804
|
+
await pruneWorktreeDirs(pi, repoPath);
|
|
2805
|
+
} else {
|
|
2806
|
+
// Partial merge failure — preserve branches for manual resolution
|
|
2807
|
+
await pruneWorktreeDirs(pi, repoPath);
|
|
2808
|
+
}
|
|
2809
|
+
|
|
2810
|
+
// ── REPORT ─────────────────────────────────────────────────
|
|
2811
|
+
state.phase = "complete";
|
|
2812
|
+
state.completedAt = new Date().toISOString();
|
|
2813
|
+
state.totalDurationSec = Math.round(
|
|
2814
|
+
(new Date(state.completedAt).getTime() - new Date(state.createdAt).getTime()) / 1000,
|
|
2815
|
+
);
|
|
2816
|
+
|
|
2817
|
+
const allOk =
|
|
2818
|
+
state.children.every((c) => c.status === "completed") &&
|
|
2819
|
+
mergeResults.every((m) => m.success) &&
|
|
2820
|
+
conflicts.length === 0;
|
|
2821
|
+
|
|
2822
|
+
if (!allOk) state.phase = "failed";
|
|
2823
|
+
emitCleaveState(pi, allOk ? "done" : "failed", state.runId, state.children);
|
|
2824
|
+
saveState(state);
|
|
2825
|
+
|
|
2826
|
+
// Build report
|
|
2827
|
+
const completedCount = state.children.filter((c) => c.status === "completed").length;
|
|
2828
|
+
const failedCount = state.children.filter((c) => c.status === "failed").length;
|
|
2829
|
+
|
|
2830
|
+
const reportLines = [
|
|
2831
|
+
`## Cleave Report: ${state.runId}`,
|
|
2832
|
+
"",
|
|
2833
|
+
`**Directive:** ${params.directive}`,
|
|
2834
|
+
`**Status:** ${allOk ? "✓ SUCCESS" : "✗ ISSUES DETECTED"}`,
|
|
2835
|
+
`**Children:** ${completedCount} completed, ${failedCount} failed of ${state.children.length}`,
|
|
2836
|
+
`**Duration:** ${state.totalDurationSec}s`,
|
|
2837
|
+
`**Workspace:** \`${wsPath}\``,
|
|
2838
|
+
"",
|
|
2839
|
+
];
|
|
2840
|
+
|
|
2841
|
+
// Child details
|
|
2842
|
+
for (const child of state.children) {
|
|
2843
|
+
const icon = child.status === "completed" ? "✓" : child.status === "failed" ? "✗" : "⏳";
|
|
2844
|
+
const dur = child.durationSec ? ` (${child.durationSec}s)` : "";
|
|
2845
|
+
const reviewNote = child.reviewIterations && child.reviewIterations > 0
|
|
2846
|
+
? ` [${child.reviewIterations} review${child.reviewIterations > 1 ? "s" : ""}: ${child.reviewDecision}]`
|
|
2847
|
+
: "";
|
|
2848
|
+
reportLines.push(` ${icon} **${child.label}** [${child.backend ?? "cloud"}]${dur}: ${child.status}${reviewNote}`);
|
|
2849
|
+
if (child.error) reportLines.push(` Error: ${child.error}`);
|
|
2850
|
+
if (child.reviewEscalationReason) reportLines.push(` Review: ${child.reviewEscalationReason}`);
|
|
2851
|
+
}
|
|
2852
|
+
|
|
2853
|
+
// Conflicts
|
|
2854
|
+
if (conflicts.length > 0) {
|
|
2855
|
+
reportLines.push("", "### Conflicts", "", formatConflicts(conflicts));
|
|
2856
|
+
}
|
|
2857
|
+
|
|
2858
|
+
// Merge results (always show — makes partial merge state explicit)
|
|
2859
|
+
if (mergeResults.length > 0) {
|
|
2860
|
+
reportLines.push("", "### Merge Results");
|
|
2861
|
+
const mergeSuccesses = mergeResults.filter((m) => m.success);
|
|
2862
|
+
const notAttempted = completedChildren
|
|
2863
|
+
.filter((c) => !mergeResults.some((m) => m.label === c.label))
|
|
2864
|
+
.map((c) => c.label);
|
|
2865
|
+
for (const m of mergeSuccesses) {
|
|
2866
|
+
reportLines.push(` ✓ ${m.label} merged`);
|
|
2867
|
+
}
|
|
2868
|
+
for (const m of mergeFailures) {
|
|
2869
|
+
reportLines.push(` ✗ ${m.label}: conflicts in ${m.conflicts.join(", ")}`);
|
|
2870
|
+
}
|
|
2871
|
+
for (const label of notAttempted) {
|
|
2872
|
+
reportLines.push(` ⏭ ${label}: skipped (earlier merge failed)`);
|
|
2873
|
+
}
|
|
2874
|
+
}
|
|
2875
|
+
|
|
2876
|
+
// Spec verification (post-merge)
|
|
2877
|
+
if (specVerification) {
|
|
2878
|
+
reportLines.push("", specVerification);
|
|
2879
|
+
}
|
|
2880
|
+
|
|
2881
|
+
// Post-merge guardrail results
|
|
2882
|
+
if (guardrailReport) {
|
|
2883
|
+
reportLines.push("", guardrailReport);
|
|
2884
|
+
}
|
|
2885
|
+
|
|
2886
|
+
// Task write-back status
|
|
2887
|
+
if (writeBackResult && writeBackResult.updated > 0) {
|
|
2888
|
+
reportLines.push(
|
|
2889
|
+
"",
|
|
2890
|
+
"### Task Write-Back",
|
|
2891
|
+
` ✓ Marked ${writeBackResult.updated} tasks as done in \`tasks.md\``,
|
|
2892
|
+
);
|
|
2893
|
+
}
|
|
2894
|
+
if (writeBackResult && writeBackResult.unmatchedLabels.length > 0) {
|
|
2895
|
+
reportLines.push(
|
|
2896
|
+
"",
|
|
2897
|
+
"### Lifecycle Reconciliation Warning",
|
|
2898
|
+
" ⚠ Completed cleave work could not be mapped back into `tasks.md` for:",
|
|
2899
|
+
...writeBackResult.unmatchedLabels.map((label) => ` - ${label}`),
|
|
2900
|
+
"",
|
|
2901
|
+
" tasks.md no longer matches the implementation plan. Reconcile the OpenSpec task groups before archive.",
|
|
2902
|
+
);
|
|
2903
|
+
}
|
|
2904
|
+
|
|
2905
|
+
// Next steps guidance
|
|
2906
|
+
if (allOk && params.openspec_change_path) {
|
|
2907
|
+
reportLines.push("", "### Next Steps");
|
|
2908
|
+
if (writeBackResult?.allDone) {
|
|
2909
|
+
reportLines.push(
|
|
2910
|
+
" All tasks complete. Ready to finalize:",
|
|
2911
|
+
" 1. Run `/assess spec` to validate implementation against spec scenarios",
|
|
2912
|
+
" 2. Run `/opsx:verify` for full verification",
|
|
2913
|
+
" 3. Run `/opsx:archive` to merge delta specs and close the change",
|
|
2914
|
+
);
|
|
2915
|
+
} else {
|
|
2916
|
+
reportLines.push(
|
|
2917
|
+
" Some tasks remain. Continue with:",
|
|
2918
|
+
" 1. Run `/opsx:apply` to work on remaining tasks",
|
|
2919
|
+
" 2. Or run `/cleave` again targeting the unfinished groups",
|
|
2920
|
+
);
|
|
2921
|
+
}
|
|
2922
|
+
} else if (allOk && !params.openspec_change_path) {
|
|
2923
|
+
reportLines.push(
|
|
2924
|
+
"",
|
|
2925
|
+
"### Next Steps",
|
|
2926
|
+
" Run tests and review the merged changes.",
|
|
2927
|
+
);
|
|
2928
|
+
}
|
|
2929
|
+
|
|
2930
|
+
const rawReport = reportLines.join("\n");
|
|
2931
|
+
const truncation = truncateTail(rawReport, {
|
|
2932
|
+
maxLines: DEFAULT_MAX_LINES,
|
|
2933
|
+
maxBytes: DEFAULT_MAX_BYTES,
|
|
2934
|
+
});
|
|
2935
|
+
let report = truncation.content;
|
|
2936
|
+
if (truncation.truncated) {
|
|
2937
|
+
report += `\n\n[Report truncated: ${truncation.outputLines} of ${truncation.totalLines} lines` +
|
|
2938
|
+
` (${formatSize(truncation.outputBytes)} of ${formatSize(truncation.totalBytes)})]`;
|
|
2939
|
+
}
|
|
2940
|
+
|
|
2941
|
+
return {
|
|
2942
|
+
content: [{ type: "text", text: report }],
|
|
2943
|
+
details: {
|
|
2944
|
+
runId: state.runId,
|
|
2945
|
+
success: allOk,
|
|
2946
|
+
childrenCompleted: completedCount,
|
|
2947
|
+
childrenFailed: failedCount,
|
|
2948
|
+
conflictsFound: conflicts.length,
|
|
2949
|
+
mergeFailures: mergeFailures.length,
|
|
2950
|
+
workspacePath: wsPath,
|
|
2951
|
+
},
|
|
2952
|
+
};
|
|
2953
|
+
},
|
|
2954
|
+
});
|
|
2955
|
+
|
|
2956
|
+
// ─── /cleave inspect ──────────────────────────────────────────────────────
|
|
2957
|
+
|
|
2958
|
+
/**
|
|
2959
|
+
* Run `git diff --stat` + `git status --short` in a worktree.
|
|
2960
|
+
* Returns a formatted string or an empty string if no changes / no worktree.
|
|
2961
|
+
* Result is cached for 2s to avoid subprocess spam on repeated renders.
|
|
2962
|
+
*/
|
|
2963
|
+
const execFileAsync = promisify(execFile);
|
|
2964
|
+
const diffCache = new Map<string, { ts: number; result: string }>();
|
|
2965
|
+
|
|
2966
|
+
async function runGitDiff(worktreePath: string): Promise<string> {
|
|
2967
|
+
const cached = diffCache.get(worktreePath);
|
|
2968
|
+
if (cached && Date.now() - cached.ts < 2000) return cached.result;
|
|
2969
|
+
try {
|
|
2970
|
+
const [stat, status] = await Promise.all([
|
|
2971
|
+
execFileAsync("git", ["diff", "--stat", "HEAD"], { cwd: worktreePath }).then(r => r.stdout.trim()).catch(() => ""),
|
|
2972
|
+
execFileAsync("git", ["status", "--short"], { cwd: worktreePath }).then(r => r.stdout.trim()).catch(() => ""),
|
|
2973
|
+
]);
|
|
2974
|
+
const result = [stat, status].filter(Boolean).join("\n") || "(no changes yet)";
|
|
2975
|
+
diffCache.set(worktreePath, { ts: Date.now(), result });
|
|
2976
|
+
return result;
|
|
2977
|
+
} catch {
|
|
2978
|
+
return "(git unavailable)";
|
|
2979
|
+
}
|
|
2980
|
+
}
|
|
2981
|
+
|
|
2982
|
+
/**
 * Open the inspect overlay for a specific child index.
 *
 * Renders a full-screen TUI component showing the cleave children list,
 * the selected child's recent output lines, and a live `git diff` of its
 * worktree. Keys: up/k and down/j change selection, r forces a diff
 * refresh (bypassing diffCache), q/escape closes. Resolves when the
 * overlay is dismissed.
 *
 * @param childIdx index of the child to select initially (clamped to range)
 * @param ctx      command context providing ctx.ui.custom
 */
async function openInspectOverlay(childIdx: number, ctx: ExtensionCommandContext): Promise<void> {
  // Snapshot of the live cleave run published on sharedState.
  const cl = (sharedState as any).cleave;
  const children: any[] = cl?.children ?? [];

  // Clamp the requested index into the valid range (or 0 when empty).
  let selected = Math.max(0, Math.min(childIdx, children.length - 1));
  let diffText = children.length === 0 ? "" : "(loading…)";
  let diffLoaded = false;

  // Fetch the selected child's worktree diff into the closed-over state.
  const loadDiff = async () => {
    const child = children[selected];
    if (!child?.worktreePath) { diffText = "(no worktree assigned)"; diffLoaded = true; return; }
    diffText = await runGitDiff(child.worktreePath);
    diffLoaded = true;
  };
  // Load diff async — don't block overlay open
  loadDiff();

  await ctx.ui.custom((tui, theme, _kb, done) => {
    const component = {
      // Produce the overlay's lines for the given terminal width.
      render(width: number): string[] {
        const lines: string[] = [];
        const child = children[selected];
        // Section divider: " ── label ──────" padded to full width.
        const divFill = (label: string) => {
          const prefix = ` ── ${label} `;
          return theme.fg("dim", prefix + "─".repeat(Math.max(0, width - prefix.length)));
        };

        // ── Header ──────────────────────────────────────────────
        const headerLabel = ` ⚡ cleave inspect `;
        const headerFill = Math.max(0, width - headerLabel.length - 2);
        lines.push(
          theme.fg("dim", "──") +
          theme.fg("accent", headerLabel) +
          theme.fg("dim", "─".repeat(headerFill)),
        );

        if (children.length === 0) {
          lines.push("");
          lines.push(theme.fg("muted", " No active cleave children."));
          lines.push("");
          lines.push(theme.fg("dim", " q close"));
          return lines;
        }

        // ── Child selector ───────────────────────────────────────
        lines.push("");
        for (let i = 0; i < children.length; i++) {
          const c = children[i];
          const icon =
            c.status === "done" ? theme.fg("success", "✓") :
            c.status === "failed" ? theme.fg("error", "✕") :
            c.status === "running" ? theme.fg("warning", "⟳") :
            theme.fg("dim", "○");
          // NOTE(review): startedAt is assumed to be an epoch-ms timestamp
          // (it is subtracted from Date.now()) — confirm against the emitter.
          const elapsedSec = c.startedAt ? Math.floor((Date.now() - c.startedAt) / 1000) : null;
          const elapsed = elapsedSec != null ? theme.fg("dim", ` ${elapsedSec}s`) : "";
          const label = i === selected
            ? theme.bold(theme.fg("accent", `▶ ${c.label}`))
            : theme.fg("muted", ` ${c.label}`);
          lines.push(` ${icon} ${label}${elapsed}`);
        }

        // ── Recent stdout ────────────────────────────────────────
        lines.push("");
        lines.push(divFill("activity"));
        const recent: string[] = child?.recentLines ?? [];
        if (recent.length === 0) {
          lines.push(theme.fg("dim", " (no output yet)"));
        } else {
          // Show at most the last 10 buffered lines, clipped to width.
          for (const l of recent.slice(-10)) {
            lines.push(theme.fg("muted", ` ${l.slice(0, width - 4)}`));
          }
        }

        // ── Git diff ─────────────────────────────────────────────
        lines.push("");
        lines.push(divFill("worktree diff"));
        // First 15 diff lines only, clipped to width.
        for (const l of diffText.split("\n").slice(0, 15)) {
          lines.push(theme.fg("muted", ` ${l.slice(0, width - 4)}`));
        }

        // ── Footer ───────────────────────────────────────────────
        lines.push("");
        lines.push(theme.fg("dim", " ↑↓ select child · r refresh · q close"));
        return lines;
      },
      invalidate() {},
      // Keyboard handling: navigation, refresh, close.
      onKey(key: string) {
        if (key === "escape" || key === "q") { done(undefined); return; }
        if (key === "up" || key === "k") {
          selected = Math.max(0, selected - 1);
          diffLoaded = false; diffText = "(loading…)";
          loadDiff().then(() => tui.requestRender());
        }
        if (key === "down" || key === "j") {
          selected = Math.min(children.length - 1, selected + 1);
          diffLoaded = false; diffText = "(loading…)";
          loadDiff().then(() => tui.requestRender());
        }
        if (key === "r") {
          // Drop the cached diff for this worktree so loadDiff re-runs git.
          const child = children[selected];
          if (child?.worktreePath) diffCache.delete(child.worktreePath);
          diffText = "(loading…)";
          loadDiff().then(() => tui.requestRender());
        }
        tui.requestRender();
      },
    };
    // Re-render when diff loads
    const diffPoll = setInterval(() => {
      if (diffLoaded) { clearInterval(diffPoll); tui.requestRender(); }
    }, 100);
    (component as any).dispose = () => clearInterval(diffPoll);
    return component;
  });
}
|
|
3098
|
+
|
|
3099
|
+
pi.registerCommand("cleave inspect", {
|
|
3100
|
+
description: "Inspect a running cleave child — stdout ring buffer + live git diff",
|
|
3101
|
+
handler: async (_args, ctx) => {
|
|
3102
|
+
await openInspectOverlay(0, ctx);
|
|
3103
|
+
},
|
|
3104
|
+
});
|
|
3105
|
+
|
|
3106
|
+
// Register ctrl+i shortcut while any cleave run is active
|
|
3107
|
+
let inspectShortcutActive = false;
|
|
3108
|
+
pi.events.on(DASHBOARD_UPDATE_EVENT, (data: any) => {
|
|
3109
|
+
const cl = (sharedState as any).cleave;
|
|
3110
|
+
const isRunning = cl?.status === "running" || cl?.status === "dispatching";
|
|
3111
|
+
if (isRunning && !inspectShortcutActive) {
|
|
3112
|
+
inspectShortcutActive = true;
|
|
3113
|
+
pi.registerShortcut("ctrl+i", {
|
|
3114
|
+
description: "Inspect running cleave children",
|
|
3115
|
+
handler: async (ctx) => {
|
|
3116
|
+
await openInspectOverlay(0, ctx as any);
|
|
3117
|
+
},
|
|
3118
|
+
});
|
|
3119
|
+
}
|
|
3120
|
+
});
|
|
3121
|
+
}
|