opencode-swarm-plugin 0.44.0 → 0.44.1
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- package/bin/swarm.serve.test.ts +6 -4
- package/bin/swarm.ts +16 -10
- package/dist/compaction-prompt-scoring.js +139 -0
- package/dist/eval-capture.js +12811 -0
- package/dist/hive.d.ts.map +1 -1
- package/dist/index.js +7644 -62599
- package/dist/plugin.js +23766 -78721
- package/dist/swarm-orchestrate.d.ts.map +1 -1
- package/dist/swarm-prompts.d.ts.map +1 -1
- package/dist/swarm-review.d.ts.map +1 -1
- package/package.json +17 -5
- package/.changeset/swarm-insights-data-layer.md +0 -63
- package/.hive/analysis/eval-failure-analysis-2025-12-25.md +0 -331
- package/.hive/analysis/session-data-quality-audit.md +0 -320
- package/.hive/eval-results.json +0 -483
- package/.hive/issues.jsonl +0 -138
- package/.hive/memories.jsonl +0 -729
- package/.opencode/eval-history.jsonl +0 -327
- package/.turbo/turbo-build.log +0 -9
- package/CHANGELOG.md +0 -2286
- package/SCORER-ANALYSIS.md +0 -598
- package/docs/analysis/subagent-coordination-patterns.md +0 -902
- package/docs/analysis-socratic-planner-pattern.md +0 -504
- package/docs/planning/ADR-001-monorepo-structure.md +0 -171
- package/docs/planning/ADR-002-package-extraction.md +0 -393
- package/docs/planning/ADR-003-performance-improvements.md +0 -451
- package/docs/planning/ADR-004-message-queue-features.md +0 -187
- package/docs/planning/ADR-005-devtools-observability.md +0 -202
- package/docs/planning/ADR-007-swarm-enhancements-worktree-review.md +0 -168
- package/docs/planning/ADR-008-worker-handoff-protocol.md +0 -293
- package/docs/planning/ADR-009-oh-my-opencode-patterns.md +0 -353
- package/docs/planning/ADR-010-cass-inhousing.md +0 -1215
- package/docs/planning/ROADMAP.md +0 -368
- package/docs/semantic-memory-cli-syntax.md +0 -123
- package/docs/swarm-mail-architecture.md +0 -1147
- package/docs/testing/context-recovery-test.md +0 -470
- package/evals/ARCHITECTURE.md +0 -1189
- package/evals/README.md +0 -768
- package/evals/compaction-prompt.eval.ts +0 -149
- package/evals/compaction-resumption.eval.ts +0 -289
- package/evals/coordinator-behavior.eval.ts +0 -307
- package/evals/coordinator-session.eval.ts +0 -154
- package/evals/evalite.config.ts.bak +0 -15
- package/evals/example.eval.ts +0 -31
- package/evals/fixtures/cass-baseline.ts +0 -217
- package/evals/fixtures/compaction-cases.ts +0 -350
- package/evals/fixtures/compaction-prompt-cases.ts +0 -311
- package/evals/fixtures/coordinator-sessions.ts +0 -328
- package/evals/fixtures/decomposition-cases.ts +0 -105
- package/evals/lib/compaction-loader.test.ts +0 -248
- package/evals/lib/compaction-loader.ts +0 -320
- package/evals/lib/data-loader.evalite-test.ts +0 -289
- package/evals/lib/data-loader.test.ts +0 -345
- package/evals/lib/data-loader.ts +0 -281
- package/evals/lib/llm.ts +0 -115
- package/evals/scorers/compaction-prompt-scorers.ts +0 -145
- package/evals/scorers/compaction-scorers.ts +0 -305
- package/evals/scorers/coordinator-discipline.evalite-test.ts +0 -539
- package/evals/scorers/coordinator-discipline.ts +0 -325
- package/evals/scorers/index.test.ts +0 -146
- package/evals/scorers/index.ts +0 -328
- package/evals/scorers/outcome-scorers.evalite-test.ts +0 -27
- package/evals/scorers/outcome-scorers.ts +0 -349
- package/evals/swarm-decomposition.eval.ts +0 -121
- package/examples/commands/swarm.md +0 -745
- package/examples/plugin-wrapper-template.ts +0 -2515
- package/examples/skills/hive-workflow/SKILL.md +0 -212
- package/examples/skills/skill-creator/SKILL.md +0 -223
- package/examples/skills/swarm-coordination/SKILL.md +0 -292
- package/global-skills/cli-builder/SKILL.md +0 -344
- package/global-skills/cli-builder/references/advanced-patterns.md +0 -244
- package/global-skills/learning-systems/SKILL.md +0 -644
- package/global-skills/skill-creator/LICENSE.txt +0 -202
- package/global-skills/skill-creator/SKILL.md +0 -352
- package/global-skills/skill-creator/references/output-patterns.md +0 -82
- package/global-skills/skill-creator/references/workflows.md +0 -28
- package/global-skills/swarm-coordination/SKILL.md +0 -995
- package/global-skills/swarm-coordination/references/coordinator-patterns.md +0 -235
- package/global-skills/swarm-coordination/references/strategies.md +0 -138
- package/global-skills/system-design/SKILL.md +0 -213
- package/global-skills/testing-patterns/SKILL.md +0 -430
- package/global-skills/testing-patterns/references/dependency-breaking-catalog.md +0 -586
- package/opencode-swarm-plugin-0.30.7.tgz +0 -0
- package/opencode-swarm-plugin-0.31.0.tgz +0 -0
- package/scripts/cleanup-test-memories.ts +0 -346
- package/scripts/init-skill.ts +0 -222
- package/scripts/migrate-unknown-sessions.ts +0 -349
- package/scripts/validate-skill.ts +0 -204
- package/src/agent-mail.ts +0 -1724
- package/src/anti-patterns.test.ts +0 -1167
- package/src/anti-patterns.ts +0 -448
- package/src/compaction-capture.integration.test.ts +0 -257
- package/src/compaction-hook.test.ts +0 -838
- package/src/compaction-hook.ts +0 -1204
- package/src/compaction-observability.integration.test.ts +0 -139
- package/src/compaction-observability.test.ts +0 -187
- package/src/compaction-observability.ts +0 -324
- package/src/compaction-prompt-scorers.test.ts +0 -475
- package/src/compaction-prompt-scoring.ts +0 -300
- package/src/contributor-tools.test.ts +0 -133
- package/src/contributor-tools.ts +0 -201
- package/src/dashboard.test.ts +0 -611
- package/src/dashboard.ts +0 -462
- package/src/error-enrichment.test.ts +0 -403
- package/src/error-enrichment.ts +0 -219
- package/src/eval-capture.test.ts +0 -1015
- package/src/eval-capture.ts +0 -929
- package/src/eval-gates.test.ts +0 -306
- package/src/eval-gates.ts +0 -218
- package/src/eval-history.test.ts +0 -508
- package/src/eval-history.ts +0 -214
- package/src/eval-learning.test.ts +0 -378
- package/src/eval-learning.ts +0 -360
- package/src/eval-runner.test.ts +0 -223
- package/src/eval-runner.ts +0 -402
- package/src/export-tools.test.ts +0 -476
- package/src/export-tools.ts +0 -257
- package/src/hive.integration.test.ts +0 -2241
- package/src/hive.ts +0 -1628
- package/src/index.ts +0 -940
- package/src/learning.integration.test.ts +0 -1815
- package/src/learning.ts +0 -1079
- package/src/logger.test.ts +0 -189
- package/src/logger.ts +0 -135
- package/src/mandate-promotion.test.ts +0 -473
- package/src/mandate-promotion.ts +0 -239
- package/src/mandate-storage.integration.test.ts +0 -601
- package/src/mandate-storage.test.ts +0 -578
- package/src/mandate-storage.ts +0 -794
- package/src/mandates.ts +0 -540
- package/src/memory-tools.test.ts +0 -195
- package/src/memory-tools.ts +0 -344
- package/src/memory.integration.test.ts +0 -334
- package/src/memory.test.ts +0 -158
- package/src/memory.ts +0 -527
- package/src/model-selection.test.ts +0 -188
- package/src/model-selection.ts +0 -68
- package/src/observability-tools.test.ts +0 -359
- package/src/observability-tools.ts +0 -871
- package/src/output-guardrails.test.ts +0 -438
- package/src/output-guardrails.ts +0 -381
- package/src/pattern-maturity.test.ts +0 -1160
- package/src/pattern-maturity.ts +0 -525
- package/src/planning-guardrails.test.ts +0 -491
- package/src/planning-guardrails.ts +0 -438
- package/src/plugin.ts +0 -23
- package/src/post-compaction-tracker.test.ts +0 -251
- package/src/post-compaction-tracker.ts +0 -237
- package/src/query-tools.test.ts +0 -636
- package/src/query-tools.ts +0 -324
- package/src/rate-limiter.integration.test.ts +0 -466
- package/src/rate-limiter.ts +0 -774
- package/src/replay-tools.test.ts +0 -496
- package/src/replay-tools.ts +0 -240
- package/src/repo-crawl.integration.test.ts +0 -441
- package/src/repo-crawl.ts +0 -610
- package/src/schemas/cell-events.test.ts +0 -347
- package/src/schemas/cell-events.ts +0 -807
- package/src/schemas/cell.ts +0 -257
- package/src/schemas/evaluation.ts +0 -166
- package/src/schemas/index.test.ts +0 -199
- package/src/schemas/index.ts +0 -286
- package/src/schemas/mandate.ts +0 -232
- package/src/schemas/swarm-context.ts +0 -115
- package/src/schemas/task.ts +0 -161
- package/src/schemas/worker-handoff.test.ts +0 -302
- package/src/schemas/worker-handoff.ts +0 -131
- package/src/sessions/agent-discovery.test.ts +0 -137
- package/src/sessions/agent-discovery.ts +0 -112
- package/src/sessions/index.ts +0 -15
- package/src/skills.integration.test.ts +0 -1192
- package/src/skills.test.ts +0 -643
- package/src/skills.ts +0 -1549
- package/src/storage.integration.test.ts +0 -341
- package/src/storage.ts +0 -884
- package/src/structured.integration.test.ts +0 -817
- package/src/structured.test.ts +0 -1046
- package/src/structured.ts +0 -762
- package/src/swarm-decompose.test.ts +0 -188
- package/src/swarm-decompose.ts +0 -1302
- package/src/swarm-deferred.integration.test.ts +0 -157
- package/src/swarm-deferred.test.ts +0 -38
- package/src/swarm-insights.test.ts +0 -214
- package/src/swarm-insights.ts +0 -459
- package/src/swarm-mail.integration.test.ts +0 -970
- package/src/swarm-mail.ts +0 -739
- package/src/swarm-orchestrate.integration.test.ts +0 -282
- package/src/swarm-orchestrate.test.ts +0 -548
- package/src/swarm-orchestrate.ts +0 -3084
- package/src/swarm-prompts.test.ts +0 -1270
- package/src/swarm-prompts.ts +0 -2077
- package/src/swarm-research.integration.test.ts +0 -701
- package/src/swarm-research.test.ts +0 -698
- package/src/swarm-research.ts +0 -472
- package/src/swarm-review.integration.test.ts +0 -285
- package/src/swarm-review.test.ts +0 -879
- package/src/swarm-review.ts +0 -709
- package/src/swarm-strategies.ts +0 -407
- package/src/swarm-worktree.test.ts +0 -501
- package/src/swarm-worktree.ts +0 -575
- package/src/swarm.integration.test.ts +0 -2377
- package/src/swarm.ts +0 -38
- package/src/tool-adapter.integration.test.ts +0 -1221
- package/src/tool-availability.ts +0 -461
- package/tsconfig.json +0 -28
package/src/swarm-orchestrate.ts
DELETED
@@ -1,3084 +0,0 @@

````ts
/**
 * Swarm Orchestrate Module - Status tracking and completion handling
 *
 * Handles swarm execution lifecycle:
 * - Initialization and tool availability
 * - Status tracking and progress reporting
 * - Completion verification and gates
 * - Error accumulation and 3-strike detection
 * - Learning from outcomes
 *
 * Key responsibilities:
 * - swarm_init - Check tools and discover skills
 * - swarm_status - Query epic progress
 * - swarm_progress - Report agent progress
 * - swarm_complete - Verification gate and completion
 * - swarm_record_outcome - Learning signals
 * - swarm_broadcast - Mid-task context sharing
 * - Error accumulation tools
 * - 3-strike detection for architectural problems
 */

import { tool } from "@opencode-ai/plugin";
import { z } from "zod";
import { minimatch } from "minimatch";
import {
  type AgentProgress,
  AgentProgressSchema,
  type Bead,
  BeadSchema,
  type Evaluation,
  EvaluationSchema,
  type SpawnedAgent,
  type SwarmStatus,
  SwarmStatusSchema,
} from "./schemas";
import {
  type WorkerHandoff,
  WorkerHandoffSchema,
} from "./schemas/worker-handoff";
import {
  getSwarmInbox,
  releaseSwarmFiles,
  sendSwarmMessage,
  getAgent,
  createEvent,
  appendEvent,
  getSwarmMailLibSQL,
} from "swarm-mail";
import {
  addStrike,
  clearStrikes,
  DEFAULT_LEARNING_CONFIG,
  type DecompositionStrategy as LearningDecompositionStrategy,
  ErrorAccumulator,
  type ErrorType,
  type FeedbackEvent,
  formatMemoryStoreOn3Strike,
  formatMemoryStoreOnSuccess,
  getArchitecturePrompt,
  getStrikes,
  InMemoryStrikeStorage,
  isStrikedOut,
  type OutcomeSignals,
  OutcomeSignalsSchema,
  outcomeToFeedback,
  type ScoredOutcome,
  scoreImplicitFeedback,
  type StrikeStorage,
} from "./learning";
import {
  checkAllTools,
  formatToolAvailability,
  isToolAvailable,
  warnMissingTool,
} from "./tool-availability";
import { getHiveAdapter, hive_sync, setHiveWorkingDirectory, getHiveWorkingDirectory } from "./hive";
import { listSkills } from "./skills";
import {
  canUseWorktreeIsolation,
  getStartCommit,
} from "./swarm-worktree";
import {
  isReviewApproved,
  getReviewStatus,
} from "./swarm-review";
import { captureCoordinatorEvent, type EvalRecord } from "./eval-capture.js";
import { formatResearcherPrompt } from "./swarm-prompts";

// ============================================================================
// Helper Functions
// ============================================================================

/**
 * Generate a WorkerHandoff object from subtask parameters
 *
 * Creates a machine-readable contract that replaces prose instructions in SUBTASK_PROMPT_V2.
 * Workers receive typed handoffs with explicit files, criteria, and escalation paths.
 *
 * @param params - Subtask parameters
 * @returns WorkerHandoff object validated against schema
 */
export function generateWorkerHandoff(params: {
  task_id: string;
  files_owned: string[];
  files_readonly?: string[];
  dependencies_completed?: string[];
  success_criteria?: string[];
  epic_summary: string;
  your_role: string;
  what_others_did?: string;
  what_comes_next?: string;
}): WorkerHandoff {
  const handoff: WorkerHandoff = {
    contract: {
      task_id: params.task_id,
      files_owned: params.files_owned,
      files_readonly: params.files_readonly || [],
      dependencies_completed: params.dependencies_completed || [],
      success_criteria: params.success_criteria || [
        "All files compile without errors",
        "Tests pass for modified code",
        "Code follows project patterns",
      ],
    },
    context: {
      epic_summary: params.epic_summary,
      your_role: params.your_role,
      what_others_did: params.what_others_did || "",
      what_comes_next: params.what_comes_next || "",
    },
    escalation: {
      blocked_contact: "coordinator",
      scope_change_protocol:
        "Send swarmmail_send(to=['coordinator'], subject='Scope change request: <task_id>', importance='high') and wait for approval before expanding beyond files_owned",
    },
  };

  // Validate against schema
  return WorkerHandoffSchema.parse(handoff);
}

/**
 * Validate that files_touched is a subset of files_owned (supports globs)
 *
 * Checks contract compliance - workers should only modify files they own.
 * Glob patterns in files_owned are matched against files_touched paths.
 *
 * @param files_touched - Actual files modified by the worker
 * @param files_owned - Files the worker is allowed to modify (may include globs)
 * @returns Validation result with violations list
 *
 * @example
 * ```typescript
 * // Exact match - passes
 * validateContract(["src/a.ts"], ["src/a.ts", "src/b.ts"])
 * // => { valid: true, violations: [] }
 *
 * // Glob match - passes
 * validateContract(["src/auth/service.ts"], ["src/auth/**"])
 * // => { valid: true, violations: [] }
 *
 * // Violation - fails
 * validateContract(["src/other.ts"], ["src/auth/**"])
 * // => { valid: false, violations: ["src/other.ts"] }
 * ```
 */
export function validateContract(
  files_touched: string[],
  files_owned: string[]
): { valid: boolean; violations: string[] } {
  // Empty files_touched is valid (read-only work)
  if (files_touched.length === 0) {
    return { valid: true, violations: [] };
  }

  const violations: string[] = [];

  for (const touchedFile of files_touched) {
    let matched = false;

    for (const ownedPattern of files_owned) {
      // Check if pattern is a glob or exact match
      if (ownedPattern.includes("*") || ownedPattern.includes("?")) {
        // Glob pattern - use minimatch
        if (minimatch(touchedFile, ownedPattern)) {
          matched = true;
          break;
        }
      } else {
        // Exact match
        if (touchedFile === ownedPattern) {
          matched = true;
          break;
        }
      }
    }

    if (!matched) {
      violations.push(touchedFile);
    }
  }

  return {
    valid: violations.length === 0,
    violations,
  };
}

/**
 * Get files_owned for a subtask from DecompositionGeneratedEvent
 *
 * Queries the event log for the decomposition that created this epic,
 * then extracts the files array for the matching subtask.
 *
 * @param projectKey - Project path
 * @param epicId - Epic ID
 * @param subtaskId - Subtask cell ID
 * @returns Array of file patterns this subtask owns, or null if not found
 */
async function getSubtaskFilesOwned(
  projectKey: string,
  epicId: string,
  subtaskId: string
): Promise<string[] | null> {
  try {
    // Import readEvents from swarm-mail
    const { readEvents } = await import("swarm-mail");

    // Query for decomposition_generated events for this epic
    const events = await readEvents({
      projectKey,
      types: ["decomposition_generated"],
    }, projectKey);

    // Find the event for this epic
    const decompositionEvent = events.find((e: any) =>
      e.type === "decomposition_generated" && e.epic_id === epicId
    );

    if (!decompositionEvent) {
      console.warn(`[swarm_complete] No decomposition event found for epic ${epicId}`);
      return null;
    }

    // Extract subtask index from subtask ID (e.g., "bd-abc123.0" -> 0)
    // Subtask IDs follow pattern: epicId.index
    const subtaskMatch = subtaskId.match(/\.(\d+)$/);
    if (!subtaskMatch) {
      console.warn(`[swarm_complete] Could not parse subtask index from ${subtaskId}`);
      return null;
    }

    const subtaskIndex = parseInt(subtaskMatch[1], 10);
    const subtasks = (decompositionEvent as any).subtasks || [];

    if (subtaskIndex >= subtasks.length) {
      console.warn(`[swarm_complete] Subtask index ${subtaskIndex} out of range (${subtasks.length} subtasks)`);
      return null;
    }

    const subtask = subtasks[subtaskIndex];
    return subtask.files || [];
  } catch (error) {
    console.error(`[swarm_complete] Failed to query subtask files:`, error);
    return null;
  }
}

/**
 * Query beads for subtasks of an epic using HiveAdapter (not bd CLI)
 */
async function queryEpicSubtasks(projectKey: string, epicId: string): Promise<Bead[]> {
  try {
    const adapter = await getHiveAdapter(projectKey);
    const cells = await adapter.queryCells(projectKey, { parent_id: epicId });
    // Map Cell (from HiveAdapter) to Bead schema format
    // Cell uses `type` and numeric timestamps, Bead uses `issue_type` and ISO strings
    return cells
      .filter(cell => cell.status !== "tombstone") // Exclude deleted cells
      .map(cell => ({
        id: cell.id,
        title: cell.title,
        description: cell.description || "",
        status: cell.status as "open" | "in_progress" | "blocked" | "closed",
        priority: cell.priority,
        issue_type: cell.type as "bug" | "feature" | "task" | "epic" | "chore",
        created_at: new Date(cell.created_at).toISOString(),
        updated_at: cell.updated_at ? new Date(cell.updated_at).toISOString() : undefined,
        dependencies: [], // Dependencies fetched separately if needed
        metadata: {},
      }));
  } catch (error) {
    console.error(
      `[swarm] ERROR: Failed to query subtasks for epic ${epicId}:`,
      error instanceof Error ? error.message : String(error),
    );
    return [];
  }
}

/**
 * Query Agent Mail for swarm thread messages
 */
async function querySwarmMessages(
  projectKey: string,
  threadId: string,
): Promise<number> {
  // Check if agent-mail is available
  const agentMailAvailable = await isToolAvailable("agent-mail");
  if (!agentMailAvailable) {
    // Don't warn here - it's checked elsewhere
    return 0;
  }

  try {
    // Use embedded swarm-mail inbox to count messages in thread
    const inbox = await getSwarmInbox({
      projectPath: projectKey,
      agentName: "coordinator", // Dummy agent name for thread query
      limit: 5,
      includeBodies: false,
    });

    // Count messages that match the thread ID
    const threadMessages = inbox.messages.filter(
      (m) => m.thread_id === threadId,
    );
    return threadMessages.length;
  } catch (error) {
    // Thread might not exist yet, or query failed
    console.warn(
      `[swarm] Failed to query swarm messages for thread ${threadId}:`,
      error,
    );
    return 0;
  }
}

/**
 * Format a progress message for Agent Mail
 */
function formatProgressMessage(progress: AgentProgress): string {
  const lines = [
    `**Status**: ${progress.status}`,
    progress.progress_percent !== undefined
      ? `**Progress**: ${progress.progress_percent}%`
      : null,
    progress.message ? `**Message**: ${progress.message}` : null,
    progress.files_touched && progress.files_touched.length > 0
      ? `**Files touched**:\n${progress.files_touched.map((f) => `- \`${f}\``).join("\n")}`
      : null,
    progress.blockers && progress.blockers.length > 0
      ? `**Blockers**:\n${progress.blockers.map((b) => `- ${b}`).join("\n")}`
      : null,
  ];

  return lines.filter(Boolean).join("\n\n");
}

// ============================================================================
// Verification Gate
// ============================================================================

/**
 * Verification Gate result - tracks each verification step
 *
 * Based on the Gate Function from superpowers:
 * 1. IDENTIFY: What command proves this claim?
 * 2. RUN: Execute the FULL command (fresh, complete)
 * 3. READ: Full output, check exit code, count failures
 * 4. VERIFY: Does output confirm the claim?
 * 5. ONLY THEN: Make the claim
 */
interface VerificationStep {
  name: string;
  command: string;
  passed: boolean;
  exitCode: number;
  output?: string;
  error?: string;
  skipped?: boolean;
  skipReason?: string;
}

interface VerificationGateResult {
  passed: boolean;
  steps: VerificationStep[];
  summary: string;
  blockers: string[];
}

// NOTE: UBS scan (runUbsScan, UbsScanResult) removed in v0.31
// It was slowing down completion without proportional value.
// Run UBS manually if needed: ubs scan <files>

/**
 * Run typecheck verification
 *
 * Attempts to run TypeScript type checking on the project.
 * Falls back gracefully if tsc is not available.
 */
async function runTypecheckVerification(): Promise<VerificationStep> {
  const step: VerificationStep = {
    name: "typecheck",
    command: "tsc --noEmit",
    passed: false,
    exitCode: -1,
  };

  try {
    // Check if tsconfig.json exists in current directory
    const tsconfigExists = await Bun.file("tsconfig.json").exists();
    if (!tsconfigExists) {
      step.skipped = true;
      step.skipReason = "No tsconfig.json found";
      step.passed = true; // Don't block if no TypeScript
      return step;
    }

    const result = await Bun.$`tsc --noEmit`.quiet().nothrow();
    step.exitCode = result.exitCode;
    step.passed = result.exitCode === 0;

    if (!step.passed) {
      step.error = result.stderr.toString().slice(0, 1000); // Truncate for context
      step.output = result.stdout.toString().slice(0, 1000);
    }
  } catch (error) {
    step.skipped = true;
    step.skipReason = `tsc not available: ${error instanceof Error ? error.message : String(error)}`;
    step.passed = true; // Don't block if tsc unavailable
  }

  return step;
}

/**
 * Run test verification for specific files
 *
 * Attempts to find and run tests related to the touched files.
 * Uses common test patterns (*.test.ts, *.spec.ts, __tests__/).
 */
async function runTestVerification(
  filesTouched: string[],
): Promise<VerificationStep> {
  const step: VerificationStep = {
    name: "tests",
    command: "bun test <related-files>",
    passed: false,
    exitCode: -1,
  };

  if (filesTouched.length === 0) {
    step.skipped = true;
    step.skipReason = "No files touched";
    step.passed = true;
    return step;
  }

  // Find test files related to touched files
  const testPatterns: string[] = [];
  for (const file of filesTouched) {
    // Skip if already a test file
    if (file.includes(".test.") || file.includes(".spec.")) {
      testPatterns.push(file);
      continue;
    }

    // Look for corresponding test file
    const baseName = file.replace(/\.(ts|tsx|js|jsx)$/, "");
    testPatterns.push(`${baseName}.test.ts`);
    testPatterns.push(`${baseName}.test.tsx`);
    testPatterns.push(`${baseName}.spec.ts`);
  }

  // Check if any test files exist
  const existingTests: string[] = [];
  for (const pattern of testPatterns) {
    try {
      const exists = await Bun.file(pattern).exists();
      if (exists) {
        existingTests.push(pattern);
      }
    } catch {
      // File doesn't exist, skip
    }
  }

  if (existingTests.length === 0) {
    step.skipped = true;
    step.skipReason = "No related test files found";
    step.passed = true;
    return step;
  }

  try {
    step.command = `bun test ${existingTests.join(" ")}`;
    const result = await Bun.$`bun test ${existingTests}`.quiet().nothrow();
    step.exitCode = result.exitCode;
    step.passed = result.exitCode === 0;

    if (!step.passed) {
      step.error = result.stderr.toString().slice(0, 1000);
      step.output = result.stdout.toString().slice(0, 1000);
    }
  } catch (error) {
    step.skipped = true;
    step.skipReason = `Test runner failed: ${error instanceof Error ? error.message : String(error)}`;
    step.passed = true; // Don't block if test runner unavailable
  }

  return step;
}

/**
 * Run the full Verification Gate
 *
 * Implements the Gate Function (IDENTIFY → RUN → READ → VERIFY → CLAIM):
 * 1. Typecheck
 * 2. Tests for touched files
 *
 * NOTE: UBS scan was removed in v0.31 - it was slowing down completion
 * without providing proportional value. Run UBS manually if needed.
 *
 * All steps must pass (or be skipped with valid reason) to proceed.
 */
async function runVerificationGate(
  filesTouched: string[],
  _skipUbs: boolean = false, // Kept for backward compatibility, now ignored
): Promise<VerificationGateResult> {
  const steps: VerificationStep[] = [];
  const blockers: string[] = [];

  // Step 1: Typecheck (UBS scan removed in v0.31)
  const typecheckStep = await runTypecheckVerification();
  steps.push(typecheckStep);
  if (!typecheckStep.passed && !typecheckStep.skipped) {
    blockers.push(
      `Typecheck failed: ${typecheckStep.error?.slice(0, 100) || "type errors found"}. Try: Run 'tsc --noEmit' to see full errors, check tsconfig.json configuration, or fix reported type errors in modified files.`,
    );
  }

  // Step 3: Tests
  const testStep = await runTestVerification(filesTouched);
  steps.push(testStep);
  if (!testStep.passed && !testStep.skipped) {
    blockers.push(
      `Tests failed: ${testStep.error?.slice(0, 100) || "test failures"}. Try: Run 'bun test ${testStep.command.split(" ").slice(2).join(" ")}' to see full output, check test assertions, or fix failing tests in modified files.`,
    );
  }

  // Build summary
  const passedCount = steps.filter((s) => s.passed).length;
  const skippedCount = steps.filter((s) => s.skipped).length;
  const failedCount = steps.filter((s) => !s.passed && !s.skipped).length;

  const summary =
    failedCount === 0
      ? `Verification passed: ${passedCount} checks passed, ${skippedCount} skipped`
      : `Verification FAILED: ${failedCount} checks failed, ${passedCount} passed, ${skippedCount} skipped`;

  return {
    passed: failedCount === 0,
    steps,
    summary,
    blockers,
  };
}

/**
 * Classify failure based on error message heuristics
 *
 * Simple pattern matching to categorize why a task failed.
 * Used when failure_mode is not explicitly provided.
 *
 * @param error - Error object or message
 * @returns FailureMode classification
 */
function classifyFailure(error: Error | string): string {
  const msg = (typeof error === "string" ? error : error.message).toLowerCase();

  if (msg.includes("timeout")) return "timeout";
  if (msg.includes("conflict") || msg.includes("reservation"))
    return "conflict";
  if (msg.includes("validation") || msg.includes("schema")) return "validation";
  if (msg.includes("context") || msg.includes("token"))
    return "context_overflow";
  if (msg.includes("blocked") || msg.includes("dependency"))
    return "dependency_blocked";
  if (msg.includes("cancel")) return "user_cancelled";

  // Check for tool failure patterns
  if (
    msg.includes("tool") ||
    msg.includes("command") ||
    msg.includes("failed to execute")
  ) {
    return "tool_failure";
  }

  return "unknown";
}

// ============================================================================
// Global Storage
// ============================================================================

/**
 * Global error accumulator for tracking errors across subtasks
 *
 * This is a session-level singleton that accumulates errors during
 * swarm execution for feeding into retry prompts.
 */
const globalErrorAccumulator = new ErrorAccumulator();

/**
 * Global strike storage for tracking consecutive fix failures
 */
const globalStrikeStorage: StrikeStorage = new InMemoryStrikeStorage();

// ============================================================================
// Tool Definitions
// ============================================================================

/**
 * Initialize swarm and check tool availability
 *
 * Call this at the start of a swarm session to see what tools are available,
 * what skills exist in the project, and what features will be degraded.
 *
 * Skills are automatically discovered from:
 * - .opencode/skills/
 * - .claude/skills/
 * - skills/
 */
export const swarm_init = tool({
  description:
    "Initialize swarm session: discovers available skills, checks tool availability. ALWAYS call at swarm start.",
  args: {
    project_path: tool.schema
      .string()
      .optional()
      .describe("Project path (for Agent Mail init)"),
    isolation: tool.schema
      .enum(["worktree", "reservation"])
      .optional()
      .default("reservation")
      .describe(
        "Isolation mode: 'worktree' for git worktree isolation (requires clean git state), 'reservation' for file reservations (default)",
      ),
  },
  async execute(args) {
    // Check all tools
    const availability = await checkAllTools();

    // Build status report
    const report = formatToolAvailability(availability);

    // Check critical tools
    const beadsAvailable = availability.get("beads")?.status.available ?? false;
    const agentMailAvailable =
      availability.get("agent-mail")?.status.available ?? false;

    // Build warnings
    const warnings: string[] = [];
    const degradedFeatures: string[] = [];

    if (!beadsAvailable) {
      warnings.push(
        "⚠️ beads (bd) not available - issue tracking disabled, swarm coordination will be limited",
      );
      degradedFeatures.push("issue tracking", "progress persistence");
    }

    if (!agentMailAvailable) {
      warnings.push(
        "⚠️ agent-mail not available - multi-agent communication disabled",
      );
      degradedFeatures.push("agent communication", "file reservations");
    }

    if (!availability.get("cass")?.status.available) {
      degradedFeatures.push("historical context from past sessions");
    }

    if (!availability.get("ubs")?.status.available) {
      degradedFeatures.push("pre-completion bug scanning");
    }

    if (!availability.get("semantic-memory")?.status.available) {
      degradedFeatures.push("persistent learning (using in-memory fallback)");
    }

    // Discover available skills
    const availableSkills = await listSkills();
    const skillsInfo = {
      count: availableSkills.length,
      available: availableSkills.length > 0,
      skills: availableSkills.map((s) => ({
        name: s.name,
        description: s.description,
        hasScripts: s.hasScripts,
      })),
    };

    // Add skills guidance if available
    let skillsGuidance: string | undefined;
    if (availableSkills.length > 0) {
      skillsGuidance = `Found ${availableSkills.length} skill(s). Use skills_list to see details, skills_use to activate.`;
    } else {
      skillsGuidance =
        "No skills found. Add skills to .opencode/skills/ or .claude/skills/ for specialized guidance.";
    }

    // Check isolation mode
    const isolationMode = args.isolation ?? "reservation";
    let isolationInfo: {
      mode: "worktree" | "reservation";
      available: boolean;
      start_commit?: string;
      reason?: string;
    } = {
      mode: isolationMode,
      available: true,
    };

    if (isolationMode === "worktree" && args.project_path) {
      const worktreeCheck = await canUseWorktreeIsolation(args.project_path);
      if (worktreeCheck.canUse) {
        const startCommit = await getStartCommit(args.project_path);
        isolationInfo = {
          mode: "worktree",
          available: true,
          start_commit: startCommit ?? undefined,
        };
      } else {
        // Fall back to reservation mode
        isolationInfo = {
          mode: "reservation",
          available: false,
          reason: `Worktree mode unavailable: ${worktreeCheck.reason}. Falling back to reservation mode.`,
        };
        warnings.push(
          `⚠️ Worktree isolation unavailable: ${worktreeCheck.reason}. Using file reservations instead.`,
        );
      }
    } else if (isolationMode === "worktree" && !args.project_path) {
      isolationInfo = {
        mode: "reservation",
        available: false,
        reason: "Worktree mode requires project_path. Falling back to reservation mode.",
      };
      warnings.push(
        "⚠️ Worktree isolation requires project_path. Using file reservations instead.",
      );
    }

    return JSON.stringify(
      {
        ready: true,
        isolation: isolationInfo,
        tool_availability: Object.fromEntries(
          Array.from(availability.entries()).map(([k, v]) => [
            k,
            {
              available: v.status.available,
              fallback: v.status.available ? null : v.fallbackBehavior,
            },
          ]),
        ),
        skills: skillsInfo,
        warnings: warnings.length > 0 ? warnings : undefined,
        degraded_features:
          degradedFeatures.length > 0 ? degradedFeatures : undefined,
        recommendations: {
          skills: skillsGuidance,
          beads: beadsAvailable
            ? "✓ Use beads for all task tracking"
            : "Install beads: npm i -g @joelhooks/beads",
          agent_mail: agentMailAvailable
            ? "✓ Use Agent Mail for coordination"
            : "Start Agent Mail: agent-mail serve",
          isolation:
            isolationInfo.mode === "worktree"
              ? "✓ Using git worktree isolation"
              : "✓ Using file reservation isolation",
        },
        report,
      },
      null,
      2,
    );
  },
});

/**
 * Get status of a swarm by epic ID
 *
 * Requires project_key to query Agent Mail for message counts.
 */
export const swarm_status = tool({
  description: "Get status of a swarm by epic ID",
  args: {
    epic_id: tool.schema.string().describe("Epic bead ID (e.g., bd-abc123)"),
    project_key: tool.schema
      .string()
      .describe("Project path (for Agent Mail queries)"),
  },
  async execute(args) {
    // Query subtasks from beads
    const subtasks = await queryEpicSubtasks(args.project_key, args.epic_id);

    // Count statuses
    const statusCounts = {
      running: 0,
      completed: 0,
      failed: 0,
      blocked: 0,
    };

    const agents: SpawnedAgent[] = [];

    for (const bead of subtasks) {
      // Map cell status to agent status
      let agentStatus: SpawnedAgent["status"] = "pending";
      switch (bead.status) {
        case "in_progress":
          agentStatus = "running";
          statusCounts.running++;
          break;
        case "closed":
          agentStatus = "completed";
          statusCounts.completed++;
          break;
        case "blocked":
          agentStatus = "pending"; // Blocked treated as pending for swarm
          statusCounts.blocked++;
          break;
        default:
          // open = pending
          break;
      }

      agents.push({
        bead_id: bead.id,
        agent_name: "", // We don't track this in beads
        status: agentStatus,
        files: [], // Would need to parse from description
      });
    }

    // Query Agent Mail for message activity
    const messageCount = await querySwarmMessages(
      args.project_key,
      args.epic_id,
    );

    const status: SwarmStatus = {
      epic_id: args.epic_id,
      total_agents: subtasks.length,
      running: statusCounts.running,
      completed: statusCounts.completed,
      failed: statusCounts.failed,
      blocked: statusCounts.blocked,
      agents,
      last_update: new Date().toISOString(),
    };

    // Validate and return
    const validated = SwarmStatusSchema.parse(status);

    return JSON.stringify(
      {
        ...validated,
        message_count: messageCount,
        progress_percent:
          subtasks.length > 0
            ? Math.round((statusCounts.completed / subtasks.length) * 100)
            : 0,
      },
      null,
      2,
    );
  },
});

/**
 * Report progress on a subtask
 *
 * Takes explicit agent identity since tools don't have persistent state.
 */
export const swarm_progress = tool({
  description: "Report progress on a subtask to coordinator",
  args: {
    project_key: tool.schema.string().describe("Project path"),
    agent_name: tool.schema.string().describe("Your Agent Mail name"),
    bead_id: tool.schema.string().describe("Subtask bead ID"),
    status: tool.schema
      .enum(["in_progress", "blocked", "completed", "failed"])
      .describe("Current status"),
    message: tool.schema
      .string()
      .optional()
      .describe("Progress message or blockers"),
    progress_percent: tool.schema
      .number()
      .min(0)
      .max(100)
      .optional()
      .describe("Completion percentage"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified so far"),
  },
  async execute(args) {
    // Build progress report
    const progress: AgentProgress = {
      bead_id: args.bead_id,
      agent_name: args.agent_name,
      status: args.status,
      progress_percent: args.progress_percent,
      message: args.message,
      files_touched: args.files_touched,
      timestamp: new Date().toISOString(),
    };

    // Validate
    const validated = AgentProgressSchema.parse(progress);

    // Update cell status if needed (using HiveAdapter, not bd CLI)
    if (args.status === "blocked" || args.status === "in_progress") {
      try {
        const adapter = await getHiveAdapter(args.project_key);
        const newStatus = args.status === "blocked" ? "blocked" : "in_progress";
        await adapter.changeCellStatus(args.project_key, args.bead_id, newStatus);
      } catch (error) {
        // Non-fatal - log but continue
        console.error(`[swarm] Failed to update cell status: ${error instanceof Error ? error.message : String(error)}`);
      }
    }

    // Extract epic ID from bead ID (e.g., bd-abc123.1 -> bd-abc123)
    const epicId = args.bead_id.includes(".")
      ? args.bead_id.split(".")[0]
      : args.bead_id;

    // Send progress message to thread using embedded swarm-mail
    await sendSwarmMessage({
      projectPath: args.project_key,
      fromAgent: args.agent_name,
      toAgents: [], // Coordinator will pick it up from thread
      subject: `Progress: ${args.bead_id} - ${args.status}`,
      body: formatProgressMessage(validated),
      threadId: epicId,
      importance: args.status === "blocked" ? "high" : "normal",
    });

    // Auto-checkpoint at milestone progress (25%, 50%, 75%)
    let checkpointCreated = false;
    if (
      args.progress_percent !== undefined &&
      args.files_touched &&
      args.files_touched.length > 0
    ) {
      const milestones = [25, 50, 75];
      if (milestones.includes(args.progress_percent)) {
        try {
          // Create checkpoint event directly (non-fatal if it fails)
          const checkpoint = {
            epic_id: epicId,
            bead_id: args.bead_id,
            strategy: "file-based" as const,
            files: args.files_touched,
            dependencies: [] as string[],
            directives: {},
            recovery: {
              last_checkpoint: Date.now(),
              files_modified: args.files_touched,
              progress_percent: args.progress_percent,
              last_message: args.message,
            },
          };

          const event = createEvent("swarm_checkpointed", {
            project_key: args.project_key,
            ...checkpoint,
          });
          await appendEvent(event, args.project_key);

          // NOTE: The event handler (handleSwarmCheckpointed in store.ts) updates
          // the swarm_contexts table. We follow event sourcing pattern here.
          checkpointCreated = true;
        } catch (error) {
          // Non-fatal - log and continue
          console.warn(
            `[swarm_progress] Auto-checkpoint failed at ${args.progress_percent}%:`,
            error,
          );
        }
      }
    }

    return `Progress reported: ${args.status}${args.progress_percent !== undefined ? ` (${args.progress_percent}%)` : ""}${checkpointCreated ? " [checkpoint created]" : ""}`;
  },
});

/**
 * Broadcast context updates to all agents in the epic
 *
 * Enables mid-task coordination by sharing discoveries, warnings, or blockers
 * with all agents working on the same epic. Agents can broadcast without
 * waiting for task completion.
 *
 * Based on "Patterns for Building AI Agents" p.31: "Ensure subagents can share context along the way"
 */
export const swarm_broadcast = tool({
  description:
    "Broadcast context update to all agents working on the same epic",
  args: {
    project_path: tool.schema
      .string()
      .describe("Absolute path to project root"),
    agent_name: tool.schema
      .string()
      .describe("Name of the agent broadcasting the message"),
    epic_id: tool.schema.string().describe("Epic ID (e.g., bd-abc123)"),
    message: tool.schema
      .string()
      .describe("Context update to share (what changed, what was learned)"),
    importance: tool.schema
      .enum(["info", "warning", "blocker"])
      .default("info")
      .describe("Priority level (default: info)"),
    files_affected: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files this context relates to"),
  },
  async execute(args) {
    // Extract bead_id from context if available (for traceability)
    const beadId = "unknown"; // Context not currently available in tool execution

    // Format the broadcast message
    const body = [
      `## Context Update`,
      "",
      `**From**: ${args.agent_name} (${beadId})`,
      `**Priority**: ${args.importance.toUpperCase()}`,
      "",
      args.message,
      "",
      args.files_affected && args.files_affected.length > 0
        ? `**Files affected**:\n${args.files_affected.map((f) => `- \`${f}\``).join("\n")}`
        : "",
    ]
      .filter(Boolean)
      .join("\n");

    // Map importance to Agent Mail importance
    const mailImportance =
      args.importance === "blocker"
        ? "urgent"
        : args.importance === "warning"
          ? "high"
          : "normal";

    // Send as broadcast to thread using embedded swarm-mail
    await sendSwarmMessage({
      projectPath: args.project_path,
      fromAgent: args.agent_name,
      toAgents: [], // Broadcast to thread
      subject: `[${args.importance.toUpperCase()}] Context update from ${args.agent_name}`,
      body,
      threadId: args.epic_id,
      importance: mailImportance,
      ackRequired: args.importance === "blocker",
    });

    return JSON.stringify(
      {
        broadcast: true,
        epic_id: args.epic_id,
        from: args.agent_name,
        bead_id: beadId,
        importance: args.importance,
        recipients: "all agents in epic",
        ack_required: args.importance === "blocker",
      },
      null,
      2,
    );
  },
});

/**
 * Mark a subtask as complete
 *
 * Implements the Verification Gate (from superpowers):
 * 1. IDENTIFY: What commands prove this claim?
 * 2. RUN: Execute verification (UBS, typecheck, tests)
 * 3. READ: Check exit codes and output
 * 4. VERIFY: All checks must pass
 * 5. ONLY THEN: Close the cell
 *
 * Closes cell, releases reservations, notifies coordinator, and resolves
 * a DurableDeferred keyed by bead_id for cross-agent task completion signaling.
 *
 * ## DurableDeferred Integration
 *
 * When a coordinator spawns workers, it can create a deferred BEFORE spawning:
 *
 * ```typescript
 * const swarmMail = await getSwarmMailLibSQL(projectPath);
 * const db = await swarmMail.getDatabase();
 *
 * // Create deferred keyed by bead_id
 * const deferredUrl = `deferred:${beadId}`;
 * await db.query(
 *   `INSERT INTO deferred (url, resolved, expires_at, created_at) VALUES (?, 0, ?, ?)`,
 *   [deferredUrl, Date.now() + 3600000, Date.now()]
 * );
 *
 * // Spawn worker (swarm_spawn_subtask...)
 *
 * // Await completion
 * const result = await db.query<{ value: string }>(
 *   `SELECT value FROM deferred WHERE url = ? AND resolved = 1`,
 *   [deferredUrl]
 * );
 * ```
 *
 * When the worker calls swarm_complete, it resolves the deferred automatically.
 * Coordinator can await without polling.
 */
export const swarm_complete = tool({
  description:
    "Mark subtask complete with Verification Gate. Runs typecheck and tests before allowing completion.",
  args: {
    project_key: tool.schema.string().describe("Project path"),
    agent_name: tool.schema.string().describe("Your Agent Mail name"),
    bead_id: tool.schema.string().describe("Subtask bead ID"),
    summary: tool.schema.string().describe("Brief summary of work done"),
    evaluation: tool.schema
      .string()
      .optional()
      .describe("Self-evaluation JSON (Evaluation schema)"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified - will be verified (typecheck, tests)"),
    skip_verification: tool.schema
      .boolean()
      .optional()
      .describe(
        "Skip ALL verification (typecheck, tests). Use sparingly! (default: false)",
      ),
    planned_files: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files that were originally planned to be modified"),
    start_time: tool.schema
      .number()
      .optional()
      .describe("Task start timestamp (Unix ms) for duration calculation"),
    error_count: tool.schema
      .number()
      .optional()
      .describe("Number of errors encountered during task"),
    retry_count: tool.schema
      .number()
      .optional()
      .describe("Number of retry attempts during task"),
    skip_review: tool.schema
      .boolean()
      .optional()
      .describe(
        "Skip review gate check (default: false). Use only for tasks that don't require coordinator review.",
      ),
  },
  async execute(args, _ctx) {
    // Extract epic ID early for error notifications and review gate
    const epicId = args.bead_id.includes(".")
      ? args.bead_id.split(".")[0]
      : args.bead_id;

    // Check review gate (unless skipped) - BEFORE try block so errors are clear
    if (!args.skip_review) {
      const reviewStatusResult = getReviewStatus(args.bead_id);

      if (!reviewStatusResult.approved) {
        // Check if review was even attempted
        if (!reviewStatusResult.reviewed) {
          return JSON.stringify(
            {
              success: true,
              status: "pending_review",
              review_status: reviewStatusResult,
              message: "Task completed but awaiting coordinator review before finalization.",
              next_steps: [
                `Request review with swarm_review(project_key="${args.project_key}", epic_id="${epicId}", task_id="${args.bead_id}", files_touched=[...])`,
                "Wait for coordinator to review and approve with swarm_review_feedback",
                "Once approved, call swarm_complete again to finalize",
                "Or use skip_review=true to bypass (not recommended for production work)",
              ],
            },
            null,
            2,
          );
        }

        // Review was attempted but not approved
        return JSON.stringify(
          {
            success: true,
            status: "needs_changes",
            review_status: reviewStatusResult,
            message: `Task reviewed but changes requested. ${reviewStatusResult.remaining_attempts} attempt(s) remaining.`,
            next_steps: [
              "Address the feedback from the reviewer",
              `Request another review with swarm_review(project_key="${args.project_key}", epic_id="${epicId}", task_id="${args.bead_id}", files_touched=[...])`,
              "Once approved, call swarm_complete again to finalize",
            ],
          },
          null,
          2,
        );
      }
    }

    try {
      // Validate bead_id exists and is not already closed (EARLY validation)
      // NOTE: Use args.project_key directly - cells are stored with the original path
      // (e.g., "/Users/joel/Code/project"), not a mangled version.

      // Use HiveAdapter for validation (not bd CLI)
      const adapter = await getHiveAdapter(args.project_key);

      // 1. Check if bead exists
      const cell = await adapter.getCell(args.project_key, args.bead_id);
      if (!cell) {
        return JSON.stringify({
          success: false,
          error: `Bead not found: ${args.bead_id}`,
          hint: "Check the bead ID is correct. Use hive_query to list open cells.",
        });
      }

      // 2. Check if bead is already closed
      if (cell.status === "closed") {
        return JSON.stringify({
          success: false,
          error: `Bead already closed: ${args.bead_id}`,
          hint: "This bead was already completed. No action needed.",
        });
      }

      // Verify agent is registered in swarm-mail
````
|
|
1259
|
-
// This catches agents who skipped swarmmail_init
|
|
1260
|
-
let agentRegistered = false;
|
|
1261
|
-
let registrationWarning = "";
|
|
1262
|
-
|
|
1263
|
-
try {
|
|
1264
|
-
const agent = await getAgent(
|
|
1265
|
-
args.project_key,
|
|
1266
|
-
args.agent_name,
|
|
1267
|
-
args.project_key,
|
|
1268
|
-
);
|
|
1269
|
-
agentRegistered = agent !== null;
|
|
1270
|
-
|
|
1271
|
-
if (!agentRegistered) {
|
|
1272
|
-
registrationWarning = `⚠️ WARNING: Agent '${args.agent_name}' was NOT registered in swarm-mail for project '${args.project_key}'.
|
|
1273
|
-
|
|
1274
|
-
This usually means you skipped the MANDATORY swarmmail_init step.
|
|
1275
|
-
|
|
1276
|
-
**Impact:**
|
|
1277
|
-
- Your work was not tracked in the coordination system
|
|
1278
|
-
- File reservations may not have been managed
|
|
1279
|
-
- Other agents couldn't coordinate with you
|
|
1280
|
-
- Learning/eval data may be incomplete
|
|
1281
|
-
|
|
1282
|
-
**Next time:** Run swarmmail_init(project_path="${args.project_key}", task_description="<task>") FIRST, before any other work.
|
|
1283
|
-
|
|
1284
|
-
Continuing with completion, but this should be fixed for future subtasks.`;
|
|
1285
|
-
|
|
1286
|
-
console.warn(`[swarm_complete] ${registrationWarning}`);
|
|
1287
|
-
}
|
|
1288
|
-
} catch (error) {
|
|
1289
|
-
// Non-fatal - agent might be using legacy workflow
|
|
1290
|
-
console.warn(
|
|
1291
|
-
`[swarm_complete] Could not verify agent registration:`,
|
|
1292
|
-
error,
|
|
1293
|
-
);
|
|
1294
|
-
registrationWarning = `ℹ️ Could not verify swarm-mail registration (database may not be available). Consider running swarmmail_init next time.`;
|
|
1295
|
-
}
|
|
1296
|
-
|
|
1297
|
-
// Run Verification Gate unless explicitly skipped
|
|
1298
|
-
let verificationResult: VerificationGateResult | null = null;
|
|
1299
|
-
|
|
1300
|
-
if (!args.skip_verification && args.files_touched?.length) {
|
|
1301
|
-
verificationResult = await runVerificationGate(
|
|
1302
|
-
args.files_touched,
|
|
1303
|
-
false,
|
|
1304
|
-
);
|
|
1305
|
-
|
|
1306
|
-
// Block completion if verification failed
|
|
1307
|
-
if (!verificationResult.passed) {
|
|
1308
|
-
return JSON.stringify(
|
|
1309
|
-
{
|
|
1310
|
-
success: false,
|
|
1311
|
-
error: "Verification Gate FAILED - fix issues before completing",
|
|
1312
|
-
verification: {
|
|
1313
|
-
passed: false,
|
|
1314
|
-
summary: verificationResult.summary,
|
|
1315
|
-
blockers: verificationResult.blockers,
|
|
1316
|
-
steps: verificationResult.steps.map((s) => ({
|
|
1317
|
-
name: s.name,
|
|
1318
|
-
passed: s.passed,
|
|
1319
|
-
skipped: s.skipped,
|
|
1320
|
-
skipReason: s.skipReason,
|
|
1321
|
-
error: s.error?.slice(0, 200),
|
|
1322
|
-
})),
|
|
1323
|
-
},
|
|
1324
|
-
hint:
|
|
1325
|
-
verificationResult.blockers.length > 0
|
|
1326
|
-
? `Fix these issues: ${verificationResult.blockers.map((b, i) => `${i + 1}. ${b}`).join(", ")}. Use skip_verification=true only as last resort.`
|
|
1327
|
-
: "Fix the failing checks and try again. Use skip_verification=true only as last resort.",
|
|
1328
|
-
gate_function:
|
|
1329
|
-
"IDENTIFY → RUN → READ → VERIFY → CLAIM (you are at VERIFY, claim blocked)",
|
|
1330
|
-
},
|
|
1331
|
-
null,
|
|
1332
|
-
2,
|
|
1333
|
-
);
|
|
1334
|
-
}
|
|
1335
|
-
}
|
|
1336
|
-
|
|
1337
|
-
// NOTE: Legacy UBS-only path removed in v0.31
|
|
1338
|
-
// UBS scan was slowing down completion without proportional value.
|
|
1339
|
-
// Run UBS manually if needed: ubs scan <files>
|
|
1340
|
-
|
|
1341
|
-
// Contract Validation - check files_touched against WorkerHandoff contract
|
|
1342
|
-
let contractValidation: { valid: boolean; violations: string[] } | null = null;
|
|
1343
|
-
let contractWarning: string | undefined;
|
|
1344
|
-
|
|
1345
|
-
if (args.files_touched && args.files_touched.length > 0) {
|
|
1346
|
-
// Extract epic ID from subtask ID
|
|
1347
|
-
const isSubtask = args.bead_id.includes(".");
|
|
1348
|
-
|
|
1349
|
-
if (isSubtask) {
|
|
1350
|
-
const epicId = args.bead_id.split(".")[0];
|
|
1351
|
-
|
|
1352
|
-
// Query decomposition event for files_owned
|
|
1353
|
-
const filesOwned = await getSubtaskFilesOwned(
|
|
1354
|
-
args.project_key,
|
|
1355
|
-
epicId,
|
|
1356
|
-
args.bead_id
|
|
1357
|
-
);
|
|
1358
|
-
|
|
1359
|
-
if (filesOwned) {
|
|
1360
|
-
contractValidation = validateContract(args.files_touched, filesOwned);
|
|
1361
|
-
|
|
1362
|
-
if (!contractValidation.valid) {
|
|
1363
|
-
// Contract violation - log warning (don't block completion)
|
|
1364
|
-
contractWarning = `⚠️ CONTRACT VIOLATION: Modified files outside owned scope
|
|
1365
|
-
|
|
1366
|
-
**Files owned**: ${filesOwned.join(", ")}
|
|
1367
|
-
**Files touched**: ${args.files_touched.join(", ")}
|
|
1368
|
-
**Violations**: ${contractValidation.violations.join(", ")}
|
|
1369
|
-
|
|
1370
|
-
This indicates scope creep - the worker modified files they weren't assigned.
|
|
1371
|
-
This will be recorded as a negative learning signal.`;
|
|
1372
|
-
|
|
1373
|
-
console.warn(`[swarm_complete] ${contractWarning}`);
|
|
1374
|
-
} else {
|
|
1375
|
-
console.log(`[swarm_complete] Contract validation passed: all ${args.files_touched.length} files within owned scope`);
|
|
1376
|
-
}
|
|
1377
|
-
} else {
|
|
1378
|
-
console.warn(`[swarm_complete] Could not retrieve files_owned for contract validation - skipping`);
|
|
1379
|
-
}
|
|
1380
|
-
}
|
|
1381
|
-
}
|
|
1382
|
-
|
|
1383
|
-
// Parse and validate evaluation if provided
|
|
1384
|
-
let parsedEvaluation: Evaluation | undefined;
|
|
1385
|
-
if (args.evaluation) {
|
|
1386
|
-
try {
|
|
1387
|
-
parsedEvaluation = EvaluationSchema.parse(
|
|
1388
|
-
JSON.parse(args.evaluation),
|
|
1389
|
-
);
|
|
1390
|
-
} catch (error) {
|
|
1391
|
-
return JSON.stringify(
|
|
1392
|
-
{
|
|
1393
|
-
success: false,
|
|
1394
|
-
error: "Invalid evaluation format",
|
|
1395
|
-
details:
|
|
1396
|
-
error instanceof z.ZodError ? error.issues : String(error),
|
|
1397
|
-
},
|
|
1398
|
-
null,
|
|
1399
|
-
2,
|
|
1400
|
-
);
|
|
1401
|
-
}
|
|
1402
|
-
|
|
1403
|
-
// If evaluation failed, don't complete
|
|
1404
|
-
if (!parsedEvaluation.passed) {
|
|
1405
|
-
return JSON.stringify(
|
|
1406
|
-
{
|
|
1407
|
-
success: false,
|
|
1408
|
-
error: "Self-evaluation failed",
|
|
1409
|
-
retry_suggestion: parsedEvaluation.retry_suggestion,
|
|
1410
|
-
feedback: parsedEvaluation.overall_feedback,
|
|
1411
|
-
},
|
|
1412
|
-
null,
|
|
1413
|
-
2,
|
|
1414
|
-
);
|
|
1415
|
-
}
|
|
1416
|
-
}
|
|
1417
|
-
|
|
1418
|
-
// Close the cell using HiveAdapter (not bd CLI)
|
|
1419
|
-
try {
|
|
1420
|
-
await adapter.closeCell(args.project_key, args.bead_id, args.summary);
|
|
1421
|
-
} catch (closeError) {
|
|
1422
|
-
const errorMessage = closeError instanceof Error ? closeError.message : String(closeError);
|
|
1423
|
-
return JSON.stringify(
|
|
1424
|
-
{
|
|
1425
|
-
success: false,
|
|
1426
|
-
error: "Failed to close cell",
|
|
1427
|
-
failed_step: "closeCell",
|
|
1428
|
-
details: errorMessage,
|
|
1429
|
-
bead_id: args.bead_id,
|
|
1430
|
-
project_key: args.project_key,
|
|
1431
|
-
recovery: {
|
|
1432
|
-
steps: [
|
|
1433
|
-
`1. Check cell exists: hive_query()`,
|
|
1434
|
-
`2. Check cell status (might already be closed)`,
|
|
1435
|
-
`3. If cell is blocked, unblock first: hive_update(id="${args.bead_id}", status="in_progress")`,
|
|
1436
|
-
`4. Try closing directly: hive_close(id="${args.bead_id}", reason="...")`,
|
|
1437
|
-
],
|
|
1438
|
-
hint: "Cell may already be closed, or the ID is incorrect.",
|
|
1439
|
-
},
|
|
1440
|
-
},
|
|
1441
|
-
null,
|
|
1442
|
-
2,
|
|
1443
|
-
);
|
|
1444
|
-
}
|
|
1445
|
-
|
|
1446
|
-
// Resolve DurableDeferred for cross-agent task completion signaling
|
|
1447
|
-
// This allows coordinator to await worker completion without polling
|
|
1448
|
-
let deferredResolved = false;
|
|
1449
|
-
let deferredError: string | undefined;
|
|
1450
|
-
try {
|
|
1451
|
-
const swarmMail = await getSwarmMailLibSQL(args.project_key);
|
|
1452
|
-
const db = await swarmMail.getDatabase();
|
|
1453
|
-
|
|
1454
|
-
// Resolve deferred keyed by bead_id
|
|
1455
|
-
// Coordinator should have created this deferred before spawning worker
|
|
1456
|
-
const deferredUrl = `deferred:${args.bead_id}`;
|
|
1457
|
-
|
|
1458
|
-
// Check if deferred exists before resolving
|
|
1459
|
-
const checkResult = await db.query<{ url: string; resolved: number }>(
|
|
1460
|
-
`SELECT url, resolved FROM deferred WHERE url = ? AND resolved = 0`,
|
|
1461
|
-
[deferredUrl],
|
|
1462
|
-
);
|
|
1463
|
-
|
|
1464
|
-
if (checkResult.rows.length > 0) {
|
|
1465
|
-
// Resolve with completion payload
|
|
1466
|
-
await db.query(
|
|
1467
|
-
`UPDATE deferred SET resolved = 1, value = ? WHERE url = ? AND resolved = 0`,
|
|
1468
|
-
[JSON.stringify({ completed: true, summary: args.summary }), deferredUrl],
|
|
1469
|
-
);
|
|
1470
|
-
|
|
1471
|
-
deferredResolved = true;
|
|
1472
|
-
} else {
|
|
1473
|
-
// Deferred doesn't exist - worker was likely not spawned via swarm pattern
|
|
1474
|
-
// This is non-fatal - just log for debugging
|
|
1475
|
-
console.info(
|
|
1476
|
-
`[swarm_complete] No deferred found for ${args.bead_id} - task may not be part of active swarm`,
|
|
1477
|
-
);
|
|
1478
|
-
}
|
|
1479
|
-
} catch (error) {
|
|
1480
|
-
// Non-fatal - deferred resolution is optional for backward compatibility
|
|
1481
|
-
deferredError = error instanceof Error ? error.message : String(error);
|
|
1482
|
-
console.warn(
|
|
1483
|
-
`[swarm_complete] Failed to resolve deferred (non-fatal): ${deferredError}`,
|
|
1484
|
-
);
|
|
1485
|
-
}
|
|
1486
|
-
|
|
1487
|
-
// Sync cell to .hive/issues.jsonl (auto-sync on complete)
|
|
1488
|
-
// This ensures the worker's completed work persists before process exits
|
|
1489
|
-
let syncSuccess = false;
|
|
1490
|
-
let syncError: string | undefined;
|
|
1491
|
-
try {
|
|
1492
|
-
// Save current working directory and set to project path
|
|
1493
|
-
const previousWorkingDir = getHiveWorkingDirectory();
|
|
1494
|
-
setHiveWorkingDirectory(args.project_key);
|
|
1495
|
-
|
|
1496
|
-
try {
|
|
1497
|
-
const syncResult = await hive_sync.execute({ auto_pull: false }, _ctx);
|
|
1498
|
-
syncSuccess = !syncResult.includes("error");
|
|
1499
|
-
} finally {
|
|
1500
|
-
// Restore previous working directory
|
|
1501
|
-
setHiveWorkingDirectory(previousWorkingDir);
|
|
1502
|
-
}
|
|
1503
|
-
} catch (error) {
|
|
1504
|
-
// Non-fatal - log warning but don't block completion
|
|
1505
|
-
syncError = error instanceof Error ? error.message : String(error);
|
|
1506
|
-
console.warn(
|
|
1507
|
-
`[swarm_complete] Auto-sync failed (non-fatal): ${syncError}`,
|
|
1508
|
-
);
|
|
1509
|
-
}
|
|
1510
|
-
|
|
1511
|
-
// Emit SubtaskOutcomeEvent for learning system
|
|
1512
|
-
try {
|
|
1513
|
-
const durationMs = args.start_time ? Date.now() - args.start_time : 0;
|
|
1514
|
-
|
|
1515
|
-
// Determine epic ID: use parent_id if available, otherwise fall back to extracting from bead_id
|
|
1516
|
-
// (New hive cell IDs don't follow epicId.subtaskNum pattern - they're independent IDs)
|
|
1517
|
-
const eventEpicId = cell.parent_id || (args.bead_id.includes(".")
|
|
1518
|
-
? args.bead_id.split(".")[0]
|
|
1519
|
-
: args.bead_id);
|
|
1520
|
-
|
|
1521
|
-
const event = createEvent("subtask_outcome", {
|
|
1522
|
-
project_key: args.project_key,
|
|
1523
|
-
epic_id: eventEpicId,
|
|
1524
|
-
bead_id: args.bead_id,
|
|
1525
|
-
planned_files: args.planned_files || [],
|
|
1526
|
-
actual_files: args.files_touched || [],
|
|
1527
|
-
duration_ms: durationMs,
|
|
1528
|
-
error_count: args.error_count || 0,
|
|
1529
|
-
retry_count: args.retry_count || 0,
|
|
1530
|
-
success: true,
|
|
1531
|
-
scope_violation: contractValidation ? !contractValidation.valid : undefined,
|
|
1532
|
-
violation_files: contractValidation?.violations,
|
|
1533
|
-
});
|
|
1534
|
-
await appendEvent(event, args.project_key);
|
|
1535
|
-
} catch (error) {
|
|
1536
|
-
// Non-fatal - log and continue
|
|
1537
|
-
console.warn(
|
|
1538
|
-
"[swarm_complete] Failed to emit SubtaskOutcomeEvent:",
|
|
1539
|
-
error,
|
|
1540
|
-
);
|
|
1541
|
-
}
|
|
1542
|
-
|
|
1543
|
-
// Automatic memory capture (MANDATORY on successful completion)
|
|
1544
|
-
// Extract strategy from bead metadata if available
|
|
1545
|
-
let capturedStrategy: LearningDecompositionStrategy | undefined;
|
|
1546
|
-
const durationMs = args.start_time ? Date.now() - args.start_time : 0;
|
|
1547
|
-
|
|
1548
|
-
// Build memory information from task completion
|
|
1549
|
-
const memoryInfo = formatMemoryStoreOnSuccess(
|
|
1550
|
-
args.bead_id,
|
|
1551
|
-
args.summary,
|
|
1552
|
-
args.files_touched || [],
|
|
1553
|
-
capturedStrategy,
|
|
1554
|
-
);
|
|
1555
|
-
|
|
1556
|
-
let memoryStored = false;
|
|
1557
|
-
let memoryError: string | undefined;
|
|
1558
|
-
|
|
1559
|
-
// Attempt to store in semantic-memory (non-blocking)
|
|
1560
|
-
try {
|
|
1561
|
-
const memoryAvailable = await isToolAvailable("semantic-memory");
|
|
1562
|
-
if (memoryAvailable) {
|
|
1563
|
-
// Call semantic-memory store command
|
|
1564
|
-
const storeResult =
|
|
1565
|
-
await Bun.$`semantic-memory store ${memoryInfo.information} --metadata ${memoryInfo.metadata}`
|
|
1566
|
-
.quiet()
|
|
1567
|
-
.nothrow();
|
|
1568
|
-
|
|
1569
|
-
if (storeResult.exitCode === 0) {
|
|
1570
|
-
memoryStored = true;
|
|
1571
|
-
} else {
|
|
1572
|
-
memoryError = `semantic-memory store failed: ${storeResult.stderr.toString().slice(0, 200)}`;
|
|
1573
|
-
console.warn(`[swarm_complete] ${memoryError}`);
|
|
1574
|
-
}
|
|
1575
|
-
} else {
|
|
1576
|
-
memoryError =
|
|
1577
|
-
"semantic-memory not available - learning stored in-memory only";
|
|
1578
|
-
warnMissingTool("semantic-memory");
|
|
1579
|
-
}
|
|
1580
|
-
} catch (error) {
|
|
1581
|
-
memoryError = `Failed to store memory: ${error instanceof Error ? error.message : String(error)}`;
|
|
1582
|
-
console.warn(`[swarm_complete] ${memoryError}`);
|
|
1583
|
-
}
|
|
1584
|
-
|
|
1585
|
-
// Release file reservations for this agent using embedded swarm-mail
|
|
1586
|
-
try {
|
|
1587
|
-
await releaseSwarmFiles({
|
|
1588
|
-
projectPath: args.project_key,
|
|
1589
|
-
agentName: args.agent_name,
|
|
1590
|
-
// Release all reservations for this agent
|
|
1591
|
-
});
|
|
1592
|
-
} catch (error) {
|
|
1593
|
-
// Release might fail (e.g., no reservations existed)
|
|
1594
|
-
// This is non-fatal - log and continue
|
|
1595
|
-
console.warn(
|
|
1596
|
-
`[swarm] Failed to release file reservations for ${args.agent_name}:`,
|
|
1597
|
-
error,
|
|
1598
|
-
);
|
|
1599
|
-
}
|
|
1600
|
-
|
|
1601
|
-
// Extract epic ID
|
|
1602
|
-
const epicId = args.bead_id.includes(".")
|
|
1603
|
-
? args.bead_id.split(".")[0]
|
|
1604
|
-
: args.bead_id;
|
|
1605
|
-
|
|
1606
|
-
// Send completion message using embedded swarm-mail with memory capture status
|
|
1607
|
-
const completionBody = [
|
|
1608
|
-
`## Subtask Complete: ${args.bead_id}`,
|
|
1609
|
-
"",
|
|
1610
|
-
`**Summary**: ${args.summary}`,
|
|
1611
|
-
"",
|
|
1612
|
-
parsedEvaluation
|
|
1613
|
-
? `**Self-Evaluation**: ${parsedEvaluation.passed ? "PASSED" : "FAILED"}`
|
|
1614
|
-
: "",
|
|
1615
|
-
parsedEvaluation?.overall_feedback
|
|
1616
|
-
? `**Feedback**: ${parsedEvaluation.overall_feedback}`
|
|
1617
|
-
: "",
|
|
1618
|
-
"",
|
|
1619
|
-
`**Memory Capture**: ${memoryStored ? "✓ Stored in semantic-memory" : `✗ ${memoryError || "Failed"}`}`,
|
|
1620
|
-
]
|
|
1621
|
-
.filter(Boolean)
|
|
1622
|
-
.join("\n");
|
|
1623
|
-
|
|
1624
|
-
// Send completion message (non-fatal if it fails)
|
|
1625
|
-
let messageSent = false;
|
|
1626
|
-
let messageError: string | undefined;
|
|
1627
|
-
try {
|
|
1628
|
-
await sendSwarmMessage({
|
|
1629
|
-
projectPath: args.project_key,
|
|
1630
|
-
fromAgent: args.agent_name,
|
|
1631
|
-
toAgents: [], // Thread broadcast
|
|
1632
|
-
subject: `Complete: ${args.bead_id}`,
|
|
1633
|
-
body: completionBody,
|
|
1634
|
-
threadId: epicId,
|
|
1635
|
-
importance: "normal",
|
|
1636
|
-
});
|
|
1637
|
-
messageSent = true;
|
|
1638
|
-
} catch (error) {
|
|
1639
|
-
// Non-fatal - log and continue
|
|
1640
|
-
messageError = error instanceof Error ? error.message : String(error);
|
|
1641
|
-
console.warn(
|
|
1642
|
-
`[swarm_complete] Failed to send completion message: ${messageError}`,
|
|
1643
|
-
);
|
|
1644
|
-
}
|
|
1645
|
-
|
|
1646
|
-
// Build success response with semantic-memory integration
|
|
1647
|
-
const response = {
|
|
1648
|
-
success: true,
|
|
1649
|
-
bead_id: args.bead_id,
|
|
1650
|
-
closed: true,
|
|
1651
|
-
reservations_released: true,
|
|
1652
|
-
synced: syncSuccess,
|
|
1653
|
-
sync_error: syncError,
|
|
1654
|
-
message_sent: messageSent,
|
|
1655
|
-
message_error: messageError,
|
|
1656
|
-
deferred_resolved: deferredResolved,
|
|
1657
|
-
deferred_error: deferredError,
|
|
1658
|
-
agent_registration: {
|
|
1659
|
-
verified: agentRegistered,
|
|
1660
|
-
warning: registrationWarning || undefined,
|
|
1661
|
-
},
|
|
1662
|
-
verification_gate: verificationResult
|
|
1663
|
-
? {
|
|
1664
|
-
passed: true,
|
|
1665
|
-
summary: verificationResult.summary,
|
|
1666
|
-
steps: verificationResult.steps.map((s) => ({
|
|
1667
|
-
name: s.name,
|
|
1668
|
-
passed: s.passed,
|
|
1669
|
-
skipped: s.skipped,
|
|
1670
|
-
skipReason: s.skipReason,
|
|
1671
|
-
})),
|
|
1672
|
-
}
|
|
1673
|
-
: args.skip_verification
|
|
1674
|
-
? { skipped: true, reason: "skip_verification=true" }
|
|
1675
|
-
: { skipped: true, reason: "no files_touched provided" },
|
|
1676
|
-
learning_prompt: `## Reflection
|
|
1677
|
-
|
|
1678
|
-
Did you learn anything reusable during this subtask? Consider:
|
|
1679
|
-
|
|
1680
|
-
1. **Patterns**: Any code patterns or approaches that worked well?
|
|
1681
|
-
2. **Gotchas**: Edge cases or pitfalls to warn future agents about?
|
|
1682
|
-
3. **Best Practices**: Domain-specific guidelines worth documenting?
|
|
1683
|
-
4. **Tool Usage**: Effective ways to use tools for this type of task?
|
|
1684
|
-
|
|
1685
|
-
If you discovered something valuable, use \`swarm_learn\` or \`skills_create\` to preserve it as a skill for future swarms.
|
|
1686
|
-
|
|
1687
|
-
Files touched: ${args.files_touched?.join(", ") || "none recorded"}`,
|
|
1688
|
-
// Automatic memory capture (MANDATORY)
|
|
1689
|
-
memory_capture: {
|
|
1690
|
-
attempted: true,
|
|
1691
|
-
stored: memoryStored,
|
|
1692
|
-
error: memoryError,
|
|
1693
|
-
information: memoryInfo.information,
|
|
1694
|
-
metadata: memoryInfo.metadata,
|
|
1695
|
-
note: memoryStored
|
|
1696
|
-
? "Learning automatically stored in semantic-memory"
|
|
1697
|
-
: `Failed to store: ${memoryError}. Learning lost unless semantic-memory is available.`,
|
|
1698
|
-
},
|
|
1699
|
-
// Contract validation result
|
|
1700
|
-
contract_validation: contractValidation
|
|
1701
|
-
? {
|
|
1702
|
-
validated: true,
|
|
1703
|
-
passed: contractValidation.valid,
|
|
1704
|
-
violations: contractValidation.violations,
|
|
1705
|
-
warning: contractWarning,
|
|
1706
|
-
note: contractValidation.valid
|
|
1707
|
-
? "All files within owned scope"
|
|
1708
|
-
: "Scope violation detected - recorded as negative learning signal",
|
|
1709
|
-
}
|
|
1710
|
-
: {
|
|
1711
|
-
validated: false,
|
|
1712
|
-
reason: "No files_owned contract found (non-epic subtask or decomposition event missing)",
|
|
1713
|
-
},
|
|
1714
|
-
};
|
|
1715
|
-
|
|
1716
|
-
// Capture subtask completion outcome for eval data
|
|
1717
|
-
try {
|
|
1718
|
-
const { captureSubtaskOutcome } = await import("./eval-capture.js");
|
|
1719
|
-
const durationMs = args.start_time ? Date.now() - args.start_time : 0;
|
|
1720
|
-
|
|
1721
|
-
// Determine epic ID: use parent_id if available, otherwise fall back to extracting from bead_id
|
|
1722
|
-
const evalEpicId = cell.parent_id || epicId;
|
|
1723
|
-
|
|
1724
|
-
captureSubtaskOutcome({
|
|
1725
|
-
epicId: evalEpicId,
|
|
1726
|
-
projectPath: args.project_key,
|
|
1727
|
-
beadId: args.bead_id,
|
|
1728
|
-
title: cell.title,
|
|
1729
|
-
plannedFiles: args.planned_files || [],
|
|
1730
|
-
actualFiles: args.files_touched || [],
|
|
1731
|
-
durationMs,
|
|
1732
|
-
errorCount: args.error_count || 0,
|
|
1733
|
-
retryCount: args.retry_count || 0,
|
|
1734
|
-
success: true,
|
|
1735
|
-
});
|
|
1736
|
-
} catch (error) {
|
|
1737
|
-
// Non-fatal - don't block completion if capture fails
|
|
1738
|
-
console.warn("[swarm_complete] Failed to capture subtask outcome:", error);
|
|
1739
|
-
}
|
|
1740
|
-
|
|
1741
|
-
// Capture subtask completion outcome
|
|
1742
|
-
try {
|
|
1743
|
-
const durationMs = args.start_time ? Date.now() - args.start_time : 0;
|
|
1744
|
-
captureCoordinatorEvent({
|
|
1745
|
-
session_id: _ctx.sessionID || "unknown",
|
|
1746
|
-
epic_id: epicId,
|
|
1747
|
-
timestamp: new Date().toISOString(),
|
|
1748
|
-
event_type: "OUTCOME",
|
|
1749
|
-
outcome_type: "subtask_success",
|
|
1750
|
-
payload: {
|
|
1751
|
-
bead_id: args.bead_id,
|
|
1752
|
-
duration_ms: durationMs,
|
|
1753
|
-
files_touched: args.files_touched || [],
|
|
1754
|
-
verification_passed: verificationResult?.passed ?? false,
|
|
1755
|
-
verification_skipped: args.skip_verification ?? false,
|
|
1756
|
-
},
|
|
1757
|
-
});
|
|
1758
|
-
} catch (error) {
|
|
1759
|
-
// Non-fatal - don't block completion if capture fails
|
|
1760
|
-
console.warn("[swarm_complete] Failed to capture subtask_success:", error);
|
|
1761
|
-
}
|
|
1762
|
-
|
|
1763
|
-
return JSON.stringify(response, null, 2);
|
|
1764
|
-
} catch (error) {
|
|
1765
|
-
// CRITICAL: Notify coordinator of failure via swarm mail
|
|
1766
|
-
const errorMessage =
|
|
1767
|
-
error instanceof Error ? error.message : String(error);
|
|
1768
|
-
const errorStack = error instanceof Error ? error.stack : undefined;
|
|
1769
|
-
|
|
1770
|
-
// Determine which step failed
|
|
1771
|
-
let failedStep = "unknown";
|
|
1772
|
-
if (errorMessage.includes("verification")) {
|
|
1773
|
-
failedStep = "Verification Gate (typecheck/tests)";
|
|
1774
|
-
} else if (errorMessage.includes("evaluation")) {
|
|
1775
|
-
failedStep = "Self-evaluation parsing";
|
|
1776
|
-
} else if (
|
|
1777
|
-
errorMessage.includes("bead") ||
|
|
1778
|
-
errorMessage.includes("close")
|
|
1779
|
-
) {
|
|
1780
|
-
failedStep = "Bead close";
|
|
1781
|
-
} else if (
|
|
1782
|
-
errorMessage.includes("memory") ||
|
|
1783
|
-
errorMessage.includes("semantic")
|
|
1784
|
-
) {
|
|
1785
|
-
failedStep = "Memory storage (non-fatal)";
|
|
1786
|
-
} else if (
|
|
1787
|
-
errorMessage.includes("reservation") ||
|
|
1788
|
-
errorMessage.includes("release")
|
|
1789
|
-
) {
|
|
1790
|
-
failedStep = "File reservation release";
|
|
1791
|
-
} else if (
|
|
1792
|
-
errorMessage.includes("message") ||
|
|
1793
|
-
errorMessage.includes("mail")
|
|
1794
|
-
) {
|
|
1795
|
-
failedStep = "Swarm mail notification";
|
|
1796
|
-
}
|
|
1797
|
-
|
|
1798
|
-
// Build error notification body
|
|
1799
|
-
const errorBody = [
|
|
1800
|
-
`## ⚠️ SWARM_COMPLETE FAILED`,
|
|
1801
|
-
"",
|
|
1802
|
-
`**Bead**: ${args.bead_id}`,
|
|
1803
|
-
`**Agent**: ${args.agent_name}`,
|
|
1804
|
-
`**Failed Step**: ${failedStep}`,
|
|
1805
|
-
"",
|
|
1806
|
-
`### Error Message`,
|
|
1807
|
-
"```",
|
|
1808
|
-
errorMessage,
|
|
1809
|
-
"```",
|
|
1810
|
-
"",
|
|
1811
|
-
errorStack
|
|
1812
|
-
? `### Stack Trace\n\`\`\`\n${errorStack.slice(0, 1000)}\n\`\`\`\n`
|
|
1813
|
-
: "",
|
|
1814
|
-
`### Context`,
|
|
1815
|
-
`- **Summary**: ${args.summary}`,
|
|
1816
|
-
`- **Files touched**: ${args.files_touched?.length ? args.files_touched.join(", ") : "none"}`,
|
|
1817
|
-
`- **Skip verification**: ${args.skip_verification ?? false}`,
|
|
1818
|
-
"",
|
|
1819
|
-
`### Recovery Actions`,
|
|
1820
|
-
"1. Check error message for specific issue",
|
|
1821
|
-
"2. Review failed step (UBS scan, typecheck, cell close, etc.)",
|
|
1822
|
-
"3. Fix underlying issue or use skip flags if appropriate",
|
|
1823
|
-
"4. Retry swarm_complete after fixing",
|
|
1824
|
-
]
|
|
1825
|
-
.filter(Boolean)
|
|
1826
|
-
.join("\n");
|
|
1827
|
-
|
|
1828
|
-
// Send urgent notification to coordinator
|
|
1829
|
-
let notificationSent = false;
|
|
1830
|
-
try {
|
|
1831
|
-
await sendSwarmMessage({
|
|
1832
|
-
projectPath: args.project_key,
|
|
1833
|
-
fromAgent: args.agent_name,
|
|
1834
|
-
toAgents: [], // Thread broadcast to coordinator
|
|
1835
|
-
subject: `FAILED: swarm_complete for ${args.bead_id}`,
|
|
1836
|
-
body: errorBody,
|
|
1837
|
-
threadId: epicId,
|
|
1838
|
-
importance: "urgent",
|
|
1839
|
-
});
|
|
1840
|
-
notificationSent = true;
|
|
1841
|
-
} catch (mailError) {
|
|
1842
|
-
// Even swarm mail failed - log to console as last resort
|
|
1843
|
-
console.error(
|
|
1844
|
-
`[swarm_complete] CRITICAL: Failed to notify coordinator of failure for ${args.bead_id}:`,
|
|
1845
|
-
mailError,
|
|
1846
|
-
);
|
|
1847
|
-
console.error(`[swarm_complete] Original error:`, error);
|
|
1848
|
-
}
|
|
1849
|
-
|
|
1850
|
-
// Capture subtask failure outcome
|
|
1851
|
-
try {
|
|
1852
|
-
const durationMs = args.start_time ? Date.now() - args.start_time : 0;
|
|
1853
|
-
captureCoordinatorEvent({
|
|
1854
|
-
session_id: _ctx.sessionID || "unknown",
|
|
1855
|
-
epic_id: epicId,
|
|
1856
|
-
timestamp: new Date().toISOString(),
|
|
1857
|
-
event_type: "OUTCOME",
|
|
1858
|
-
outcome_type: "subtask_failed",
|
|
1859
|
-
payload: {
|
|
1860
|
-
bead_id: args.bead_id,
|
|
1861
|
-
duration_ms: durationMs,
|
|
1862
|
-
failed_step: failedStep,
|
|
1863
|
-
error_message: errorMessage.slice(0, 500),
|
|
1864
|
-
},
|
|
1865
|
-
});
|
|
1866
|
-
} catch (captureError) {
|
|
1867
|
-
// Non-fatal - don't block error return if capture fails
|
|
1868
|
-
console.warn("[swarm_complete] Failed to capture subtask_failed:", captureError);
|
|
1869
|
-
}
|
|
1870
|
-
|
|
1871
|
-
// Return structured error instead of throwing
|
|
1872
|
-
// This ensures the agent sees the actual error message
|
|
1873
|
-
return JSON.stringify(
|
|
1874
|
-
{
|
|
1875
|
-
success: false,
|
|
1876
|
-
error: `swarm_complete failed: ${errorMessage}`,
|
|
1877
|
-
failed_step: failedStep,
|
|
1878
|
-
bead_id: args.bead_id,
|
|
1879
|
-
agent_name: args.agent_name,
|
|
1880
|
-
coordinator_notified: notificationSent,
|
|
1881
|
-
stack_trace: errorStack?.slice(0, 500),
|
|
1882
|
-
hint: "Check the error message above. Common issues: bead not found, session not initialized.",
|
|
1883
|
-
context: {
|
|
1884
|
-
summary: args.summary,
|
|
1885
|
-
files_touched: args.files_touched || [],
|
|
1886
|
-
skip_verification: args.skip_verification ?? false,
|
|
1887
|
-
},
|
|
1888
|
-
recovery: {
|
|
1889
|
-
steps: [
|
|
1890
|
-
"1. Check the error message above for specific issue",
|
|
1891
|
-
`2. Review failed step: ${failedStep}`,
|
|
1892
|
-
"3. Fix underlying issue or use skip flags if appropriate",
|
|
1893
|
-
"4. Retry swarm_complete after fixing",
|
|
1894
|
-
],
|
|
1895
|
-
common_fixes: {
|
|
1896
|
-
"Verification Gate": "Use skip_verification=true to bypass (not recommended)",
|
|
1897
|
-
"Cell close": "Check cell status with hive_query(), may need hive_update() first",
|
|
1898
|
-
"Self-evaluation": "Check evaluation JSON format matches EvaluationSchema",
|
|
1899
|
-
},
|
|
1900
|
-
},
|
|
1901
|
-
},
|
|
1902
|
-
null,
|
|
1903
|
-
2,
|
|
1904
|
-
);
|
|
1905
|
-
}
|
|
1906
|
-
},
|
|
1907
|
-
});
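Taken together, a worker's happy path is: do the work, request review, wait for approval, then call the tool above to finalize. A minimal worker-side sketch follows, invoking the tool object directly the same way this file itself calls `hive_sync.execute(...)`; the `ctx` value, agent name, bead ID, and file paths are illustrative assumptions, not part of the original source.

```typescript
declare const ctx: any; // host-provided tool context (assumption)

// Capture start_time once, when the worker begins the subtask.
const startTime = Date.now();

// ... do the work, tracking touched files ...

const resultJson = await swarm_complete.execute(
  {
    project_key: "/path/to/project",      // illustrative
    agent_name: "worker-1",               // illustrative
    bead_id: "epic-42.3",                 // "epic.subtask" form enables contract validation
    summary: "Added Zod validation to the API routes",
    files_touched: ["src/routes/api.ts"], // non-empty list triggers the Verification Gate
    planned_files: ["src/routes/api.ts"],
    start_time: startTime,
    error_count: 0,
    retry_count: 0,
  },
  ctx,
);

const result = JSON.parse(resultJson);
// success=true with status="pending_review" means the review gate intercepted
// the call: request swarm_review, then call swarm_complete again once approved.
if (!result.success && result.failed_step) {
  console.error(`swarm_complete failed at: ${result.failed_step}`);
}
```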
```typescript
/**
 * Record outcome signals from a completed subtask
 *
 * Tracks implicit feedback (duration, errors, retries) to score
 * decomposition quality over time. This data feeds into criterion
 * weight calculations.
 *
 * Strategy tracking enables learning about which decomposition strategies
 * work best for different task types.
 *
 * @see src/learning.ts for scoring logic
 */
export const swarm_record_outcome = tool({
  description:
    "Record subtask outcome for implicit feedback scoring. Tracks duration, errors, retries to learn decomposition quality.",
  args: {
    bead_id: tool.schema.string().describe("Subtask bead ID"),
    duration_ms: tool.schema
      .number()
      .int()
      .min(0)
      .describe("Duration in milliseconds"),
    error_count: tool.schema
      .number()
      .int()
      .min(0)
      .default(0)
      .describe("Number of errors encountered"),
    retry_count: tool.schema
      .number()
      .int()
      .min(0)
      .default(0)
      .describe("Number of retry attempts"),
    success: tool.schema.boolean().describe("Whether the subtask succeeded"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files that were modified"),
    criteria: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe(
        "Criteria to generate feedback for (default: all default criteria)",
      ),
    strategy: tool.schema
      .enum(["file-based", "feature-based", "risk-based", "research-based"])
      .optional()
      .describe("Decomposition strategy used for this task"),
    failure_mode: tool.schema
      .enum([
        "timeout",
        "conflict",
        "validation",
        "tool_failure",
        "context_overflow",
        "dependency_blocked",
        "user_cancelled",
        "unknown",
      ])
      .optional()
      .describe(
        "Failure classification (only when success=false). Auto-classified if not provided.",
      ),
    failure_details: tool.schema
      .string()
      .optional()
      .describe("Detailed failure context (error message, stack trace, etc.)"),
    project_path: tool.schema
      .string()
      .optional()
      .describe("Project path (for finalizing eval records when all subtasks complete)"),
    epic_id: tool.schema
      .string()
      .optional()
      .describe("Epic ID (for finalizing eval records when all subtasks complete)"),
  },
  async execute(args) {
    // Build outcome signals
    const signals: OutcomeSignals = {
      bead_id: args.bead_id,
      duration_ms: args.duration_ms,
      error_count: args.error_count ?? 0,
      retry_count: args.retry_count ?? 0,
      success: args.success,
      files_touched: args.files_touched ?? [],
      timestamp: new Date().toISOString(),
      strategy: args.strategy as LearningDecompositionStrategy | undefined,
      failure_mode: args.failure_mode,
      failure_details: args.failure_details,
    };

    // If task failed but no failure_mode provided, try to classify from failure_details
    if (!args.success && !args.failure_mode && args.failure_details) {
      const classified = classifyFailure(args.failure_details);
      signals.failure_mode = classified as OutcomeSignals["failure_mode"];
    }

    // Validate signals
    const validated = OutcomeSignalsSchema.parse(signals);

    // Score the outcome
    const scored: ScoredOutcome = scoreImplicitFeedback(
      validated,
      DEFAULT_LEARNING_CONFIG,
    );

    // Get error patterns from accumulator
    const errorStats = await globalErrorAccumulator.getErrorStats(args.bead_id);

    // Finalize eval record if project_path and epic_id provided
    let finalizedRecord: EvalRecord | null = null;
    if (args.project_path && args.epic_id) {
      try {
        const { finalizeEvalRecord } = await import("./eval-capture.js");
        finalizedRecord = finalizeEvalRecord({
          epicId: args.epic_id,
          projectPath: args.project_path,
        });
      } catch (error) {
        // Non-fatal - log and continue
        console.warn("[swarm_record_outcome] Failed to finalize eval record:", error);
      }
    }

    // Generate feedback events for each criterion
    const criteriaToScore = args.criteria ?? [
      "type_safe",
      "no_bugs",
      "patterns",
      "readable",
    ];
    const feedbackEvents: FeedbackEvent[] = criteriaToScore.map((criterion) => {
      const event = outcomeToFeedback(scored, criterion);
      // Include strategy in feedback context for future analysis
      if (args.strategy) {
        event.context =
          `${event.context || ""} [strategy: ${args.strategy}]`.trim();
      }
      // Include error patterns in feedback context
      if (errorStats.total > 0) {
        const errorSummary = Object.entries(errorStats.by_type)
          .map(([type, count]) => `${type}:${count}`)
          .join(", ");
        event.context =
          `${event.context || ""} [errors: ${errorSummary}]`.trim();
      }
      return event;
    });

    return JSON.stringify(
      {
        success: true,
        outcome: {
          signals: validated,
          scored: {
            type: scored.type,
            decayed_value: scored.decayed_value,
            reasoning: scored.reasoning,
          },
        },
        feedback_events: feedbackEvents,
        error_patterns: errorStats,
        summary: {
          feedback_type: scored.type,
          duration_seconds: Math.round(args.duration_ms / 1000),
          error_count: args.error_count ?? 0,
          retry_count: args.retry_count ?? 0,
          success: args.success,
          strategy: args.strategy,
          failure_mode: validated.failure_mode,
          failure_details: validated.failure_details,
          accumulated_errors: errorStats.total,
          unresolved_errors: errorStats.unresolved,
        },
        finalized_eval_record: finalizedRecord || undefined,
        note: "Feedback events should be stored for criterion weight calculation. Use learning.ts functions to apply weights.",
      },
      null,
      2,
    );
  },
});
```
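For instance, a coordinator might record implicit feedback right after a worker finishes. A sketch with illustrative values; omitting `failure_mode` on a failed task lets `classifyFailure()` derive it from `failure_details`, per the implementation above.

```typescript
const outcomeJson = await swarm_record_outcome.execute({
  bead_id: "epic-42.3",                  // illustrative
  duration_ms: 95_000,
  error_count: 2,
  retry_count: 1,
  success: false,
  files_touched: ["src/routes/api.ts"],
  strategy: "file-based",
  failure_details: "Timed out waiting for typecheck to finish",
});

const outcome = JSON.parse(outcomeJson);
// One FeedbackEvent per criterion (type_safe, no_bugs, patterns, readable by
// default), each tagged with [strategy: file-based] in its context.
console.log(outcome.summary.failure_mode); // plausibly "timeout", via auto-classification
```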
```typescript
// ============================================================================
// Research Phase
// ============================================================================

/**
 * Known technology patterns for extraction from task descriptions
 * Maps common mentions to normalized package names
 */
const TECH_PATTERNS: Record<string, RegExp> = {
  next: /next\.?js|nextjs/i,
  react: /react(?!ive)/i,
  zod: /zod/i,
  typescript: /typescript|ts(?!\w)/i,
  tailwind: /tailwind(?:css)?/i,
  prisma: /prisma/i,
  drizzle: /drizzle(?:-orm)?/i,
  trpc: /trpc/i,
  "react-query": /react-query|tanstack.*query/i,
  axios: /axios/i,
  "node-fetch": /node-fetch|fetch api/i,
};

/**
 * Extract technology stack from task description
 *
 * Searches for common framework/library mentions and returns
 * a deduplicated array of normalized names.
 *
 * @param task - Task description
 * @returns Array of detected technology names (normalized, lowercase)
 *
 * @example
 * ```typescript
 * extractTechStack("Add Next.js API routes with Zod validation")
 * // => ["next", "zod"]
 * ```
 */
export function extractTechStack(task: string): string[] {
  const detected = new Set<string>();

  for (const [tech, pattern] of Object.entries(TECH_PATTERNS)) {
    if (pattern.test(task)) {
      detected.add(tech);
    }
  }

  return Array.from(detected);
}

/**
 * Spawn instruction for a researcher worker
 */
export interface ResearchSpawnInstruction {
  /** Unique ID for this research task */
  research_id: string;
  /** Technology being researched */
  tech: string;
  /** Full prompt for the researcher agent */
  prompt: string;
  /** Agent type for the Task tool */
  subagent_type: "swarm/researcher";
}

/**
 * Research result from documentation discovery phase
 */
export interface ResearchResult {
  /** Technologies identified and researched */
  tech_stack: string[];
  /** Spawn instructions for researcher workers */
  spawn_instructions: ResearchSpawnInstruction[];
  /** Summaries keyed by technology name */
  summaries: Record<string, string>;
  /** Semantic-memory IDs where research is stored */
  memory_ids: string[];
}

/**
 * Run research phase before task decomposition
 *
 * This is the INTEGRATION point that:
 * 1. Analyzes task to identify technologies
 * 2. Spawns researcher agents for each technology (parallel)
 * 3. Waits for researchers to complete
 * 4. Collects summaries from semantic-memory
 * 5. Returns combined context for shared_context
 *
 * Flow:
 * ```
 * Task received
 *   ↓
 * extractTechStack(task) → ["next", "zod"]
 *   ↓
 * For each tech: swarm_spawn_researcher(tech_stack=[tech])
 *   ↓
 * Spawn Task agents in parallel
 *   ↓
 * Wait for all to complete
 *   ↓
 * Collect summaries from swarm mail
 *   ↓
 * Return ResearchResult → inject into shared_context
 * ```
 *
 * @param task - Task description to analyze
 * @param projectPath - Absolute path to project root
 * @param options - Optional configuration
 * @returns Research results with summaries and memory IDs
 *
 * @example
 * ```typescript
 * const result = await runResearchPhase(
 *   "Add Next.js API routes with Zod validation",
 *   "/path/to/project"
 * );
 * // result.tech_stack => ["next", "zod"]
 * // result.summaries => { next: "...", zod: "..." }
 * // Use result as shared_context for decomposition
 * ```
 */
export async function runResearchPhase(
  task: string,
  projectPath: string,
  options?: { checkUpgrades?: boolean }
): Promise<ResearchResult> {
  // Step 1: Extract technologies from task description
  const techStack = extractTechStack(task);

  // Early return if no technologies detected
  if (techStack.length === 0) {
    return {
      tech_stack: [],
      spawn_instructions: [],
      summaries: {},
      memory_ids: [],
    };
  }

  // Step 2: Generate spawn instructions for each technology
  // The coordinator will use these to spawn researcher workers via Task()
  const spawnInstructions: ResearchSpawnInstruction[] = [];

  for (const tech of techStack) {
    // Generate unique research ID
    const researchId = `research-${tech}-${Date.now()}-${Math.random().toString(36).slice(2, 9)}`;

    // Generate researcher prompt
    const prompt = formatResearcherPrompt({
      research_id: researchId,
      epic_id: "standalone-research", // No epic context for standalone research
      tech_stack: [tech], // Single tech per researcher
      project_path: projectPath,
      check_upgrades: options?.checkUpgrades ?? false,
    });

    spawnInstructions.push({
      research_id: researchId,
      tech,
      prompt,
      subagent_type: "swarm/researcher",
    });
  }

  // Step 3: Return spawn instructions for coordinator
  // The coordinator will spawn Task() agents using these instructions
  // and collect results from swarm mail after completion
  return {
    tech_stack: techStack,
    spawn_instructions: spawnInstructions,
    summaries: {}, // Will be populated by coordinator after researchers complete
    memory_ids: [], // Will be populated by coordinator after researchers store in semantic-memory
  };
}
```
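The coordinator side of the flow is deliberately left out of `runResearchPhase`; it only prepares the prompts. A sketch of how a coordinator might consume the spawn instructions, assuming a `Task(...)` spawning primitive as the comments above describe; its exact signature is not defined in this file, so it is declared here as an assumption.

```typescript
// Assumed host spawning primitive (signature is illustrative).
declare function Task(input: {
  subagent_type: string;
  prompt: string;
}): Promise<unknown>;

const research = await runResearchPhase(
  "Add Next.js API routes with Zod validation",
  "/path/to/project",
  { checkUpgrades: false },
);

// Spawn one researcher per technology, in parallel.
await Promise.all(
  research.spawn_instructions.map((instruction) =>
    Task({
      subagent_type: instruction.subagent_type, // "swarm/researcher"
      prompt: instruction.prompt,
    }),
  ),
);

// After researchers report back via swarm mail, the coordinator fills in
// research.summaries and research.memory_ids before decomposition.
```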
```typescript
/**
 * Plugin tool for running research phase
 *
 * Exposes research phase as a tool for manual triggering or
 * integration into orchestration flows.
 */
export const swarm_research_phase = tool({
  description:
    "Run research phase to gather documentation for detected technologies. Returns summaries for injection into shared_context.",
  args: {
    task: tool.schema.string().min(1).describe("Task description to analyze"),
    project_path: tool.schema
      .string()
      .describe("Absolute path to project root"),
    check_upgrades: tool.schema
      .boolean()
      .optional()
      .describe(
        "Compare installed vs latest versions (default: false)"
      ),
  },
  async execute(args) {
    const result = await runResearchPhase(args.task, args.project_path, {
      checkUpgrades: args.check_upgrades,
    });

    return JSON.stringify(
      {
        tech_stack: result.tech_stack,
        summaries: result.summaries,
        memory_ids: result.memory_ids,
        summary: {
          technologies_detected: result.tech_stack.length,
          summaries_collected: Object.keys(result.summaries).length,
          memories_stored: result.memory_ids.length,
        },
        usage_hint:
          "Inject summaries into shared_context for task decomposition. Each technology has documentation in semantic-memory.",
      },
      null,
      2
    );
  },
});
```
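As a tool call, the same phase looks like this: parse the JSON payload, then assemble `shared_context` from the summaries as the `usage_hint` suggests. A hedged sketch; values and the markdown layout of the assembled context are illustrative.

```typescript
const phaseJson = await swarm_research_phase.execute({
  task: "Add Next.js API routes with Zod validation",
  project_path: "/path/to/project",
  check_upgrades: true,
});

const phase = JSON.parse(phaseJson);
// e.g. phase.tech_stack => ["next", "zod"]; summaries and memory_ids stay
// empty until researchers have actually run (see usage_hint in the payload).
const sharedContext = Object.entries(
  phase.summaries as Record<string, string>,
)
  .map(([tech, summary]) => `## ${tech}\n${summary}`)
  .join("\n\n");
```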
```typescript
/**
 * Record an error during subtask execution
 *
 * Implements pattern from "Patterns for Building AI Agents" p.40:
 * "Good agents examine and correct errors when something goes wrong"
 *
 * Errors are accumulated and can be fed into retry prompts to help
 * agents learn from past failures.
 */
export const swarm_accumulate_error = tool({
  description:
    "Record an error during subtask execution. Errors feed into retry prompts.",
  args: {
    bead_id: tool.schema.string().describe("Cell ID where error occurred"),
    error_type: tool.schema
      .enum(["validation", "timeout", "conflict", "tool_failure", "unknown"])
      .describe("Category of error"),
    message: tool.schema.string().describe("Human-readable error message"),
    stack_trace: tool.schema
      .string()
      .optional()
      .describe("Stack trace for debugging"),
    tool_name: tool.schema.string().optional().describe("Tool that failed"),
    context: tool.schema
      .string()
      .optional()
      .describe("What was happening when error occurred"),
  },
  async execute(args) {
    const entry = await globalErrorAccumulator.recordError(
      args.bead_id,
      args.error_type as ErrorType,
      args.message,
      {
        stack_trace: args.stack_trace,
        tool_name: args.tool_name,
        context: args.context,
      },
    );

    return JSON.stringify(
      {
        success: true,
        error_id: entry.id,
        bead_id: entry.bead_id,
        error_type: entry.error_type,
        message: entry.message,
        timestamp: entry.timestamp,
        note: "Error recorded for retry context. Use swarm_get_error_context to retrieve accumulated errors.",
      },
      null,
      2,
    );
  },
});

/**
 * Get accumulated errors for a bead to feed into retry prompts
 *
 * Returns formatted error context that can be injected into retry prompts
 * to help agents learn from past failures.
 */
export const swarm_get_error_context = tool({
  description:
    "Get accumulated errors for a bead. Returns formatted context for retry prompts.",
  args: {
    bead_id: tool.schema.string().describe("Cell ID to get errors for"),
    include_resolved: tool.schema
      .boolean()
      .optional()
      .describe("Include resolved errors (default: false)"),
  },
  async execute(args) {
    const errorContext = await globalErrorAccumulator.getErrorContext(
      args.bead_id,
      args.include_resolved ?? false,
    );

    const stats = await globalErrorAccumulator.getErrorStats(args.bead_id);

    return JSON.stringify(
      {
        bead_id: args.bead_id,
        error_context: errorContext,
        stats: {
          total_errors: stats.total,
          unresolved: stats.unresolved,
          by_type: stats.by_type,
        },
        has_errors: errorContext.length > 0,
        usage:
          "Inject error_context into retry prompt using {error_context} placeholder",
      },
      null,
      2,
    );
  },
});

/**
 * Mark an error as resolved
 *
 * Call this after an agent successfully addresses an error to update
 * the accumulator state.
 */
export const swarm_resolve_error = tool({
  description:
    "Mark an error as resolved after fixing it. Updates error accumulator state.",
  args: {
    error_id: tool.schema.string().describe("Error ID to mark as resolved"),
  },
  async execute(args) {
    await globalErrorAccumulator.resolveError(args.error_id);

    return JSON.stringify(
      {
        success: true,
        error_id: args.error_id,
        resolved: true,
      },
      null,
      2,
    );
  },
});
```
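The three error tools form a record, retrieve, resolve loop around retries. A sketch of one iteration, with illustrative IDs; `basePrompt` is a hypothetical template introduced here, containing the `{error_context}` placeholder named in the tool's usage hint.

```typescript
// Hypothetical retry-prompt template with the documented placeholder.
const basePrompt =
  "Previous attempt failed.\n{error_context}\nTry again, avoiding the errors above.";

// 1. Record the failure as it happens.
const recordedJson = await swarm_accumulate_error.execute({
  bead_id: "epic-42.3",
  error_type: "tool_failure",
  message: "typecheck exited with code 2",
  tool_name: "tsc",
  context: "Verification Gate after editing src/routes/api.ts",
});
const recorded = JSON.parse(recordedJson);

// 2. Before retrying, pull accumulated context into the retry prompt.
const contextJson = await swarm_get_error_context.execute({
  bead_id: "epic-42.3",
  include_resolved: false,
});
const { error_context } = JSON.parse(contextJson);
const retryPrompt = basePrompt.replace("{error_context}", error_context);

// 3. Once the retry fixes the issue, mark the error resolved.
await swarm_resolve_error.execute({ error_id: recorded.error_id });
```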
```typescript
/**
 * Check if a bead has struck out (3 consecutive failures)
 *
 * The 3-Strike Rule:
 * IF 3+ fixes have failed:
 *   STOP → Question the architecture
 *   DON'T attempt Fix #4
 *   Discuss with human partner
 *
 * This is NOT a failed hypothesis.
 * This is a WRONG ARCHITECTURE.
 *
 * Use this tool to:
 * - Check strike count before attempting a fix
 * - Get architecture review prompt if struck out
 * - Record a strike when a fix fails
 * - Clear strikes when a fix succeeds
 */
export const swarm_check_strikes = tool({
  description:
    "Check 3-strike status for a bead. Records failures, detects architectural problems, generates architecture review prompts.",
  args: {
    bead_id: tool.schema.string().describe("Cell ID to check"),
    action: tool.schema
      .enum(["check", "add_strike", "clear", "get_prompt"])
      .describe(
        "Action: check count, add strike, clear strikes, or get prompt",
      ),
    attempt: tool.schema
      .string()
      .optional()
      .describe("Description of fix attempt (required for add_strike)"),
    reason: tool.schema
      .string()
      .optional()
      .describe("Why the fix failed (required for add_strike)"),
  },
  async execute(args) {
    switch (args.action) {
      case "check": {
        const count = await getStrikes(args.bead_id, globalStrikeStorage);
        const strikedOut = await isStrikedOut(
          args.bead_id,
          globalStrikeStorage,
        );

        return JSON.stringify(
          {
            bead_id: args.bead_id,
            strike_count: count,
            is_striked_out: strikedOut,
            message: strikedOut
              ? "⚠️ STRUCK OUT: 3 strikes reached. Use get_prompt action for architecture review."
              : count === 0
                ? "No strikes. Clear to proceed."
                : `${count} strike${count > 1 ? "s" : ""}. ${3 - count} remaining before architecture review required.`,
            next_action: strikedOut
              ? "Call with action=get_prompt to get architecture review questions"
              : "Continue with fix attempt",
          },
          null,
          2,
        );
      }

      case "add_strike": {
        if (!args.attempt || !args.reason) {
          return JSON.stringify(
            {
              error: "add_strike requires 'attempt' and 'reason' parameters",
            },
            null,
            2,
          );
        }

        const record = await addStrike(
          args.bead_id,
          args.attempt,
          args.reason,
          globalStrikeStorage,
        );

        const strikedOut = record.strike_count >= 3;

        // Build response with memory storage hint on 3-strike
        const response: Record<string, unknown> = {
          bead_id: args.bead_id,
          strike_count: record.strike_count,
          is_striked_out: strikedOut,
          failures: record.failures,
          message: strikedOut
            ? "⚠️ STRUCK OUT: 3 strikes reached. STOP and question the architecture."
            : `Strike ${record.strike_count} recorded. ${3 - record.strike_count} remaining.`,
          warning: strikedOut
            ? "DO NOT attempt Fix #4. Call with action=get_prompt for architecture review."
            : undefined,
        };

        // Add semantic-memory storage hint on 3-strike
        if (strikedOut) {
          response.memory_store = formatMemoryStoreOn3Strike(
            args.bead_id,
            record.failures,
          );
        }

        return JSON.stringify(response, null, 2);
      }

      case "clear": {
        await clearStrikes(args.bead_id, globalStrikeStorage);

        return JSON.stringify(
          {
            bead_id: args.bead_id,
            strike_count: 0,
            is_striked_out: false,
            message: "Strikes cleared. Fresh start.",
          },
          null,
          2,
        );
      }

      case "get_prompt": {
        const prompt = await getArchitecturePrompt(
          args.bead_id,
          globalStrikeStorage,
        );

        if (!prompt) {
          return JSON.stringify(
            {
              bead_id: args.bead_id,
              has_prompt: false,
              message: "No architecture prompt (not struck out yet)",
            },
            null,
            2,
          );
        }

        return JSON.stringify(
          {
            bead_id: args.bead_id,
            has_prompt: true,
            architecture_review_prompt: prompt,
            message:
              "Architecture review required. Present this prompt to the human partner.",
          },
          null,
          2,
        );
      }

      default:
        return JSON.stringify(
          {
            error: `Unknown action: ${args.action}`,
          },
          null,
          2,
        );
    }
  },
});
```
|
|
2605
|
-
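The strike helpers referenced above (getStrikes, addStrike, isStrikedOut, clearStrikes, formatMemoryStoreOn3Strike) are imported from elsewhere in the package and do not appear in this diff. A minimal in-memory sketch of the bookkeeping they imply, with the record shape inferred from the response fields above, so treat it as an assumption rather than the package's actual implementation:

interface StrikeRecord {
  strike_count: number;
  failures: Array<{ attempt: string; reason: string; at: number }>;
}

// In-memory stand-in for globalStrikeStorage; the real storage lives
// elsewhere in the package.
const strikeStore = new Map<string, StrikeRecord>();

function addStrikeSketch(
  beadId: string,
  attempt: string,
  reason: string,
): StrikeRecord {
  const record = strikeStore.get(beadId) ?? { strike_count: 0, failures: [] };
  record.strike_count += 1;
  record.failures.push({ attempt, reason, at: Date.now() });
  strikeStore.set(beadId, record);
  return record;
}

// Mirrors the tool's rule: three failed fixes trigger an architecture review.
const isStrikedOutSketch = (beadId: string): boolean =>
  (strikeStore.get(beadId)?.strike_count ?? 0) >= 3;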
-/**
- * Swarm context shape stored in swarm_contexts table
- */
-interface SwarmBeadContext {
-  id: string;
-  epic_id: string;
-  bead_id: string;
-  strategy: "file-based" | "feature-based" | "risk-based";
-  files: string[];
-  dependencies: string[];
-  directives: {
-    shared_context?: string;
-    skills_to_load?: string[];
-    coordinator_notes?: string;
-  };
-  recovery: {
-    last_checkpoint: number;
-    files_modified: string[];
-    progress_percent: number;
-    last_message?: string;
-    error_context?: string;
-  };
-  created_at: number;
-  updated_at: number;
-}
-
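A populated context row illustrating this shape; all IDs, paths, and timestamps below are invented for the example:

const exampleContext: SwarmBeadContext = {
  id: "ctx-01",
  epic_id: "epic-auth-refactor",
  bead_id: "bead-login-form",
  strategy: "file-based",
  files: ["src/auth/login.ts", "src/auth/session.ts"],
  dependencies: ["bead-schema-migration"],
  directives: {
    shared_context: "Auth module is mid-migration to session tokens",
    skills_to_load: ["auth-patterns"],
  },
  recovery: {
    last_checkpoint: 1735500000000,
    files_modified: ["src/auth/login.ts"],
    progress_percent: 50,
  },
  created_at: 1735490000000,
  updated_at: 1735500000000,
};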
-/**
- * Checkpoint swarm context for recovery
- *
- * Records the current state of a subtask to enable recovery after crashes,
- * context overflows, or agent restarts. Non-fatal errors - logs warnings
- * and continues if checkpoint fails.
- *
- * Integration:
- * - Called automatically by swarm_progress at milestone thresholds (25%, 50%, 75%)
- * - Can be called manually by agents at critical points
- * - Emits SwarmCheckpointedEvent for audit trail
- * - Updates swarm_contexts table for fast recovery queries
- */
-export const swarm_checkpoint = tool({
-  description:
-    "Checkpoint swarm context for recovery. Records current state for crash recovery. Non-fatal errors.",
-  args: {
-    project_key: tool.schema.string().describe("Project path"),
-    agent_name: tool.schema.string().describe("Agent name"),
-    bead_id: tool.schema.string().describe("Subtask bead ID"),
-    epic_id: tool.schema.string().describe("Epic bead ID"),
-    files_modified: tool.schema
-      .array(tool.schema.string())
-      .describe("Files modified so far"),
-    progress_percent: tool.schema
-      .number()
-      .min(0)
-      .max(100)
-      .describe("Current progress"),
-    directives: tool.schema
-      .object({
-        shared_context: tool.schema.string().optional(),
-        skills_to_load: tool.schema.array(tool.schema.string()).optional(),
-        coordinator_notes: tool.schema.string().optional(),
-      })
-      .optional()
-      .describe("Coordinator directives for this subtask"),
-    error_context: tool.schema
-      .string()
-      .optional()
-      .describe("Error context if checkpoint is during error handling"),
-  },
-  async execute(args) {
-    try {
-      // Build checkpoint data
-      const checkpoint: Omit<
-        SwarmBeadContext,
-        "id" | "created_at" | "updated_at"
-      > = {
-        epic_id: args.epic_id,
-        bead_id: args.bead_id,
-        strategy: "file-based", // TODO: Extract from decomposition metadata
-        files: args.files_modified,
-        dependencies: [], // TODO: Extract from bead metadata
-        directives: args.directives || {},
-        recovery: {
-          last_checkpoint: Date.now(),
-          files_modified: args.files_modified,
-          progress_percent: args.progress_percent,
-          error_context: args.error_context,
-        },
-      };
-
-      // Emit checkpoint event
-      const event = createEvent("swarm_checkpointed", {
-        project_key: args.project_key,
-        epic_id: args.epic_id,
-        bead_id: args.bead_id,
-        strategy: checkpoint.strategy,
-        files: checkpoint.files,
-        dependencies: checkpoint.dependencies,
-        directives: checkpoint.directives,
-        recovery: checkpoint.recovery,
-      });
-
-      await appendEvent(event, args.project_key);
-
-      // NOTE: The event handler (handleSwarmCheckpointed in store.ts) updates
-      // the swarm_contexts table. We don't write directly here to follow
-      // event sourcing pattern - single source of truth is the event log.
-
-      const now = Date.now();
-
-      return JSON.stringify(
-        {
-          success: true,
-          checkpoint_timestamp: now,
-          summary: `Checkpoint saved for ${args.bead_id} at ${args.progress_percent}%`,
-          bead_id: args.bead_id,
-          epic_id: args.epic_id,
-          files_tracked: args.files_modified.length,
-        },
-        null,
-        2,
-      );
-    } catch (error) {
-      // Non-fatal - log warning and continue
-      console.warn(
-        `[swarm_checkpoint] Failed to checkpoint ${args.bead_id}:`,
-        error,
-      );
-      return JSON.stringify(
-        {
-          success: false,
-          warning: "Checkpoint failed but continuing",
-          error: error instanceof Error ? error.message : String(error),
-          bead_id: args.bead_id,
-          note: "This is non-fatal. Work can continue without checkpoint.",
-        },
-        null,
-        2,
-      );
-    }
-  },
-});
-
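Like the other tools in this file, execute returns a pretty-printed JSON string. A caller-side sketch of the non-fatal contract, assuming the tool object exposes execute() directly (how the opencode runtime actually invokes tools may differ), with invented argument values:

const raw = await swarm_checkpoint.execute({
  project_key: "/path/to/project", // invented path
  agent_name: "worker-1",          // invented name
  bead_id: "bead-login-form",
  epic_id: "epic-auth-refactor",
  files_modified: ["src/auth/login.ts"],
  progress_percent: 50,
});

const result = JSON.parse(raw) as { success: boolean; warning?: string };
if (!result.success) {
  // Checkpointing is deliberately non-fatal: log and keep working.
  console.warn(result.warning);
}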
-/**
- * Recover swarm context from last checkpoint
- *
- * Queries swarm_contexts table for the most recent checkpoint of an epic.
- * Returns the full context including files, progress, and recovery state.
- * Emits SwarmRecoveredEvent for audit trail.
- *
- * Graceful fallback: Returns { found: false } if no checkpoint exists.
- */
-export const swarm_recover = tool({
-  description:
-    "Recover swarm context from last checkpoint. Returns context or null if not found.",
-  args: {
-    project_key: tool.schema.string().describe("Project path"),
-    epic_id: tool.schema.string().describe("Epic bead ID to recover"),
-  },
-  async execute(args) {
-    try {
-      const { getSwarmMailLibSQL } = await import("swarm-mail");
-      const swarmMail = await getSwarmMailLibSQL(args.project_key);
-      const db = await swarmMail.getDatabase();
-
-      // Query most recent checkpoint for this epic
-      const result = await db.query<{
-        id: string;
-        epic_id: string;
-        bead_id: string;
-        strategy: string;
-        files: string;
-        dependencies: string;
-        directives: string;
-        recovery: string;
-        created_at: number;
-        updated_at: number;
-      }>(
-        `SELECT * FROM swarm_contexts
-         WHERE epic_id = $1
-         ORDER BY updated_at DESC
-         LIMIT 1`,
-        [args.epic_id],
-      );
-
-      if (result.rows.length === 0) {
-        return JSON.stringify(
-          {
-            found: false,
-            message: `No checkpoint found for epic ${args.epic_id}`,
-            epic_id: args.epic_id,
-          },
-          null,
-          2,
-        );
-      }
-
-      const row = result.rows[0];
-      // PGLite auto-parses JSON columns, so we need to handle both cases
-      const parseIfString = <T>(val: unknown): T =>
-        typeof val === "string" ? JSON.parse(val) : (val as T);
-
-      const context: SwarmBeadContext = {
-        id: row.id,
-        epic_id: row.epic_id,
-        bead_id: row.bead_id,
-        strategy: row.strategy as SwarmBeadContext["strategy"],
-        files: parseIfString<string[]>(row.files),
-        dependencies: parseIfString<string[]>(row.dependencies),
-        directives: parseIfString<SwarmBeadContext["directives"]>(
-          row.directives,
-        ),
-        recovery: parseIfString<SwarmBeadContext["recovery"]>(row.recovery),
-        created_at: row.created_at,
-        updated_at: row.updated_at,
-      };
-
-      // Emit recovery event
-      const event = createEvent("swarm_recovered", {
-        project_key: args.project_key,
-        epic_id: args.epic_id,
-        bead_id: context.bead_id,
-        recovered_from_checkpoint: context.recovery.last_checkpoint,
-      });
-
-      await appendEvent(event, args.project_key);
-
-      return JSON.stringify(
-        {
-          found: true,
-          context,
-          summary: `Recovered checkpoint from ${new Date(context.updated_at).toISOString()}`,
-          age_seconds: Math.round((Date.now() - context.updated_at) / 1000),
-        },
-        null,
-        2,
-      );
-    } catch (error) {
-      // Graceful fallback
-      console.warn(
-        `[swarm_recover] Failed to recover context for ${args.epic_id}:`,
-        error,
-      );
-      return JSON.stringify(
-        {
-          found: false,
-          error: error instanceof Error ? error.message : String(error),
-          message: `Recovery failed for epic ${args.epic_id}`,
-          epic_id: args.epic_id,
-        },
-        null,
-        2,
-      );
-    }
-  },
-});
-
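A sketch of a resume flow built on the { found, context } payload swarm_recover returns. As above, the direct execute() call is an assumption about the runtime, and the IDs are invented:

const recovered = JSON.parse(
  await swarm_recover.execute({
    project_key: "/path/to/project", // invented path
    epic_id: "epic-auth-refactor",   // invented epic ID
  }),
) as { found: boolean; context?: SwarmBeadContext; age_seconds?: number };

if (recovered.found && recovered.context) {
  // Skip files already touched and resume from the recorded progress.
  const done = new Set(recovered.context.recovery.files_modified);
  const remaining = recovered.context.files.filter((f) => !done.has(f));
  console.log(
    `Resuming at ${recovered.context.recovery.progress_percent}%:`,
    remaining,
  );
}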
-/**
- * Learn from completed work and optionally create a skill
- *
- * This tool helps agents reflect on patterns, best practices, or domain
- * knowledge discovered during task execution and codify them into reusable
- * skills for future swarms.
- *
- * Implements the "learning swarm" pattern where swarms get smarter over time.
- */
-export const swarm_learn = tool({
-  description: `Analyze completed work and optionally create a skill from learned patterns.
-
-Use after completing a subtask when you've discovered:
-- Reusable code patterns or approaches
-- Domain-specific best practices
-- Gotchas or edge cases to warn about
-- Effective tool usage patterns
-
-This tool helps you formalize learnings into a skill that future agents can discover and use.`,
-  args: {
-    summary: tool.schema
-      .string()
-      .describe("Brief summary of what was learned (1-2 sentences)"),
-    pattern_type: tool.schema
-      .enum([
-        "code-pattern",
-        "best-practice",
-        "gotcha",
-        "tool-usage",
-        "domain-knowledge",
-        "workflow",
-      ])
-      .describe("Category of the learning"),
-    details: tool.schema
-      .string()
-      .describe("Detailed explanation of the pattern or practice"),
-    example: tool.schema
-      .string()
-      .optional()
-      .describe("Code example or concrete illustration"),
-    when_to_use: tool.schema
-      .string()
-      .describe("When should an agent apply this knowledge?"),
-    files_context: tool.schema
-      .array(tool.schema.string())
-      .optional()
-      .describe("Files that exemplify this pattern"),
-    create_skill: tool.schema
-      .boolean()
-      .optional()
-      .describe(
-        "Create a skill from this learning (default: false, just document)",
-      ),
-    skill_name: tool.schema
-      .string()
-      .regex(/^[a-z0-9-]+$/)
-      .max(64)
-      .optional()
-      .describe("Skill name if creating (required if create_skill=true)"),
-    skill_tags: tool.schema
-      .array(tool.schema.string())
-      .optional()
-      .describe("Tags for the skill if creating"),
-  },
-  async execute(args) {
-    // Format the learning as structured documentation
-    const learning = {
-      summary: args.summary,
-      type: args.pattern_type,
-      details: args.details,
-      example: args.example,
-      when_to_use: args.when_to_use,
-      files_context: args.files_context,
-      recorded_at: new Date().toISOString(),
-    };
-
-    // If creating a skill, generate and create it
-    if (args.create_skill) {
-      if (!args.skill_name) {
-        return JSON.stringify(
-          {
-            success: false,
-            error: "skill_name is required when create_skill=true",
-            learning: learning,
-          },
-          null,
-          2,
-        );
-      }
-
-      // Build skill body from learning
-      const skillBody = `# ${args.summary}
-
-## When to Use
-${args.when_to_use}
-
-## ${args.pattern_type.replace(/-/g, " ").replace(/\b\w/g, (c) => c.toUpperCase())}
-
-${args.details}
-
-${args.example ? `## Example\n\n\`\`\`\n${args.example}\n\`\`\`\n` : ""}
-${args.files_context && args.files_context.length > 0 ? `## Reference Files\n\n${args.files_context.map((f) => `- \`${f}\``).join("\n")}\n` : ""}
-
----
-*Learned from swarm execution on ${new Date().toISOString().split("T")[0]}*`;
-
-      // Import skills_create functionality
-      const { getSkill, invalidateSkillsCache } = await import("./skills");
-      const { mkdir, writeFile } = await import("node:fs/promises");
-      const { join } = await import("node:path");
-
-      // Check if skill exists
-      const existing = await getSkill(args.skill_name);
-      if (existing) {
-        return JSON.stringify(
-          {
-            success: false,
-            error: `Skill '${args.skill_name}' already exists`,
-            existing_path: existing.path,
-            learning: learning,
-            suggestion:
-              "Use skills_update to add to existing skill, or choose a different name",
-          },
-          null,
-          2,
-        );
-      }
-
-      // Create skill directory and file
-      const skillDir = join(
-        process.cwd(),
-        ".opencode",
-        "skills",
-        args.skill_name,
-      );
-      const skillPath = join(skillDir, "SKILL.md");
-
-      const frontmatter = [
-        "---",
-        `name: ${args.skill_name}`,
-        `description: ${args.when_to_use.slice(0, 200)}${args.when_to_use.length > 200 ? "..." : ""}`,
-        "tags:",
-        `  - ${args.pattern_type}`,
-        `  - learned`,
-        ...(args.skill_tags || []).map((t) => `  - ${t}`),
-        "---",
-      ].join("\n");
-
-      try {
-        await mkdir(skillDir, { recursive: true });
-        await writeFile(skillPath, `${frontmatter}\n\n${skillBody}`, "utf-8");
-        invalidateSkillsCache();
-
-        return JSON.stringify(
-          {
-            success: true,
-            skill_created: true,
-            skill: {
-              name: args.skill_name,
-              path: skillPath,
-              type: args.pattern_type,
-            },
-            learning: learning,
-            message: `Created skill '${args.skill_name}' from learned pattern. Future agents can discover it with skills_list.`,
-          },
-          null,
-          2,
-        );
-      } catch (error) {
-        return JSON.stringify(
-          {
-            success: false,
-            error: `Failed to create skill: ${error instanceof Error ? error.message : String(error)}`,
-            learning: learning,
-          },
-          null,
-          2,
-        );
-      }
-    }
-
-    // Just document the learning without creating a skill
-    return JSON.stringify(
-      {
-        success: true,
-        skill_created: false,
-        learning: learning,
-        message:
-          "Learning documented. Use create_skill=true to persist as a skill for future agents.",
-        suggested_skill_name:
-          args.skill_name ||
-          args.summary
-            .toLowerCase()
-            .replace(/[^a-z0-9\s-]/g, "")
-            .replace(/\s+/g, "-")
-            .slice(0, 64),
-      },
-      null,
-      2,
-    );
-  },
-});
-
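The suggested_skill_name fallback above slugifies the summary into a valid skill name. The same transformation as a standalone snippet, with an invented summary for illustration:

const toSkillName = (summary: string): string =>
  summary
    .toLowerCase()
    .replace(/[^a-z0-9\s-]/g, "")
    .replace(/\s+/g, "-")
    .slice(0, 64);

console.log(toSkillName("Retry with backoff beats fixed sleeps!"));
// -> "retry-with-backoff-beats-fixed-sleeps"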
-// ============================================================================
-// Export tools
-// ============================================================================
-
-export const orchestrateTools = {
-  swarm_init,
-  swarm_status,
-  swarm_progress,
-  swarm_broadcast,
-  swarm_complete,
-  swarm_record_outcome,
-  swarm_research_phase,
-  swarm_accumulate_error,
-  swarm_get_error_context,
-  swarm_resolve_error,
-  swarm_check_strikes,
-  swarm_checkpoint,
-  swarm_recover,
-  swarm_learn,
-};
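The map's keys double as the tool names agents invoke, so a consumer can enumerate or register them generically. A small sketch, with the import path assumed from the file list in this diff:

import { orchestrateTools } from "./swarm-orchestrate";

// Lists every swarm tool exported by this module, e.g. for registration
// or documentation.
for (const name of Object.keys(orchestrateTools)) {
  console.log(name); // "swarm_init", "swarm_status", ..., "swarm_learn"
}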