opencode-swarm-plugin 0.14.0 → 0.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.beads/analysis/skill-architecture-meta-skills.md +1562 -0
- package/.beads/issues.jsonl +79 -0
- package/README.md +20 -18
- package/VERIFICATION_QUALITY_PATTERNS.md +565 -0
- package/bin/swarm.ts +5 -5
- package/dist/index.js +1318 -28
- package/dist/plugin.js +1218 -14
- package/docs/analysis/subagent-coordination-patterns.md +900 -0
- package/docs/analysis-socratic-planner-pattern.md +504 -0
- package/examples/commands/swarm.md +112 -7
- package/global-skills/swarm-coordination/SKILL.md +118 -20
- package/global-skills/swarm-coordination/references/coordinator-patterns.md +1 -1
- package/package.json +1 -1
- package/src/index.ts +78 -0
- package/src/learning.integration.test.ts +310 -0
- package/src/learning.ts +198 -0
- package/src/mandate-promotion.test.ts +473 -0
- package/src/mandate-promotion.ts +239 -0
- package/src/mandate-storage.test.ts +578 -0
- package/src/mandate-storage.ts +786 -0
- package/src/mandates.ts +540 -0
- package/src/schemas/index.ts +27 -0
- package/src/schemas/mandate.ts +232 -0
- package/src/skills.test.ts +194 -0
- package/src/skills.ts +184 -15
- package/src/swarm.integration.test.ts +4 -4
- package/src/swarm.ts +496 -19
- package/workflow-integration-analysis.md +876 -0
@@ -63,6 +63,53 @@ Swarm Mail is embedded (no external server needed) and provides:

 **Heuristic:** If you can describe the task in one sentence without "and", don't swarm.

+## Task Clarity Check (BEFORE Decomposing)
+
+**Before decomposing, ask: Is this task clear enough to parallelize?**
+
+### Vague Task Signals (ASK QUESTIONS FIRST)
+
+| Signal                   | Example                        | Problem                          |
+| ------------------------ | ------------------------------ | -------------------------------- |
+| No files mentioned       | "improve performance"          | Where? Which files?              |
+| Vague verbs              | "fix", "update", "make better" | What specifically?               |
+| Large undefined scope    | "refactor the codebase"        | Which parts? What pattern?       |
+| Missing success criteria | "add auth"                     | OAuth? JWT? Session? What flows? |
+| Ambiguous boundaries     | "handle errors"                | Which errors? Where? How?        |
+
+### How to Clarify
+
+```markdown
+The task "<task>" needs clarification before I can decompose it.
+
+**Question:** [Specific question about scope/files/approach]
+
+Options:
+a) [Option A] - [trade-off]
+b) [Option B] - [trade-off]
+c) [Option C] - [trade-off]
+
+I'd recommend (a) because [reason]. Which approach?
+```
+
+**Rules:**
+
+- ONE question at a time (don't overwhelm)
+- Offer 2-3 concrete options when possible
+- Lead with your recommendation and why
+- Wait for answer before asking next question
+
+### Clear Task Signals (PROCEED to decompose)
+
+| Signal             | Example                        | Why it's clear   |
+| ------------------ | ------------------------------ | ---------------- |
+| Specific files     | "update src/auth/\*.ts"        | Scope defined    |
+| Concrete verbs     | "migrate from X to Y"          | Action defined   |
+| Defined scope      | "the payment module"           | Boundaries clear |
+| Measurable outcome | "tests pass", "no type errors" | Success criteria |
+
+**When in doubt, ask.** A 30-second clarification beats a 30-minute wrong decomposition.
+
 ## Coordinator Workflow

 ### Phase 1: Initialize Swarm Mail (FIRST)

@@ -95,30 +142,76 @@ skills_list();

 Synthesize findings into `shared_context` for workers.

-### Phase 3: Decomposition
+### Phase 3: Decomposition (DELEGATE TO SUBAGENT)
+
+> **⚠️ CRITICAL: Context Preservation Pattern**
+>
+> **NEVER do planning inline in the coordinator thread.** Decomposition work (file reading, CASS searching, reasoning about task breakdown) consumes massive amounts of context and will exhaust your token budget on long swarms.
+>
+> **ALWAYS delegate planning to a `swarm/planner` subagent** and receive only the structured BeadTree JSON result back.
+
+**❌ Anti-Pattern (Context-Heavy):**
+
+```typescript
+// DON'T DO THIS - pollutes main thread context
+const plan = await swarm_plan_prompt({ task, ... });
+// ... agent reasons about decomposition inline ...
+// ... context fills with file contents, analysis ...
+const validation = await swarm_validate_decomposition({ ... });
+```
+
+**✅ Correct Pattern (Context-Lean):**

 ```typescript
-//
-
-
-
-
-  include_skills: true, // lists relevant skills
+// 1. Create planning bead with full context
+await beads_create({
+  title: `Plan: ${taskTitle}`,
+  type: "task",
+  description: `Decompose into subtasks. Context: ${synthesizedContext}`,
 });

-//
-const
-
+// 2. Delegate to swarm/planner subagent
+const planningResult = await Task({
+  subagent_type: "swarm/planner",
+  description: `Decompose task: ${taskTitle}`,
+  prompt: `
+You are a swarm planner. Generate a BeadTree for this task.
+
+## Task
+${taskDescription}
+
+## Synthesized Context
+${synthesizedContext}
+
+## Instructions
+1. Use swarm_plan_prompt(task="...", max_subtasks=5, query_cass=true)
+2. Reason about decomposition strategy
+3. Generate BeadTree JSON
+4. Validate with swarm_validate_decomposition
+5. Return ONLY the validated BeadTree JSON (no analysis, no file contents)
+
+Output format: Valid BeadTree JSON only.
+  `,
 });

-//
+// 3. Parse result (subagent already validated)
+const beadTree = JSON.parse(planningResult);
+
+// 4. Create epic + subtasks atomically
 await beads_create_epic({
-  epic_title:
-  epic_description:
-  subtasks:
+  epic_title: beadTree.epic.title,
+  epic_description: beadTree.epic.description,
+  subtasks: beadTree.subtasks,
 });
 ```

+**Why This Matters:**
+
+- **Main thread context stays clean** - only receives final JSON, not reasoning
+- **Subagent context is disposable** - gets garbage collected after planning
+- **Scales to long swarms** - coordinator can manage 10+ workers without exhaustion
+- **Faster coordination** - less context = faster responses when monitoring workers
+
 ### Phase 4: Reserve Files (via Swarm Mail)

 ```typescript

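For orientation: the coordinator-side code in this hunk touches only three fields of the planner's result. The sketch below shows that assumed shape in TypeScript; the field names mirror what the snippet reads (`beadTree.epic.title`, `beadTree.epic.description`, `beadTree.subtasks`), while everything else about the BeadTree schema is defined elsewhere in the plugin and is not part of this diff.

```typescript
// Hypothetical illustration only - mirrors the fields the coordinator reads
// after JSON.parse(planningResult). The values are invented examples.
const beadTree = {
  epic: {
    title: "Migrate payment module to new API",
    description: "Epic created from the planner's decomposition",
  },
  // Each subtask entry follows the plugin's BeadTree schema (not shown in this diff).
  subtasks: [] as unknown[],
};
```
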
@@ -263,12 +356,17 @@ One blocker affects multiple subtasks.

 ## Anti-Patterns

-| Anti-Pattern
-|
-| **
-| **
-| **
-| **
+| Anti-Pattern                | Symptom                                    | Fix                                  |
+| --------------------------- | ------------------------------------------ | ------------------------------------ |
+| **Decomposing Vague Tasks** | Wrong subtasks, wasted agent cycles        | Ask clarifying questions FIRST       |
+| **Mega-Coordinator**        | Coordinator editing files                  | Coordinator only orchestrates        |
+| **Silent Swarm**            | No communication, late conflicts           | Require updates, check inbox         |
+| **Over-Decomposed**         | 10 subtasks for 20 lines                   | 2-5 subtasks max                     |
+| **Under-Specified**         | "Implement backend"                        | Clear goal, files, criteria          |
+| **Inline Planning** ⚠️      | Context pollution, exhaustion on long runs | Delegate planning to subagent        |
+| **Heavy File Reading**      | Coordinator reading 10+ files              | Subagent reads, returns summary only |
+| **Deep CASS Drilling**      | Multiple cass_search calls inline          | Subagent searches, summarizes        |
+| **Manual Decomposition**    | Hand-crafting subtasks without validation  | Use swarm_plan_prompt + validation   |

 ## Shared Context Template

@@ -49,7 +49,7 @@ For each subtask:

 ### 4. Progress Monitoring

 - Check `beads_query(status="in_progress")` for active work
-- Check `
+- Check `swarmmail_inbox()` for worker messages
 - Intervene on blockers (see Intervention Patterns below)

 ### 5. Completion & Aggregation

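The monitoring step pairs those two calls. Below is a rough, non-authoritative sketch of how a coordinator loop might combine them, assuming both tools are exposed as async functions in the same style as the other snippets in this skill; the return shapes (and the `messages`/`subject` fields) are assumptions for illustration only.

```typescript
// Hedged sketch: poll active work and the coordinator inbox in one pass.
// beads_query and swarmmail_inbox are the tools named above; their exact
// call and return shapes are not part of this diff.
async function monitorOnce() {
  const active = await beads_query({ status: "in_progress" });
  const inbox = await swarmmail_inbox();

  // Intervene on blockers reported by workers (see Intervention Patterns).
  for (const message of inbox.messages ?? []) {
    if (message.subject?.toLowerCase().includes("blocked")) {
      // escalate, reassign, or split the bead per the intervention patterns
    }
  }

  return { active, inbox };
}
```
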
package/package.json CHANGED
package/src/index.ts CHANGED

@@ -38,6 +38,7 @@ import { structuredTools } from "./structured";

 import { swarmTools } from "./swarm";
 import { repoCrawlTools } from "./repo-crawl";
 import { skillsTools, setSkillsProjectDirectory } from "./skills";
+import { mandateTools } from "./mandates";

 /**
  * OpenCode Swarm Plugin

@@ -50,6 +51,7 @@ import { skillsTools, setSkillsProjectDirectory } from "./skills";

  * - swarm:* - Swarm orchestration and task decomposition
  * - repo-crawl:* - GitHub API tools for repository research
  * - skills:* - Agent skills discovery, activation, and execution
+ * - mandate:* - Agent voting system for collaborative knowledge curation
  *
  * @param input - Plugin context from OpenCode
  * @returns Plugin hooks including tools, events, and tool execution hooks

@@ -132,6 +134,7 @@ export const SwarmPlugin: Plugin = async (

  * - agent-mail:init, agent-mail:send, agent-mail:reserve, etc. (legacy MCP)
  * - swarm-mail:init, swarm-mail:send, swarm-mail:reserve, etc. (embedded)
  * - repo-crawl:readme, repo-crawl:structure, etc.
+ * - mandate:file, mandate:vote, mandate:query, etc.
  */
  tool: {
    ...beadsTools,

@@ -140,6 +143,7 @@ export const SwarmPlugin: Plugin = async (

    ...swarmTools,
    ...repoCrawlTools,
    ...skillsTools,
+   ...mandateTools,
  },

  /**

@@ -361,6 +365,7 @@ export const allTools = {

  ...swarmTools,
  ...repoCrawlTools,
  ...skillsTools,
+ ...mandateTools,
} as const;

/**

@@ -473,3 +478,76 @@ export {

   type SkillMetadata,
   type SkillRef,
 } from "./skills";
+
+/**
+ * Re-export mandates module
+ *
+ * Agent voting system for collaborative knowledge curation.
+ *
+ * Includes:
+ * - mandateTools - All mandate tools (file, vote, query, list, stats)
+ * - MandateError - Error class
+ *
+ * Features:
+ * - Submit ideas, tips, lore, snippets, and feature requests
+ * - Vote on entries (upvote/downvote) with 90-day decay
+ * - Semantic search for relevant mandates
+ * - Status transitions based on consensus (candidate → established → mandate)
+ * - Persistent storage with semantic-memory
+ *
+ * Types:
+ * - MandateEntry, Vote, MandateScore - Core data types
+ * - MandateStatus, MandateContentType - Enum types
+ */
+export { mandateTools, MandateError } from "./mandates";
+
+/**
+ * Re-export mandate-storage module
+ *
+ * Includes:
+ * - createMandateStorage - Factory function
+ * - getMandateStorage, setMandateStorage, resetMandateStorage - Global instance management
+ * - updateMandateStatus, updateAllMandateStatuses - Status update helpers
+ * - InMemoryMandateStorage, SemanticMemoryMandateStorage - Storage implementations
+ *
+ * Types:
+ * - MandateStorage - Unified storage interface
+ * - MandateStorageConfig, MandateStorageBackend, MandateStorageCollections - Configuration types
+ */
+export {
+  createMandateStorage,
+  getMandateStorage,
+  setMandateStorage,
+  resetMandateStorage,
+  updateMandateStatus,
+  updateAllMandateStatuses,
+  InMemoryMandateStorage,
+  SemanticMemoryMandateStorage,
+  DEFAULT_MANDATE_STORAGE_CONFIG,
+  type MandateStorage,
+  type MandateStorageConfig,
+  type MandateStorageBackend,
+  type MandateStorageCollections,
+} from "./mandate-storage";
+
+/**
+ * Re-export mandate-promotion module
+ *
+ * Includes:
+ * - evaluatePromotion - Evaluate status transitions
+ * - shouldPromote - Determine new status based on score
+ * - formatPromotionResult - Format promotion result for display
+ * - evaluateBatchPromotions, getStatusChanges, groupByTransition - Batch helpers
+ *
+ * Types:
+ * - PromotionResult - Promotion evaluation result
+ */
+export {
+  evaluatePromotion,
+  shouldPromote,
+  formatPromotionResult,
+  evaluateBatchPromotions,
+  getStatusChanges,
+  groupByTransition,
+  type PromotionResult,
+} from "./mandate-promotion";

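For quick orientation on the new public surface: the sketch below shows how a consumer of the package might pull in the re-exports added above. Only names that appear in those export blocks are used; the usage itself is illustrative, since call signatures are not part of this diff.

```typescript
// Illustrative only: import the new mandate exports from the package root.
import { mandateTools, MandateError } from "opencode-swarm-plugin";

// Mirror the plugin itself: mandate tools ride alongside the other tool groups.
const tools = {
  ...mandateTools,
} as const;

// MandateError lets callers tell mandate failures apart from other errors.
function isMandateFailure(error: unknown): error is MandateError {
  return error instanceof MandateError;
}
```
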
@@ -1427,3 +1427,313 @@ describe("Storage Module", () => {

   });
 });
 });
+
+// ============================================================================
+// 3-Strike Detection Tests
+// ============================================================================
+
+import {
+  InMemoryStrikeStorage,
+  addStrike,
+  getStrikes,
+  isStrikedOut,
+  getArchitecturePrompt,
+  clearStrikes,
+  type StrikeStorage,
+} from "./learning";
+
+describe("3-Strike Detection", () => {
+  let storage: StrikeStorage;
+
+  beforeEach(() => {
+    storage = new InMemoryStrikeStorage();
+  });
+
+  describe("addStrike", () => {
+    it("records first strike", async () => {
+      const record = await addStrike(
+        "test-bead-1",
+        "Attempted null check fix",
+        "Still getting undefined errors",
+        storage,
+      );
+
+      expect(record.bead_id).toBe("test-bead-1");
+      expect(record.strike_count).toBe(1);
+      expect(record.failures).toHaveLength(1);
+      expect(record.failures[0].attempt).toBe("Attempted null check fix");
+      expect(record.failures[0].reason).toBe("Still getting undefined errors");
+      expect(record.first_strike_at).toBeDefined();
+      expect(record.last_strike_at).toBeDefined();
+    });
+
+    it("increments strike count on subsequent strikes", async () => {
+      await addStrike("test-bead-2", "Fix 1", "Failed 1", storage);
+      const record2 = await addStrike(
+        "test-bead-2",
+        "Fix 2",
+        "Failed 2",
+        storage,
+      );
+
+      expect(record2.strike_count).toBe(2);
+      expect(record2.failures).toHaveLength(2);
+    });
+
+    it("caps strike count at 3", async () => {
+      await addStrike("test-bead-3", "Fix 1", "Failed 1", storage);
+      await addStrike("test-bead-3", "Fix 2", "Failed 2", storage);
+      await addStrike("test-bead-3", "Fix 3", "Failed 3", storage);
+      const record4 = await addStrike(
+        "test-bead-3",
+        "Fix 4",
+        "Failed 4",
+        storage,
+      );
+
+      expect(record4.strike_count).toBe(3);
+      expect(record4.failures).toHaveLength(4); // Records all attempts
+    });
+
+    it("preserves first_strike_at timestamp", async () => {
+      const record1 = await addStrike(
+        "test-bead-4",
+        "Fix 1",
+        "Failed 1",
+        storage,
+      );
+      await new Promise((resolve) => setTimeout(resolve, 100));
+      const record2 = await addStrike(
+        "test-bead-4",
+        "Fix 2",
+        "Failed 2",
+        storage,
+      );
+
+      expect(record2.first_strike_at).toBe(record1.first_strike_at);
+      expect(record2.last_strike_at).not.toBe(record1.last_strike_at);
+    });
+  });
+
+  describe("getStrikes", () => {
+    it("returns 0 for bead with no strikes", async () => {
+      const count = await getStrikes("no-strikes-bead", storage);
+      expect(count).toBe(0);
+    });
+
+    it("returns correct strike count", async () => {
+      await addStrike("bead-with-strikes", "Fix 1", "Failed 1", storage);
+      await addStrike("bead-with-strikes", "Fix 2", "Failed 2", storage);
+
+      const count = await getStrikes("bead-with-strikes", storage);
+      expect(count).toBe(2);
+    });
+  });
+
+  describe("isStrikedOut", () => {
+    it("returns false for bead with < 3 strikes", async () => {
+      await addStrike("bead-safe", "Fix 1", "Failed 1", storage);
+      await addStrike("bead-safe", "Fix 2", "Failed 2", storage);
+
+      const strikedOut = await isStrikedOut("bead-safe", storage);
+      expect(strikedOut).toBe(false);
+    });
+
+    it("returns true for bead with 3 strikes", async () => {
+      await addStrike("bead-danger", "Fix 1", "Failed 1", storage);
+      await addStrike("bead-danger", "Fix 2", "Failed 2", storage);
+      await addStrike("bead-danger", "Fix 3", "Failed 3", storage);
+
+      const strikedOut = await isStrikedOut("bead-danger", storage);
+      expect(strikedOut).toBe(true);
+    });
+
+    it("returns false for bead with no strikes", async () => {
+      const strikedOut = await isStrikedOut("no-record", storage);
+      expect(strikedOut).toBe(false);
+    });
+  });
+
+  describe("getArchitecturePrompt", () => {
+    it("returns empty string for bead with < 3 strikes", async () => {
+      await addStrike("bead-prompt-1", "Fix 1", "Failed 1", storage);
+
+      const prompt = await getArchitecturePrompt("bead-prompt-1", storage);
+      expect(prompt).toBe("");
+    });
+
+    it("returns empty string for bead with no strikes", async () => {
+      const prompt = await getArchitecturePrompt("no-strikes", storage);
+      expect(prompt).toBe("");
+    });
+
+    it("generates architecture review prompt for struck out bead", async () => {
+      await addStrike(
+        "bead-prompt-2",
+        "Added null checks",
+        "Still crashes on undefined",
+        storage,
+      );
+      await addStrike(
+        "bead-prompt-2",
+        "Used optional chaining",
+        "Runtime error persists",
+        storage,
+      );
+      await addStrike(
+        "bead-prompt-2",
+        "Wrapped in try-catch",
+        "Error still happening",
+        storage,
+      );
+
+      const prompt = await getArchitecturePrompt("bead-prompt-2", storage);
+
+      expect(prompt).toContain("Architecture Review Required");
+      expect(prompt).toContain("bead-prompt-2");
+      expect(prompt).toContain("Added null checks");
+      expect(prompt).toContain("Still crashes on undefined");
+      expect(prompt).toContain("Used optional chaining");
+      expect(prompt).toContain("Runtime error persists");
+      expect(prompt).toContain("Wrapped in try-catch");
+      expect(prompt).toContain("Error still happening");
+      expect(prompt).toContain("architectural problem");
+      expect(prompt).toContain("DO NOT attempt Fix #4");
+      expect(prompt).toContain("Refactor architecture");
+      expect(prompt).toContain("Continue with Fix #4");
+      expect(prompt).toContain("Abandon this approach");
+    });
+
+    it("lists all failures in order", async () => {
+      await addStrike(
+        "bead-prompt-3",
+        "First attempt",
+        "First failure",
+        storage,
+      );
+      await addStrike(
+        "bead-prompt-3",
+        "Second attempt",
+        "Second failure",
+        storage,
+      );
+      await addStrike(
+        "bead-prompt-3",
+        "Third attempt",
+        "Third failure",
+        storage,
+      );
+
+      const prompt = await getArchitecturePrompt("bead-prompt-3", storage);
+
+      const lines = prompt.split("\n");
+      const failureLine1 = lines.find((l) => l.includes("First attempt"));
+      const failureLine2 = lines.find((l) => l.includes("Second attempt"));
+      const failureLine3 = lines.find((l) => l.includes("Third attempt"));
+
+      expect(failureLine1).toBeDefined();
+      expect(failureLine2).toBeDefined();
+      expect(failureLine3).toBeDefined();
+
+      // Check ordering
+      const idx1 = lines.indexOf(failureLine1!);
+      const idx2 = lines.indexOf(failureLine2!);
+      const idx3 = lines.indexOf(failureLine3!);
+
+      expect(idx1).toBeLessThan(idx2);
+      expect(idx2).toBeLessThan(idx3);
+    });
+  });
+
+  describe("clearStrikes", () => {
+    it("clears strikes for a bead", async () => {
+      await addStrike("bead-clear", "Fix 1", "Failed 1", storage);
+      await addStrike("bead-clear", "Fix 2", "Failed 2", storage);
+
+      expect(await getStrikes("bead-clear", storage)).toBe(2);
+
+      await clearStrikes("bead-clear", storage);
+
+      expect(await getStrikes("bead-clear", storage)).toBe(0);
+      expect(await isStrikedOut("bead-clear", storage)).toBe(false);
+    });
+
+    it("handles clearing non-existent bead gracefully", async () => {
+      await expect(clearStrikes("no-bead", storage)).resolves.toBeUndefined();
+    });
+  });
+
+  describe("InMemoryStrikeStorage", () => {
+    it("stores and retrieves strike records", async () => {
+      const storage = new InMemoryStrikeStorage();
+      const record = await addStrike("bead-1", "Fix", "Failed", storage);
+
+      const retrieved = await storage.get("bead-1");
+      expect(retrieved).not.toBeNull();
+      expect(retrieved!.bead_id).toBe("bead-1");
+      expect(retrieved!.strike_count).toBe(1);
+    });
+
+    it("returns null for non-existent bead", async () => {
+      const storage = new InMemoryStrikeStorage();
+      const retrieved = await storage.get("non-existent");
+      expect(retrieved).toBeNull();
+    });
+
+    it("lists all strike records", async () => {
+      const storage = new InMemoryStrikeStorage();
+      await addStrike("bead-1", "Fix", "Failed", storage);
+      await addStrike("bead-2", "Fix", "Failed", storage);
+
+      const all = await storage.getAll();
+      expect(all).toHaveLength(2);
+    });
+
+    it("clears specific bead strikes", async () => {
+      const storage = new InMemoryStrikeStorage();
+      await addStrike("bead-1", "Fix", "Failed", storage);
+      await addStrike("bead-2", "Fix", "Failed", storage);
+
+      await storage.clear("bead-1");
+
+      expect(await storage.get("bead-1")).toBeNull();
+      expect(await storage.get("bead-2")).not.toBeNull();
+    });
+  });
+
+  describe("3-Strike Rule Integration", () => {
+    it("follows complete workflow from no strikes to architecture review", async () => {
+      const beadId = "integration-bead";
+
+      // Start: No strikes
+      expect(await getStrikes(beadId, storage)).toBe(0);
+      expect(await isStrikedOut(beadId, storage)).toBe(false);
+      expect(await getArchitecturePrompt(beadId, storage)).toBe("");
+
+      // Strike 1
+      await addStrike(beadId, "Tried approach A", "Didn't work", storage);
+      expect(await getStrikes(beadId, storage)).toBe(1);
+      expect(await isStrikedOut(beadId, storage)).toBe(false);
+
+      // Strike 2
+      await addStrike(beadId, "Tried approach B", "Also failed", storage);
+      expect(await getStrikes(beadId, storage)).toBe(2);
+      expect(await isStrikedOut(beadId, storage)).toBe(false);
+
+      // Strike 3 - STRUCK OUT
+      await addStrike(beadId, "Tried approach C", "Still broken", storage);
+      expect(await getStrikes(beadId, storage)).toBe(3);
+      expect(await isStrikedOut(beadId, storage)).toBe(true);
+
+      // Architecture prompt should now be available
+      const prompt = await getArchitecturePrompt(beadId, storage);
+      expect(prompt).not.toBe("");
+      expect(prompt).toContain("Architecture Review Required");
+
+      // Clear strikes (e.g., after human intervention)
+      await clearStrikes(beadId, storage);
+      expect(await getStrikes(beadId, storage)).toBe(0);
+      expect(await isStrikedOut(beadId, storage)).toBe(false);
+    });
+  });
+});