compound-agent 1.6.0 → 1.6.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -11,7 +11,7 @@ import { createRequire } from 'module';
  import { execSync, execFileSync, spawn } from 'child_process';
  import { fileURLToPath } from 'url';
  import { Command } from 'commander';
- import chalk4 from 'chalk';
+ import chalk5 from 'chalk';
  import { createInterface } from 'readline';
 
  var __defProp = Object.defineProperty;
@@ -2193,7 +2193,10 @@ function acquireEmbedLock(repoRoot) {
  if (err.code !== "EEXIST") throw err;
  const existing = readLock(file);
  if (existing && isProcessAlive(existing.pid)) {
- return { acquired: false, holder: existing.pid };
+ const lockAge = Date.now() - new Date(existing.startedAt).getTime();
+ if (lockAge < LOCK_MAX_AGE_MS) {
+ return { acquired: false, holder: existing.pid };
+ }
  }
  try {
  unlinkSync(file);
@@ -2221,8 +2224,10 @@ function releaseLock(file) {
  } catch {
  }
  }
+ var LOCK_MAX_AGE_MS;
  var init_embed_lock = __esm({
  "src/memory/knowledge/embed-lock.ts"() {
+ LOCK_MAX_AGE_MS = 60 * 60 * 1e3;
  }
  });
  function statusPath(repoRoot) {
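The two hunks above change embed-lock semantics: a lock held by a live process is now honored only while it is younger than LOCK_MAX_AGE_MS (one hour); older locks are treated as stale and removed. A minimal sketch of the new rule, with names taken from the diff (readLock's return shape is assumed from usage):

```ts
// Sketch of the stale-lock rule introduced in 1.6.2 (not the shipped code).
const LOCK_MAX_AGE_MS = 60 * 60 * 1000; // 60 * 60 * 1e3 in the bundle

interface EmbedLock {
  pid: number;
  startedAt: string; // ISO timestamp written when the lock was taken (assumed field)
}

function shouldHonorLock(existing: EmbedLock, isProcessAlive: (pid: number) => boolean): boolean {
  if (!isProcessAlive(existing.pid)) return false; // holder died: stale
  const lockAge = Date.now() - new Date(existing.startedAt).getTime();
  return lockAge < LOCK_MAX_AGE_MS; // alive but older than 1h: also stale, caller unlinks and retries
}
```

Previously a wedged holder process could block embedding indefinitely; the age cap bounds that wait to an hour.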
@@ -4395,9 +4400,24 @@ async function ensureGitignore(repoRoot) {
  if (missing.length === 0) {
  return { added: [] };
  }
- const section = [SECTION_COMMENT, ...missing].join("\n");
- const separator = content.length > 0 && !content.endsWith("\n") ? "\n\n" : content.length > 0 ? "\n" : "";
- const newContent = content + separator + section + "\n";
+ let newContent;
+ const sectionIdx = lines.findIndex((l) => l.trim() === SECTION_COMMENT);
+ if (sectionIdx >= 0) {
+ let insertAfter = sectionIdx;
+ for (let i = sectionIdx + 1; i < lines.length; i++) {
+ const line = lines[i];
+ if (line === void 0) break;
+ const trimmed = line.trim();
+ if (trimmed === "" || trimmed.startsWith("#")) break;
+ insertAfter = i;
+ }
+ lines.splice(insertAfter + 1, 0, ...missing);
+ newContent = lines.join("\n");
+ } else {
+ const section = [SECTION_COMMENT, ...missing].join("\n");
+ const separator = content.length > 0 && !content.endsWith("\n") ? "\n\n" : content.length > 0 ? "\n" : "";
+ newContent = content + separator + section + "\n";
+ }
  await writeFile(gitignorePath, newContent, "utf-8");
  return { added: missing };
  }
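ensureGitignore now merges new entries into an existing compound-agent section instead of always appending a fresh section at the end of .gitignore. A self-contained sketch of the splice logic (SECTION_COMMENT's literal value is not shown in this diff, so the marker below is illustrative):

```ts
// Sketch of the section-aware merge, mirroring the hunk above.
function insertIntoSection(lines: string[], sectionComment: string, missing: string[]): string[] {
  const sectionIdx = lines.findIndex((l) => l.trim() === sectionComment);
  if (sectionIdx < 0) return [...lines, sectionComment, ...missing]; // no section yet: append one
  let insertAfter = sectionIdx;
  for (let i = sectionIdx + 1; i < lines.length; i++) {
    const trimmed = lines[i].trim();
    if (trimmed === "" || trimmed.startsWith("#")) break; // section ends at a blank line or next comment
    insertAfter = i;
  }
  return [...lines.slice(0, insertAfter + 1), ...missing, ...lines.slice(insertAfter + 1)];
}

// insertIntoSection(["# compound-agent", "a.log", "", "node_modules/"], "# compound-agent", ["b.log"])
// -> ["# compound-agent", "a.log", "b.log", "", "node_modules/"]
```

Repeated installs therefore stop accumulating duplicate managed blocks.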
@@ -4881,7 +4901,7 @@ Analyze the repository to understand its structure, coding conventions, tech sta
  Return findings directly to the caller for synthesis into the plan.
 
  ## Deployment
- Subagent spawned via the Task tool during the **plan** and **brainstorm** phases. Return findings directly to the caller.
+ Subagent spawned via the Task tool during the **plan** and **spec-dev** phases. Return findings directly to the caller.
 
  ## Output Format
  Return a structured summary:
@@ -4912,7 +4932,7 @@ Search compound-agent memory to find relevant lessons, patterns, and decisions f
  Return findings directly to the caller for synthesis into the plan.
 
  ## Deployment
- Subagent spawned via the Task tool during the **plan** and **brainstorm** phases. Return findings directly to the caller.
+ Subagent spawned via the Task tool during the **plan** and **spec-dev** phases. Return findings directly to the caller.
 
  ## Output Format
  Return a list of relevant memory items:
@@ -5557,16 +5577,16 @@ var AGENT_ROLE_SKILLS = {
 
  // src/setup/templates/commands.ts
  var WORKFLOW_COMMANDS = {
- "brainstorm.md": `---
- name: compound:brainstorm
- description: Explore requirements through collaborative dialogue before committing to a plan
- argument-hint: "<goal or topic to brainstorm>"
+ "spec-dev.md": `---
+ name: compound:spec-dev
+ description: Develop precise specifications through Socratic dialogue, EARS notation, and Mermaid diagrams
+ argument-hint: "<goal or feature to specify>"
  ---
  $ARGUMENTS
 
- # Brainstorm
+ # Spec Dev
 
- **MANDATORY FIRST STEP -- NON-NEGOTIABLE**: Use the Read tool to open and read \`.claude/skills/compound/brainstorm/SKILL.md\` NOW. Do NOT proceed until you have read the complete skill file. It contains the full workflow you must follow.
+ **MANDATORY FIRST STEP -- NON-NEGOTIABLE**: Use the Read tool to open and read \`.claude/skills/compound/spec-dev/SKILL.md\` NOW. Do NOT proceed until you have read the complete skill file. It contains the full workflow you must follow.
  `,
  "plan.md": `---
  name: compound:plan
@@ -5612,17 +5632,17 @@ $ARGUMENTS
 
  **MANDATORY FIRST STEP -- NON-NEGOTIABLE**: Use the Read tool to open and read \`.claude/skills/compound/compound/SKILL.md\` NOW. Do NOT proceed until you have read the complete skill file. It contains the full workflow you must follow.
  `,
- "lfg.md": `---
- name: compound:lfg
+ "cook-it.md": `---
+ name: compound:cook-it
  description: Full workflow cycle chaining all five phases
  argument-hint: "<goal>"
  disable-model-invocation: true
  ---
  $ARGUMENTS
 
- # LFG
+ # Cook It
 
- **MANDATORY FIRST STEP -- NON-NEGOTIABLE**: Use the Read tool to open and read \`.claude/skills/compound/lfg/SKILL.md\` NOW. Do NOT proceed until you have read the complete skill file. It contains the full orchestration workflow you must follow.
+ **MANDATORY FIRST STEP -- NON-NEGOTIABLE**: Use the Read tool to open and read \`.claude/skills/compound/cook-it/SKILL.md\` NOW. Do NOT proceed until you have read the complete skill file. It contains the full orchestration workflow you must follow.
  `,
  "research.md": `---
  name: compound:research
@@ -5779,7 +5799,7 @@ npx ca doctor
  settings.json # Claude Code hooks
  plugin.json # Plugin manifest
  agents/compound/ # Subagent definitions
- commands/compound/ # Slash commands (brainstorm, plan, work, review, compound, lfg)
+ commands/compound/ # Slash commands (spec-dev, plan, work, review, compound, cook-it)
  skills/compound/ # Phase skills + agent role skills
  lessons/
  index.jsonl # Memory items (git-tracked source of truth)
@@ -5800,14 +5820,14 @@ docs/compound/
  | Search docs knowledge | \`npx ca knowledge "query"\` |
  | Check plan against memory | \`npx ca check-plan --plan "description"\` |
  | View stats | \`npx ca stats\` |
- | Run full workflow | \`/compound:lfg <epic-id>\` |
+ | Run full workflow | \`/compound:cook-it <epic-id>\` |
  | Health check | \`npx ca doctor\` |
 
  ---
 
  ## Further reading
 
- - [WORKFLOW.md](WORKFLOW.md) -- The 5-phase development workflow and LFG orchestrator
+ - [WORKFLOW.md](WORKFLOW.md) -- The 5-phase development workflow and cook-it orchestrator
  - [CLI_REFERENCE.md](CLI_REFERENCE.md) -- Complete CLI command reference
  - [SKILLS.md](SKILLS.md) -- Phase skills and agent role skills
  - [INTEGRATION.md](INTEGRATION.md) -- Memory system, hooks, beads, and agent guidance
@@ -5815,29 +5835,29 @@ docs/compound/
  "WORKFLOW.md": `---
  version: "{{VERSION}}"
  last-updated: "{{DATE}}"
- summary: "The 5-phase compound-agent workflow and LFG orchestrator"
+ summary: "The 5-phase compound-agent workflow and cook-it orchestrator"
  ---
 
  # Workflow
 
- Every feature or epic follows five phases. The \`/compound:lfg\` skill chains them with enforcement gates.
+ Every feature or epic follows five phases. The \`/compound:cook-it\` skill chains them with enforcement gates.
 
  ---
 
- ## Phase 1: Brainstorm
+ ## Phase 1: Spec Dev
 
- Explore the problem space before committing to a solution.
+ Develop precise specifications through Socratic dialogue, EARS notation, and Mermaid diagrams.
 
- - Ask "why" before "how"
- - Search memory for similar past features
- - Generate multiple approaches, then converge
+ - Ask "why" before "how" -- understand the real need
+ - Search memory for past features, constraints, decisions
+ - Use EARS notation for clear, testable requirements
  - Create a beads epic: \`bd create --title="..." --type=epic\`
 
  ## Phase 2: Plan
 
  Decompose work into small, testable tasks with dependencies.
 
- - Review brainstorm output
+ - Review spec-dev output
  - Create beads tasks: \`bd create --title="..." --type=task\`
  - Create Review and Compound blocking tasks (these survive compaction)
 
@@ -5870,20 +5890,20 @@ Extract and store lessons learned. This is what makes the system compound.
 
  ---
 
- ## LFG orchestrator
+ ## Cook-it orchestrator
 
- \`/compound:lfg\` chains all 5 phases with enforcement gates.
+ \`/compound:cook-it\` chains all 5 phases with enforcement gates.
 
  ### Invocation
 
  \`\`\`
- /compound:lfg <epic-id>
- /compound:lfg <epic-id> from plan
+ /compound:cook-it <epic-id>
+ /compound:cook-it <epic-id> from plan
  \`\`\`
 
  ### Phase execution protocol
 
- For each phase, LFG:
+ For each phase, cook-it:
 
  1. Announces progress: \`[Phase N/5] PHASE_NAME\`
  2. Initializes state: \`npx ca phase-check start <phase>\`
@@ -5902,18 +5922,18 @@ For each phase, LFG:
  | Gate 4 | After Review | \`/implementation-reviewer\` returned APPROVED |
  | Final | After Compound | \`npx ca verify-gates <epic-id>\` passes, \`pnpm test\` and \`pnpm lint\` pass |
 
- If any gate fails, LFG stops. You must fix the issue before proceeding.
+ If any gate fails, cook-it stops. You must fix the issue before proceeding.
 
  ### Resumption
 
- If interrupted, LFG can resume:
+ If interrupted, cook-it can resume:
 
  1. Run \`bd show <epic-id>\` and read the notes for phase state
  2. Re-invoke with \`from <phase>\` to skip completed phases
 
  ### Phase state tracking
 
- LFG persists state in \`.claude/.ca-phase-state.json\`. Useful commands:
+ Cook-it persists state in \`.claude/.ca-phase-state.json\`. Useful commands:
 
  \`\`\`bash
  npx ca phase-check status # See current phase state
@@ -5922,7 +5942,7 @@ npx ca phase-check clean # Reset phase state (escape hatch)
 
  ### Session close
 
- Before saying "done", LFG runs this inviolable checklist:
+ Before saying "done", cook-it runs this inviolable checklist:
 
  \`\`\`bash
  git status
@@ -6084,21 +6104,21 @@ Skills are instructions that Claude reads before executing each phase. They live
 
  ## Phase skills
 
- ### \`/compound:brainstorm\`
+ ### \`/compound:spec-dev\`
 
- **Purpose**: Divergent-then-convergent thinking to explore the solution space.
+ **Purpose**: Develop precise specifications through Socratic dialogue, EARS notation, and Mermaid diagrams.
 
  **When invoked**: At the start of a new feature or epic, before any planning.
 
- **What it does**: Spawns research subagents, searches memory for similar past features, generates multiple approaches, converges on a decision with documented rationale, and creates a beads epic.
+ **What it does**: Guides the user through 4 phases (Explore, Understand, Specify, Hand off) to produce a rigorous spec. Spawns research subagents, uses Mermaid diagrams as thinking tools, detects NL ambiguity, writes EARS-notation requirements, and stores the consolidated spec in the beads epic description.
 
  ### \`/compound:plan\`
 
  **Purpose**: Decompose work into small testable tasks with dependencies.
 
- **When invoked**: After brainstorm, before any implementation.
+ **When invoked**: After spec-dev, before any implementation.
 
- **What it does**: Reviews brainstorm output, spawns analysts, decomposes into tasks with acceptance criteria, creates beads issues, and creates Review + Compound blocking tasks.
+ **What it does**: Reviews spec-dev output, spawns analysts, decomposes into tasks with acceptance criteria, creates beads issues, and creates Review + Compound blocking tasks.
 
  ### \`/compound:work\`
 
@@ -6124,7 +6144,7 @@ Skills are instructions that Claude reads before executing each phase. They live
 
  **What it does**: Spawns an analysis pipeline (context-analyzer, lesson-extractor, pattern-matcher, solution-writer, compounding), applies quality filters, classifies items by type and severity, stores via \`npx ca learn\`, runs \`npx ca verify-gates\`.
 
- ### \`/compound:lfg\`
+ ### \`/compound:cook-it\`
 
  **Purpose**: Full-cycle orchestrator chaining all five phases.
 
@@ -6147,12 +6167,12 @@ Skills are instructions that Claude reads before executing each phase. They live
  Skills are invoked as Claude Code slash commands:
 
  \`\`\`
- /compound:brainstorm # Start brainstorm phase
+ /compound:spec-dev # Start spec-dev phase
  /compound:plan # Start plan phase
  /compound:work # Start work phase
  /compound:review # Start review phase
  /compound:compound # Start compound phase
- /compound:lfg <epic-id> # Run all phases end-to-end
+ /compound:cook-it <epic-id> # Run all phases end-to-end
  /compound:research # Spawn research subagent
  /compound:test-clean # Clean test artifacts
  /compound:get-a-phd <focus> # Deep research for agent knowledge
@@ -6311,61 +6331,91 @@ Work is not complete until \`git push\` succeeds.
 
  // src/setup/templates/skills.ts
  var PHASE_SKILLS = {
- brainstorm: `---
- name: Brainstorm
- description: Divergent-then-convergent thinking to explore solution space
+ "spec-dev": `---
+ name: Spec Dev
+ description: Develop precise specifications through Socratic dialogue, EARS notation, and Mermaid diagrams
  ---
 
- # Brainstorm Skill
+ # Spec Dev Skill
 
  ## Overview
- Explore the problem space before committing to a solution. This phase produces a structured brainstorm document with decisions, open questions, and a beads epic for handoff to planning.
-
- ## Methodology
- 1. Ask "why" before "how" -- understand the real problem
- 2. Search memory with \`npx ca search\` and docs with \`npx ca knowledge "relevant topic"\` for similar past features and known constraints
- 3. Spawn **subagents** via Task tool in parallel for research (lightweight, no inter-agent coordination):
- - Available agents: \`.claude/agents/compound/repo-analyst.md\`, \`memory-analyst.md\`
- - Or use \`subagent_type: Explore\` for ad-hoc research
- - Deploy MULTIPLE when topic spans several domains; synthesize all findings before proceeding
- 4. When facing deep unknowns or complex technical domains, invoke the **researcher skill** (read \`.claude/skills/compound/researcher/SKILL.md\`) to produce a structured survey document before narrowing approaches
- 5. Use \`AskUserQuestion\` to clarify scope, constraints, and preferences
- 6. Divergent phase: generate multiple approaches without filtering
- 7. Identify constraints and non-functional requirements (performance, security, etc.)
- 8. Convergent phase: evaluate approaches against constraints
- 9. Document decisions with rationale, list open questions, and create a beads epic
- 10. Auto-create ADR files in \`docs/decisions/\` for significant decisions (lightweight: Status, Context, Decision, Consequences)
+ Develop unambiguous, testable specifications before implementation. Structured 4-phase process producing EARS-notation requirements, architecture diagrams, and a beads epic.
+
+ Scale formality to risk: skip for trivial (<1h), lightweight (EARS + epic) for small, full 4-phase for medium+. Use \`AskUserQuestion\` early to gauge scope.
+
+ ## Methodology: 4-Phase Spec Development
+
+ ### Phase 1: Explore
+ **Goal**: Map the problem domain before narrowing.
+ 1. Ask "why" before "how" -- understand the real need
+ 2. Search memory: \`npx ca search\` for past features, constraints, decisions
+ 3. Search knowledge: \`npx ca knowledge "relevant terms"\`
+ 4. Spawn subagents for research (\`.claude/agents/compound/repo-analyst.md\`, \`memory-analyst.md\`, or \`subagent_type: Explore\`)
+ 5. For deep domain knowledge, consider \`/get-a-phd\`
+ 6. Build a discovery mindmap (Mermaid \`mindmap\`) -- makes implicit assumptions visible
+ 7. Use \`AskUserQuestion\` to clarify scope and preferences
+
+ **Iteration trigger**: If research reveals the problem is fundamentally different, restart Explore.
+
+ ### Phase 2: Understand
+ **Goal**: Crystallize requirements through Socratic dialogue.
+ 1. For each capability, ask: triggers? edge cases? constraints? acceptance criteria?
+ 2. Use Mermaid diagrams (\`sequenceDiagram\`, \`stateDiagram-v2\`) to expose hidden structure
+ 3. Detect ambiguities: vague adjectives, unclear pronouns, passive voice, compound requirements. See \`references/spec-guide.md\` for full checklist
+ 4. Build a domain glossary for ambiguous terms
+ 5. Use \`AskUserQuestion\` to resolve each ambiguity
+
+ **Iteration trigger**: If specifying reveals missing knowledge, loop back to Explore.
+
+ ### Phase 3: Specify
+ **Goal**: Produce formal, testable requirements.
+ 1. Write each requirement using **EARS notation**:
+ - Ubiquitous: \`The system shall <action>.\`
+ - Event-driven: \`When <trigger>, the system shall <action>.\`
+ - State-driven: \`While <state>, the system shall <action>.\`
+ - Unwanted behavior: \`If <condition>, then the system shall <action>.\`
+ - Optional: \`Where <feature>, the system shall <action>.\`
+ - Combined ordering: \`Where > While > When > If/then > shall\`
+ 2. Verify each requirement: no vague adjectives, edge cases covered, quantities specified, testable
+ 3. Document trade-offs when requirements conflict (see \`references/spec-guide.md\`)
+ 4. Produce architecture diagrams (\`erDiagram\`, \`C4Context\`, \`flowchart\`)
+ 5. Create ADRs in \`docs/decisions/\` for significant decisions
+
+ **Iteration trigger**: If contradictions or gaps emerge, loop back to Understand.
+
+ ### Phase 4: Hand off
+ 1. Store spec in beads epic description (\`bd update <epic> --description="..."\`) -- single source of truth
+ 2. Create beads epic if needed (\`bd create\`)
+ 3. Flag open questions for plan phase
+ 4. Capture lessons: \`npx ca learn\`
 
  ## Memory Integration
- - Run \`npx ca search\` and \`npx ca knowledge "relevant topic"\` with relevant keywords before generating approaches
- - Look for past architectural decisions, pitfalls, and preferences
- - If the problem domain matches past work, review those lessons first
+ - \`npx ca search\` before generating approaches
+ - \`npx ca knowledge\` for indexed project docs
+ - \`npx ca learn\` after corrections or discoveries
 
- ## Docs Integration
- - Spawn docs-explorer to scan \`docs/\` for relevant architecture docs, research, and standards
- - Review existing ADRs in \`docs/decisions/\` -- prior decisions may constrain the brainstorm
- - Auto-create ADR for each significant decision made during convergence
+ ## Reference Material
+ Read \`.claude/skills/compound/spec-dev/references/spec-guide.md\` on demand for EARS patterns, Mermaid templates, ambiguity checklists, and trade-off frameworks.
 
  ## Common Pitfalls
- - Jumping to the first solution without exploring alternatives
- - Ignoring non-functional requirements (scalability, maintainability)
- - Not searching memory for similar past features
- - Not checking existing docs and ADRs for prior decisions
- - Over-scoping: trying to solve everything at once
- - Skipping the "why" and diving into "how"
- - Not invoking the researcher skill when the domain requires deep investigation
- - Not creating a beads epic from conclusions (losing brainstorm output)
+ - Jumping to solutions before exploring the problem
+ - Skipping diagrams -- they reveal hidden assumptions
+ - Vague requirements without EARS patterns
+ - Not searching memory for past patterns and pitfalls
+ - Over-specifying trivial tasks
+ - Ignoring iteration signals when gaps emerge
+ - Not creating the beads epic
+ - Specifying implementation instead of requirements
 
  ## Quality Criteria
- - Multiple approaches were considered (at least 2-3)
- - Constraints and requirements are explicitly listed
- - Memory was searched for relevant context
- - Existing docs and ADRs were reviewed for prior decisions
- - User was engaged via \`AskUserQuestion\` for clarification
- - A clear decision was made with documented rationale
- - ADRs created for significant architectural decisions
- - Open questions are captured for the plan phase
- - A beads epic was created from conclusions via \`bd create\`
+ - [ ] Requirements use EARS notation
+ - [ ] Ambiguities detected and resolved via dialogue
+ - [ ] Mermaid diagrams used as thinking tools
+ - [ ] Memory searched (\`npx ca search\`)
+ - [ ] Trade-offs documented with rationale
+ - [ ] User engaged via \`AskUserQuestion\` at decisions
+ - [ ] Spec stored in beads epic description
+ - [ ] ADRs created for significant decisions
  `,
  plan: `---
  name: Plan
@@ -6378,7 +6428,7 @@ description: Decompose work into small testable tasks with clear dependencies
  Create a concrete implementation plan by decomposing work into small, testable tasks with dependencies and acceptance criteria.
 
  ## Methodology
- 1. Review brainstorm output for decisions and open questions
+ 1. Read the spec from the epic description (\`bd show <epic>\`) for EARS requirements, decisions, and open questions
  2. Search memory with \`npx ca search\` and docs with \`npx ca knowledge "relevant topic"\` for architectural patterns and past mistakes
  3. Spawn **subagents** via Task tool in parallel for research (lightweight, no inter-agent coordination):
  - Available agents: \`.claude/agents/compound/repo-analyst.md\`, \`memory-analyst.md\`
@@ -6389,9 +6439,10 @@ Create a concrete implementation plan by decomposing work into small, testable t
  6. Use \`AskUserQuestion\` to resolve ambiguities, conflicting constraints, or priority trade-offs before decomposing
  7. Decompose into tasks small enough to verify individually
  8. Define acceptance criteria for each task
- 9. Map dependencies between tasks
- 10. Create beads issues: \`bd create --title="..." --type=task\`
- 11. Create review and compound blocking tasks (\`bd create\` + \`bd dep add\`) that depend on work tasks \u2014 these survive compaction and surface via \`bd ready\` after work completes
+ 9. Ensure each task traces back to a spec requirement for traceability
+ 10. Map dependencies between tasks
+ 11. Create beads issues: \`bd create --title="..." --type=task\`
+ 12. Create review and compound blocking tasks (\`bd create\` + \`bd dep add\`) that depend on work tasks \u2014 these survive compaction and surface via \`bd ready\` after work completes
 
  ## Memory Integration
  - Run \`npx ca search\` and \`npx ca knowledge "relevant topic"\` for patterns related to the feature area
@@ -6420,6 +6471,7 @@ Create a concrete implementation plan by decomposing work into small, testable t
  - Existing docs and ADRs were checked for constraints
  - Ambiguities resolved via \`AskUserQuestion\` before decomposing
  - Complexity estimates are realistic (no "should be quick")
+ - Each task traces back to a spec requirement
 
  ## POST-PLAN VERIFICATION -- MANDATORY
  After creating all tasks, verify review and compound tasks exist:
@@ -6439,18 +6491,20 @@ Execute implementation through an AgentTeam using adaptive TDD. The lead coordin
  ## Methodology
  1. Pick tasks from \`bd ready\` or \`$ARGUMENTS\`
  2. Mark tasks in progress: \`bd update <id> --status=in_progress\`
- 3. Run \`npx ca search\` per agent/subtask for targeted context. Display results.
- 4. Assess parallelization: identify independent tasks that can be worked simultaneously
- 5. Deploy an **AgentTeam** (TeamCreate + Task with \`team_name\`) with MULTIPLE test-writers and implementers:
+ 3. Read the epic description (\`bd show <epic>\`) for spec context -- EARS requirements guide what "done" looks like
+ 4. Run \`npx ca search\` per agent/subtask for targeted context. Display results.
+ 5. Assess parallelization: identify independent tasks that can be worked simultaneously
+ 6. Deploy an **AgentTeam** (TeamCreate + Task with \`team_name\`) with MULTIPLE test-writers and implementers:
  - Role skills: \`.claude/skills/compound/agents/{test-writer,implementer}/SKILL.md\`
  - Scale teammate count to independent tasks; pairs coordinate via SendMessage on shared interfaces
- 6. Agents communicate via SendMessage when working on overlapping areas.
- 7. Lead coordinates: review agent outputs, resolve conflicts, verify tests pass. Do not write code directly.
- 8. If blocked, use AskUserQuestion to get user direction.
- 9. Shut down the team when done: send shutdown_request to all teammates.
- 10. Commit incrementally as tests pass.
- 11. Run full test suite for regressions.
- 12. Close tasks: \`bd close <id>\`
+ 7. Agents communicate via SendMessage when working on overlapping areas.
+ 8. Lead coordinates: review agent outputs, resolve conflicts, verify tests pass. Do not write code directly.
+ 9. If implementation diverges from spec requirements, stop and discuss with user via AskUserQuestion before proceeding.
+ 10. If blocked, use AskUserQuestion to get user direction.
+ 11. Shut down the team when done: send shutdown_request to all teammates.
+ 12. Commit incrementally as tests pass.
+ 13. Run full test suite for regressions.
+ 14. Close tasks: \`bd close <id>\`
 
  ## Memory Integration
  - Run \`npx ca search\` per delegated subtask with the subtask's specific description
@@ -6477,6 +6531,12 @@ for complex changes. For all changes, \`/implementation-reviewer\` is the minimu
  - **Subagent spawning within teammates**: each teammate should spawn opus subagents for independent subtasks (e.g., a test-writer spawning subagents to write tests for multiple modules in parallel)
  - **Coordinate on shared interfaces**: teammates working on overlapping APIs must communicate via SendMessage before implementing
 
+ ## Literature
+ - Consult \`docs/compound/research/tdd/\` for TDD methodology, test-first development evidence, and best practices
+ - Consult \`docs/compound/research/property-testing/\` for property-based testing theory and invariant design
+ - Run \`npx ca knowledge "TDD test-first"\` for indexed knowledge on testing methodology
+ - Run \`npx ca search "testing"\` for lessons from past TDD cycles
+
  ## Common Pitfalls
  - Lead writing code instead of delegating to agents
  - Not injecting memory context into agent prompts
@@ -6490,6 +6550,7 @@ for complex changes. For all changes, \`/implementation-reviewer\` is the minimu
  - Incremental commits made as tests pass
  - All tests pass after refactoring
  - Task lifecycle tracked via beads (\`bd\`)
+ - Implementation aligns with spec requirements from epic
 
  ## PHASE GATE 3 -- MANDATORY
  Before starting Review, verify ALL work tasks are closed:
@@ -6509,23 +6570,25 @@ Perform thorough code review by spawning specialized reviewers in parallel, cons
 
  ## Methodology
  1. Run quality gates first: \`pnpm test && pnpm lint\`
- 2. Search memory with \`npx ca search\` for known patterns and recurring issues
- 3. Select reviewer tier based on diff size:
+ 2. Read the epic description (\`bd show <epic>\`) for EARS requirements -- reviewers verify each requirement is met
+ 3. Search memory with \`npx ca search\` for known patterns and recurring issues
+ 4. Select reviewer tier based on diff size:
  - **Small** (<100 lines): 4 core -- security, test-coverage, simplicity, cct-reviewer
  - **Medium** (100-500): add architecture, performance, edge-case (7 total)
  - **Large** (500+): all 11 reviewers including docs, consistency, error-handling, pattern-matcher
- 4. Spawn reviewers in an **AgentTeam** (TeamCreate + Task with \`team_name\`):
+ 5. Spawn reviewers in an **AgentTeam** (TeamCreate + Task with \`team_name\`):
  - Role skills: \`.claude/skills/compound/agents/{security-reviewer,architecture-reviewer,performance-reviewer,test-coverage-reviewer,simplicity-reviewer}/SKILL.md\`
  - Security specialist skills (on-demand, spawned by security-reviewer): \`.claude/skills/compound/agents/{security-injection,security-secrets,security-auth,security-data,security-deps}/SKILL.md\`
  - For large diffs (500+), deploy MULTIPLE instances; split files across instances, coordinate via SendMessage
- 5. Reviewers communicate findings to each other via \`SendMessage\`
- 6. Collect, consolidate, and deduplicate all findings
- 7. Classify by severity: P0 (blocks merge), P1 (critical/blocking), P2 (important), P3 (minor)
- 8. Use \`AskUserQuestion\` when severity is ambiguous or fix has multiple valid options
- 9. Create beads issues for P1 findings: \`bd create --title="P1: ..."\`
- 10. Fix all P1 findings before proceeding
- 11. Run \`/implementation-reviewer\` as mandatory gate
- 12. Capture novel findings with \`npx ca learn\`; pattern-matcher auto-reinforces recurring issues
+ 6. Reviewers communicate findings to each other via \`SendMessage\`
+ 7. Collect, consolidate, and deduplicate all findings
+ 8. Classify by severity: P0 (blocks merge), P1 (critical/blocking), P2 (important), P3 (minor)
+ 9. Use \`AskUserQuestion\` when severity is ambiguous or fix has multiple valid options
+ 10. Create beads issues for P1 findings: \`bd create --title="P1: ..."\`
+ 11. Verify spec alignment: flag unmet EARS requirements as P1, flag requirements met but missing from acceptance criteria as gaps
+ 12. Fix all P1 findings before proceeding
+ 13. Run \`/implementation-reviewer\` as mandatory gate
+ 14. Capture novel findings with \`npx ca learn\`; pattern-matcher auto-reinforces recurring issues
 
  ## Memory Integration
  - Run \`npx ca search\` before review for known recurring issues
@@ -6537,6 +6600,11 @@ Perform thorough code review by spawning specialized reviewers in parallel, cons
  - **docs-reviewer** checks code/docs alignment and ADR compliance
  - Flags undocumented public APIs and ADR violations
 
+ ## Literature
+ - Consult \`docs/compound/research/code-review/\` for systematic review methodology, severity taxonomies, and evidence-based review practices
+ - Run \`npx ca knowledge "code review methodology"\` for indexed knowledge on review techniques
+ - Run \`npx ca search "review"\` for lessons from past review cycles
+
  ## Common Pitfalls
  - Ignoring reviewer feedback because "it works"
  - Not running all 11 reviewer perspectives (skipping dimensions)
@@ -6556,6 +6624,7 @@ Perform thorough code review by spawning specialized reviewers in parallel, cons
  - security-reviewer P0 findings: none (blocks merge)
  - security-reviewer P1 findings: all acknowledged or resolved
  - All P1 findings fixed before \`/implementation-reviewer\` approval
+ - All spec requirements verified against implementation
  - \`/implementation-reviewer\` approved as mandatory gate
 
  ## PHASE GATE 4 -- MANDATORY
@@ -6580,26 +6649,32 @@ Lessons go to \`.claude/lessons/index.jsonl\` through the CLI. MEMORY.md is a di
 
  ## Methodology
  1. Review what happened during this cycle (git diff, test results, plan context)
- 2. Spawn the analysis pipeline in an **AgentTeam** (TeamCreate + Task with \`team_name\`):
+ 2. Detect spec drift: compare final implementation against original EARS requirements in the epic description (\`bd show <epic>\`). Note any divergences -- what changed, why, was it justified. If drift reveals a spec was wrong or incomplete, flag that for lesson extraction.
+ 3. Spawn the analysis pipeline in an **AgentTeam** (TeamCreate + Task with \`team_name\`):
  - Role skills: \`.claude/skills/compound/agents/{context-analyzer,lesson-extractor,pattern-matcher,solution-writer,compounding}/SKILL.md\`
  - For large diffs, deploy MULTIPLE context-analyzers and lesson-extractors
  - Pipeline: context-analyzers -> lesson-extractors -> pattern-matcher + solution-writer -> compounding
  - Agents coordinate via SendMessage throughout the pipeline
- 3. Agents pass results through the pipeline via \`SendMessage\`. The lead coordinates: context-analyzer and lesson-extractor feed pattern-matcher and solution-writer, which feed compounding.
- 4. Apply quality filters: novelty check (>0.98 cosine similarity = skip), specificity check
- 5. Classify each item by type: lesson, solution, pattern, or preference
- 6. Classify severity: high (data loss/security/contradictions), medium (workflow/patterns), low (style/optimizations)
- 7. Store via \`npx ca learn\` with supersedes/related links where applicable.
+ 4. Agents pass results through the pipeline via \`SendMessage\`. The lead coordinates: context-analyzer and lesson-extractor feed pattern-matcher and solution-writer, which feed compounding.
+ 5. Apply quality filters: novelty check (>0.98 cosine similarity = skip), specificity check
+ 6. Classify each item by type: lesson, solution, pattern, or preference
+ 7. Classify severity: high (data loss/security/contradictions), medium (workflow/patterns), low (style/optimizations)
+ 8. Store via \`npx ca learn\` with supersedes/related links where applicable.
  At minimum, capture 1 lesson per significant decision made during this cycle
- 8. Delegate to the \`compounding\` subagent to run synthesis: cluster accumulated lessons by similarity and write CCT patterns to \`.claude/lessons/cct-patterns.jsonl\`
- 9. Update outdated docs and deprecate superseded ADRs (set status to \`deprecated\`)
- 10. Use \`AskUserQuestion\` to confirm high-severity items with the user before storing; medium/low items are auto-stored
+ 9. Delegate to the \`compounding\` subagent to run synthesis: cluster accumulated lessons by similarity and write CCT patterns to \`.claude/lessons/cct-patterns.jsonl\`
+ 10. Update outdated docs and deprecate superseded ADRs (set status to \`deprecated\`)
+ 11. Use \`AskUserQuestion\` to confirm high-severity items with the user before storing; medium/low items are auto-stored
 
  ## Docs Integration
  - docs-reviewer checks if \`docs/\` content is outdated after the cycle
  - Check \`docs/decisions/\` for ADRs contradicted by the work done
  - Set ADR status to \`deprecated\` if a decision was reversed, referencing the new ADR
 
+ ## Literature
+ - Consult \`docs/compound/research/learning-systems/\` for knowledge compounding theory, spaced repetition, and lesson extraction methodology
+ - Run \`npx ca knowledge "knowledge compounding"\` for indexed knowledge on learning systems
+ - Run \`npx ca search "compound"\` for lessons from past compounding cycles
+
  ## Common Pitfalls
  - Not spawning the analysis team (analyzing solo misses cross-cutting patterns)
  - Capturing without checking for duplicates via \`npx ca search\`
@@ -6619,6 +6694,7 @@ Lessons go to \`.claude/lessons/index.jsonl\` through the CLI. MEMORY.md is a di
  - User confirmed high-severity items
  - Beads checked for related issues (\`bd\`)
  - Each item gives clear, concrete guidance for future sessions
+ - Spec drift analyzed and captured
 
  ## FINAL GATE -- EPIC CLOSURE
  Before closing the epic:
@@ -6657,7 +6733,7 @@ Conduct deep research on a topic and produce a structured survey document follow
  - References (full citations)
  - Practitioner Resources (annotated tools/repos)
  6. Store output at \`docs/compound/research/<topic-slug>.md\` (kebab-case filename)
- 7. Report key findings back for upstream skill (brainstorm/plan) to act on
+ 7. Report key findings back for upstream skill (spec-dev/plan) to act on
 
  ## Memory Integration
  - Run \`npx ca search\` with topic keywords before starting research
@@ -6734,19 +6810,20 @@ Annotated tools, repos, articles grouped by category.
  - Practitioner resources annotated
  - No recommendations -- landscape presentation only
  `,
- lfg: `---
- name: LFG
+ "cook-it": `---
+ name: Cook It
  description: Full-cycle orchestrator chaining all five phases with gates and controls
  ---
 
- # LFG Skill
+ # Cook It Skill
 
  ## Overview
- Chain all 5 phases end-to-end: Brainstorm, Plan, Work, Review, Compound. This skill governs the orchestration -- phase sequencing, gates, progress tracking, and error recovery.
+
+ Chain all 5 phases end-to-end: Spec Dev, Plan, Work, Review, Compound. This skill governs the orchestration -- phase sequencing, gates, progress tracking, and error recovery.
 
  ## CRITICAL RULE -- READ BEFORE EXECUTE
  Before starting EACH phase, you MUST use the Read tool to open its skill file:
- - .claude/skills/compound/brainstorm/SKILL.md
+ - .claude/skills/compound/spec-dev/SKILL.md
  - .claude/skills/compound/plan/SKILL.md
  - .claude/skills/compound/work/SKILL.md
  - .claude/skills/compound/review/SKILL.md
@@ -6754,6 +6831,21 @@ Before starting EACH phase, you MUST use the Read tool to open its skill file:
 
  Do NOT proceed from memory. Read the skill, then follow it exactly.
 
+ ## Session Start
+ When a cooking session begins, IMMEDIATELY print the chef banner below (copy it verbatim):
+
+ \`\`\`
+
+ \u250C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510
+ \u2502\u2592\u2592\u2592\u2592\u2592\u2502
+ \u2590\u259B\u2588\u2588\u2588\u259C\u258C o
+ \u259D\u259C\u2588\u2588\u2588\u2588\u2588\u259B\u2598|
+ \u2598\u2598 \u259D\u259D
+ Claude the Cooker
+ \`\`\`
+
+ Then proceed with the protocol below.
+
  ## Phase Execution Protocol
  0. Initialize state: \`npx ca phase-check init <epic-id>\`
  For each phase:
@@ -6780,7 +6872,7 @@ If a gate fails, DO NOT proceed. Fix the issue first.
  - **Progress**: Always announce current phase number before starting.
 
  ## Stop Conditions
- - Brainstorm reveals goal is unclear -- stop, ask user
+ - Spec dev reveals goal is unclear -- stop, ask user
  - Tests produce unresolvable failures -- stop, report
  - Review finds critical security issues -- stop, report
 
@@ -6881,6 +6973,103 @@ Apply the agreed changes:
  - Findings captured in compound-agent memory
  `
  };
+ var PHASE_SKILL_REFERENCES = {
+ "spec-dev/references/spec-guide.md": `# Spec Dev Quick Reference
+
+ ## EARS Notation Patterns
+
+ EARS (Easy Approach to Requirements Syntax) provides five sentence templates:
+
+ | Pattern | Template | Example |
+ |---------|----------|---------|
+ | **Ubiquitous** | The system shall \`<action>\`. | The system shall validate all inputs. |
+ | **Event-driven** | When \`<trigger>\`, the system shall \`<action>\`. | When the user submits the form, the system shall validate all fields. |
+ | **State-driven** | While \`<state>\`, the system shall \`<action>\`. | While the system is in maintenance mode, the system shall reject new connections. |
+ | **Unwanted behavior** | If \`<condition>\`, then the system shall \`<action>\`. | If the database connection fails, then the system shall retry with exponential backoff. |
+ | **Optional** | Where \`<feature>\`, the system shall \`<action>\`. | Where SSO is enabled, the system shall redirect to the identity provider. |
+
+ **Combined ordering**: Where > While > When > If/then > shall
+
+ ### Quality Checks for Each Requirement
+ - [ ] Uses one of the five EARS patterns
+ - [ ] No vague adjectives (fast, efficient, user-friendly, easy)
+ - [ ] Quantities specified where applicable (timeouts, limits, thresholds)
+ - [ ] Edge cases addressed (empty input, max values, concurrent access)
+ - [ ] Testable \u2014 can write a pass/fail test against it
+ - [ ] Single responsibility \u2014 one requirement per sentence
+
+ ---
+
+ ## Mermaid Diagram Selection Guide
+
+ | Diagram Type | Use When | Syntax |
+ |-------------|----------|--------|
+ | \`mindmap\` | Exploring problem domain, brainstorming | Phase 1 (Explore) |
+ | \`sequenceDiagram\` | Showing interactions between components | Phase 2 (Understand) |
+ | \`stateDiagram-v2\` | Modeling lifecycle, state transitions | Phase 2 (Understand) |
+ | \`flowchart\` | Showing decision logic, data flow | Phase 3 (Specify) |
+ | \`erDiagram\` | Defining data models, relationships | Phase 3 (Specify) |
+ | \`C4Context\` | System-level architecture boundaries | Phase 3 (Specify) |
+
+ ### When to Use Diagrams
+ - **Always** use a mindmap in Explore to surface hidden assumptions
+ - **Use sequence diagrams** when 2+ components interact
+ - **Use state diagrams** when an entity has a lifecycle
+ - **Skip diagrams** only for truly trivial features (<1h of work)
+
+ ---
+
+ ## NL Ambiguity Detection Checklist
+
+ Watch for these ambiguity patterns in requirements:
+
+ | Pattern | Example | Fix |
+ |---------|---------|-----|
+ | **Vague adjectives** | "fast response" | Specify: "response within 200ms" |
+ | **Unclear pronouns** | "it should update" | Name the subject: "the cache should update" |
+ | **Passive voice** | "data is validated" | Active: "the API validates data" |
+ | **Compound requirements** | "shall validate and log and notify" | Split into 3 separate requirements |
+ | **Unbounded lists** | "supports CSV, JSON, etc." | Enumerate all formats explicitly |
+ | **Missing quantities** | "handles large files" | Specify: "handles files up to 500MB" |
+ | **Implicit assumptions** | "users can access" | Specify: "authenticated users with role X can access" |
+ | **Temporal ambiguity** | "after processing" | Specify: "within 5s of processing completion" |
+
+ ---
+
+ ## Trade-off Documentation Framework
+
+ When requirements conflict, document the trade-off:
+
+ ### Template
+ \`\`\`
+ ### Trade-off: [Short Title]
+
+ **Tension**: [Requirement A] conflicts with [Requirement B].
+
+ **Options**:
+ 1. [Option 1]: [Description]. Pro: [benefit]. Con: [cost].
+ 2. [Option 2]: [Description]. Pro: [benefit]. Con: [cost].
+
+ **Decision**: [Chosen option] because [rationale].
+
+ **Consequence**: [What this means for implementation].
+ \`\`\`
+
+ ### Common Trade-off Dimensions
+ - **Performance vs. Safety**: Validation adds latency
+ - **Flexibility vs. Simplicity**: Configuration adds complexity
+ - **Consistency vs. Availability**: Strict consistency limits throughput
+ - **Security vs. Usability**: Auth steps add friction
+ - **Completeness vs. Time-to-market**: More features delay delivery
+
+ ### Decision Criteria
+ When evaluating trade-offs, consider:
+ 1. **Reversibility**: Can we change this later? Prefer reversible decisions.
+ 2. **Blast radius**: How many components does this affect?
+ 3. **Evidence**: What data supports each option?
+ 4. **Alignment**: Which option best serves the stated goal?
+ `
+ };
 
  // src/setup/gemini.ts
  var HOOKS = {
@@ -6942,6 +7131,9 @@ function parseDescription(content, fallback) {
  const raw = content.match(/description:\s*(.*)/)?.[1] ?? fallback;
  return raw.replace(/\\/g, "\\\\").replace(/"/g, '\\"');
  }
+ function stripFrontmatter(content) {
+ return content.replace(/^---\r?\n[\s\S]*?\r?\n---\r?\n*/, "");
+ }
  async function writeSettings(geminiDir) {
  const settingsPath = join(geminiDir, "settings.json");
  let settings = SETTINGS_JSON;
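The new stripFrontmatter helper removes a leading YAML frontmatter block so the Gemini skill writer (next hunk) can substitute its own frontmatter without nesting the original one. A quick sketch of the regex's behavior (sample input is illustrative):

```ts
// Same regex as the diff: anchored at the start, lazily spans the block,
// and also consumes any blank lines after the closing ---.
const stripFrontmatter = (content: string): string =>
  content.replace(/^---\r?\n[\s\S]*?\r?\n---\r?\n*/, "");

const skill = "---\nname: Spec Dev\ndescription: ...\n---\n\n# Spec Dev Skill\n";
console.log(stripFrontmatter(skill)); // "# Spec Dev Skill\n"
```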
@@ -6979,24 +7171,26 @@ async function writeSkills(geminiDir) {
  const skillDir = join(geminiDir, "skills", `compound-${phase}`);
  await mkdir(skillDir, { recursive: true });
  const description = parseDescription(content, `Compound ${phase} skill`);
+ const body = stripFrontmatter(content);
  await writeFile(join(skillDir, "SKILL.md"), `---
  name: compound-${phase}
  description: ${description}
  ---
 
- ${content}
+ ${body}
  `, "utf8");
  }
  for (const [name, content] of Object.entries(AGENT_ROLE_SKILLS)) {
  const skillDir = join(geminiDir, "skills", `compound-agent-${name}`);
  await mkdir(skillDir, { recursive: true });
  const description = parseDescription(content, `Compound agent ${name} skill`);
+ const body = stripFrontmatter(content);
  await writeFile(join(skillDir, "SKILL.md"), `---
  name: compound-agent-${name}
  description: ${description}
  ---
 
- ${content}
+ ${body}
  `, "utf8");
  }
  }
@@ -7207,10 +7401,10 @@ function processToolSuccess(stateDir) {
  var STATE_DIR = ".claude";
  var STATE_FILE = ".ca-phase-state.json";
  var PHASE_STATE_MAX_AGE_MS = 72 * 60 * 60 * 1e3;
- var PHASES = ["brainstorm", "plan", "work", "review", "compound"];
+ var PHASES = ["spec-dev", "plan", "work", "review", "compound"];
  var GATES = ["post-plan", "gate-3", "gate-4", "final"];
  var PHASE_INDEX = {
- brainstorm: 1,
+ "spec-dev": 1,
  plan: 2,
  work: 3,
  review: 4,
@@ -7232,10 +7426,17 @@ function isIsoDate(value) {
  function isStringArray(value) {
  return Array.isArray(value) && value.every((item) => typeof item === "string");
  }
+ function migrateLegacyFields(raw) {
+ if (raw.cookit_active === void 0 && typeof raw.lfg_active === "boolean") {
+ raw.cookit_active = raw.lfg_active;
+ delete raw.lfg_active;
+ }
+ }
  function validatePhaseState(raw) {
  if (typeof raw !== "object" || raw === null) return false;
  const state = raw;
- return typeof state.lfg_active === "boolean" && typeof state.epic_id === "string" && isPhaseName(state.current_phase) && typeof state.phase_index === "number" && state.phase_index >= 1 && state.phase_index <= 5 && isStringArray(state.skills_read) && Array.isArray(state.gates_passed) && state.gates_passed.every((gate) => isGateName(gate)) && isIsoDate(state.started_at);
+ migrateLegacyFields(state);
+ return typeof state.cookit_active === "boolean" && typeof state.epic_id === "string" && isPhaseName(state.current_phase) && typeof state.phase_index === "number" && state.phase_index >= 1 && state.phase_index <= 5 && isStringArray(state.skills_read) && Array.isArray(state.gates_passed) && state.gates_passed.every((gate) => isGateName(gate)) && isIsoDate(state.started_at);
  }
  function expectedGateForPhase(phaseIndex) {
  if (phaseIndex === 2) return "post-plan";
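migrateLegacyFields keeps old state files readable: a .ca-phase-state.json written by the LFG-era CLI carries lfg_active, which is renamed in place before validation so an in-flight session survives the upgrade. A minimal sketch of the round trip (field values are examples):

```ts
// Sketch of the legacy-field migration above (not the shipped code).
type RawState = Record<string, unknown>;

function migrateLegacyFields(raw: RawState): void {
  // Only migrate when the new field is absent and the old one looks valid.
  if (raw.cookit_active === undefined && typeof raw.lfg_active === "boolean") {
    raw.cookit_active = raw.lfg_active;
    delete raw.lfg_active;
  }
}

const legacy: RawState = { lfg_active: true, epic_id: "ca-42" }; // example values
migrateLegacyFields(legacy);
console.log(legacy); // { cookit_active: true, epic_id: "ca-42" }
```

Note the migration mutates the parsed object only; whether the renamed state is written back to disk is not shown in this diff.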
@@ -7248,10 +7449,10 @@ function initPhaseState(repoRoot, epicId) {
  const dir = join(repoRoot, STATE_DIR);
  mkdirSync(dir, { recursive: true });
  const state = {
- lfg_active: true,
+ cookit_active: true,
  epic_id: epicId,
- current_phase: "brainstorm",
- phase_index: PHASE_INDEX.brainstorm,
+ current_phase: "spec-dev",
+ phase_index: PHASE_INDEX["spec-dev"],
  skills_read: [],
  gates_passed: [],
  started_at: (/* @__PURE__ */ new Date()).toISOString()
@@ -7314,10 +7515,10 @@ function recordGatePassed(repoRoot, gate) {
  }
  function printStatusHuman(state) {
  if (state === null) {
- console.log("No active LFG session.");
+ console.log("No active cook-it session.");
  return;
  }
- console.log("Active LFG Session");
+ console.log("Active cook-it Session");
  console.log(` Epic: ${state.epic_id}`);
  console.log(` Phase: ${state.current_phase} (${state.phase_index}/5)`);
  console.log(` Skills read: ${state.skills_read.length === 0 ? "(none)" : state.skills_read.join(", ")}`);
@@ -7336,7 +7537,7 @@ function registerPhaseSubcommands(phaseCheck, getDryRun, repoRoot) {
  return;
  }
  initPhaseState(repoRoot(), epicId);
- console.log(`Phase state initialized for ${epicId}. Current phase: brainstorm (1/5).`);
+ console.log(`Phase state initialized for ${epicId}. Current phase: spec-dev (1/5).`);
  });
  phaseCheck.command("start <phase>").description("Start or resume a phase").action((phase) => {
  if (!isPhaseName(phase)) {
@@ -7381,7 +7582,7 @@ function registerPhaseSubcommands(phaseCheck, getDryRun, repoRoot) {
  phaseCheck.command("status").description("Show current phase state").option("--json", "Output raw JSON").action((options) => {
  const state = getPhaseState(repoRoot());
  if (options.json) {
- console.log(JSON.stringify(state ?? { lfg_active: false }));
+ console.log(JSON.stringify(state ?? { cookit_active: false }));
  return;
  }
  printStatusHuman(state);
@@ -7396,7 +7597,7 @@ function registerPhaseSubcommands(phaseCheck, getDryRun, repoRoot) {
  });
  }
  function registerPhaseCheckCommand(program2) {
- const phaseCheck = program2.command("phase-check").description("Manage LFG phase state").option("--dry-run", "Show what would be done without making changes");
+ const phaseCheck = program2.command("phase-check").description("Manage cook-it phase state").option("--dry-run", "Show what would be done without making changes");
  const getDryRun = () => phaseCheck.opts().dryRun ?? false;
  const repoRoot = () => getRepoRoot();
  registerPhaseSubcommands(phaseCheck, getDryRun, repoRoot);
@@ -7411,14 +7612,14 @@ function processPhaseGuard(repoRoot, toolName, _toolInput) {
  try {
  if (toolName !== "Edit" && toolName !== "Write") return {};
  const state = getPhaseState(repoRoot);
- if (state === null || !state.lfg_active) return {};
+ if (state === null || !state.cookit_active) return {};
  const expectedSkillPath = `.claude/skills/compound/${state.current_phase}/SKILL.md`;
  const skillRead = state.skills_read.includes(expectedSkillPath);
  if (!skillRead) {
  return {
  hookSpecificOutput: {
  hookEventName: "PreToolUse",
- additionalContext: `PHASE GUARD WARNING: You are in LFG phase ${state.phase_index}/5 (${state.current_phase}) but have NOT read the skill file yet. Read ${expectedSkillPath} before continuing.`
+ additionalContext: `PHASE GUARD WARNING: You are in cook-it phase ${state.phase_index}/5 (${state.current_phase}) but have NOT read the skill file yet. Read ${expectedSkillPath} before continuing.`
  }
  };
  }
@@ -7443,7 +7644,7 @@ function processReadTracker(repoRoot, toolName, toolInput) {
  try {
  if (toolName !== "Read") return {};
  const state = getPhaseState(repoRoot);
- if (state === null || !state.lfg_active) return {};
+ if (state === null || !state.cookit_active) return {};
  const filePath = typeof toolInput.file_path === "string" ? toolInput.file_path : null;
  if (filePath === null) return {};
  const canonicalPath = toCanonicalSkillPath(filePath);
@@ -7471,7 +7672,7 @@ function processStopAudit(repoRoot, stopHookActive = false) {
  try {
  if (stopHookActive) return {};
  const state = getPhaseState(repoRoot);
- if (state === null || !state.lfg_active) return {};
+ if (state === null || !state.cookit_active) return {};
  const expectedGate = expectedGateForPhase(state.phase_index);
  if (expectedGate === null) return {};
  if (state.gates_passed.includes(expectedGate)) return {};
@@ -7486,6 +7687,12 @@ function processStopAudit(repoRoot, stopHookActive = false) {
  }
 
  // src/setup/hooks.ts
+ function logHookError(hookName, err) {
+ if (process.env.CA_DEBUG) {
+ const msg = err instanceof Error ? err.message : String(err);
+ console.error(`[CA_DEBUG] Hook ${hookName} error: ${msg}`);
+ }
+ }
  var HOOK_FILE_MODE = 493;
  function hasCompoundAgentHook(content) {
  return content.includes(HOOK_MARKER);
@@ -7621,7 +7828,8 @@ async function runUserPromptHook() {
7621
7828
  }
7622
7829
  const result = processUserPrompt(data.prompt);
7623
7830
  console.log(JSON.stringify(result));
7624
- } catch {
7831
+ } catch (err) {
7832
+ logHookError("user-prompt", err);
7625
7833
  console.log(JSON.stringify({}));
7626
7834
  }
7627
7835
  }
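This and the following runner hunks all adopt the same contract: a hook must print a JSON object on stdout no matter what, while diagnostics go only to stderr and only when `CA_DEBUG` is set, so the consuming process never receives corrupted output. A self-contained sketch of that pattern (the `runHook` wrapper name is illustrative, not from the bundle):

```js
// CA_DEBUG-gated error reporting, as added in src/setup/hooks.ts above.
function logHookError(hookName, err) {
  if (process.env.CA_DEBUG) {
    const msg = err instanceof Error ? err.message : String(err);
    console.error(`[CA_DEBUG] Hook ${hookName} error: ${msg}`);
  }
}

// Every runner follows this shape: stdout always carries valid JSON,
// stderr carries diagnostics only when CA_DEBUG is set.
async function runHook(hookName, body) {
  try {
    console.log(JSON.stringify(await body()));
  } catch (err) {
    logHookError(hookName, err);     // visible only with CA_DEBUG=1
    console.log(JSON.stringify({})); // still emit valid JSON for the caller
  }
}
```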
@@ -7636,7 +7844,8 @@ async function runPostToolFailureHook() {
7636
7844
  const stateDir = join(getRepoRoot(), ".claude");
7637
7845
  const result = processToolFailure(data.tool_name, data.tool_input ?? {}, stateDir);
7638
7846
  console.log(JSON.stringify(result));
7639
- } catch {
7847
+ } catch (err) {
7848
+ logHookError("post-tool-failure", err);
7640
7849
  console.log(JSON.stringify({}));
7641
7850
  }
7642
7851
  }
@@ -7646,7 +7855,8 @@ async function runPostToolSuccessHook() {
7646
7855
  const stateDir = join(getRepoRoot(), ".claude");
7647
7856
  processToolSuccess(stateDir);
7648
7857
  console.log(JSON.stringify({}));
7649
- } catch {
7858
+ } catch (err) {
7859
+ logHookError("post-tool-success", err);
7650
7860
  console.log(JSON.stringify({}));
7651
7861
  }
7652
7862
  }
@@ -7659,7 +7869,8 @@ async function runToolHook(processor) {
7659
7869
  return;
7660
7870
  }
7661
7871
  console.log(JSON.stringify(processor(getRepoRoot(), data.tool_name, data.tool_input ?? {})));
7662
- } catch {
7872
+ } catch (err) {
7873
+ logHookError("tool-hook", err);
7663
7874
  console.log(JSON.stringify({}));
7664
7875
  }
7665
7876
  }
@@ -7668,7 +7879,8 @@ async function runStopAuditHook() {
7668
7879
  const input = await readStdin();
7669
7880
  const data = JSON.parse(input);
7670
7881
  console.log(JSON.stringify(processStopAudit(getRepoRoot(), data.stop_hook_active ?? false)));
7671
- } catch {
7882
+ } catch (err) {
7883
+ logHookError("stop-audit", err);
7672
7884
  console.log(JSON.stringify({}));
7673
7885
  }
7674
7886
  }
@@ -7796,6 +8008,14 @@ async function installPhaseSkills(repoRoot) {
7796
8008
  created = true;
7797
8009
  }
7798
8010
  }
8011
+ for (const [relPath, content] of Object.entries(PHASE_SKILL_REFERENCES)) {
8012
+ const filePath = join(repoRoot, ".claude", "skills", "compound", relPath);
8013
+ await mkdir(dirname(filePath), { recursive: true });
8014
+ if (!existsSync(filePath)) {
8015
+ await writeFile(filePath, content, "utf-8");
8016
+ created = true;
8017
+ }
8018
+ }
7799
8019
  return created;
7800
8020
  }
7801
8021
  async function installAgentRoleSkills(repoRoot) {
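The new reference-file loop mirrors the write-if-missing convention used for the phase skills themselves, so re-running setup never overwrites local edits. Condensed as a standalone helper (assuming `PHASE_SKILL_REFERENCES` maps relative paths to file contents, as the loop implies):

```js
import { existsSync } from 'fs';
import { mkdir, writeFile } from 'fs/promises';
import { join, dirname } from 'path';

// Write each reference file only if it does not exist yet, so user edits
// survive repeated `ca setup` runs. Returns true when anything was created.
async function installReferences(repoRoot, references) {
  let created = false;
  for (const [relPath, content] of Object.entries(references)) {
    const filePath = join(repoRoot, '.claude', 'skills', 'compound', relPath);
    await mkdir(dirname(filePath), { recursive: true });
    if (!existsSync(filePath)) {
      await writeFile(filePath, content, 'utf-8');
      created = true;
    }
  }
  return created;
}
```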
@@ -8054,7 +8274,7 @@ async function runUninstall(repoRoot, dryRun) {
8054
8274
  return actions;
8055
8275
  }
8056
8276
  var GENERATED_HEADER = "<!-- generated by compound-agent -->\n";
8057
- var DEPRECATED_COMMANDS = ["search.md", "list.md", "show.md", "stats.md", "wrong.md", "learn.md"];
8277
+ var DEPRECATED_COMMANDS = ["search.md", "list.md", "show.md", "stats.md", "wrong.md", "learn.md", "brainstorm.md", "lfg.md"];
8058
8278
  function detectExistingInstall(repoRoot) {
8059
8279
  return existsSync(join(repoRoot, ".claude", "lessons", "index.jsonl"));
8060
8280
  }
@@ -8129,7 +8349,7 @@ async function runUpgrade(repoRoot, dryRun = false) {
8129
8349
  const docVersionUpdated = await upgradeDocVersion(repoRoot, VERSION, dryRun);
8130
8350
  const parts = [];
8131
8351
  if (removedCommands.length > 0) {
8132
- parts.push(`Removed ${removedCommands.length} deprecated command(s)`);
8352
+ parts.push(`Removed ${removedCommands.length} deprecated command(s): ${removedCommands.join(", ")}`);
8133
8353
  }
8134
8354
  if (strippedHeaders > 0) {
8135
8355
  parts.push(`Stripped headers from ${strippedHeaders} file(s)`);
@@ -8814,10 +9034,10 @@ init_storage();
8814
9034
  // src/commands/shared.ts
8815
9035
  init_utils();
8816
9036
  var out = {
8817
- success: (msg) => console.log(chalk4.green("[ok]"), msg),
8818
- error: (msg) => console.error(chalk4.red("[error]"), msg),
8819
- info: (msg) => console.log(chalk4.blue("[info]"), msg),
8820
- warn: (msg) => console.log(chalk4.yellow("[warn]"), msg)
9037
+ success: (msg) => console.log(chalk5.green("[ok]"), msg),
9038
+ error: (msg) => console.error(chalk5.red("[error]"), msg),
9039
+ info: (msg) => console.log(chalk5.blue("[info]"), msg),
9040
+ warn: (msg) => console.log(chalk5.yellow("[warn]"), msg)
8821
9041
  };
8822
9042
  function getGlobalOpts(cmd) {
8823
9043
  const opts = cmd.optsWithGlobals();
@@ -9470,15 +9690,15 @@ function formatLessonForPrime(lesson) {
9470
9690
  return `- **${lesson.insight}**${tags}
9471
9691
  Learned: ${date} via ${source}`;
9472
9692
  }
9473
- function formatActiveLfgSection(repoRoot) {
9693
+ function formatActiveCookitSection(repoRoot) {
9474
9694
  const state = getPhaseState(repoRoot);
9475
- if (state === null || !state.lfg_active) return null;
9695
+ if (state === null || !state.cookit_active) return null;
9476
9696
  const skillsRead = state.skills_read.length === 0 ? "(none)" : state.skills_read.join(", ");
9477
9697
  const gatesPassed = state.gates_passed.length === 0 ? "(none)" : state.gates_passed.join(", ");
9478
9698
  return `
9479
9699
  ---
9480
9700
 
9481
- # ACTIVE LFG SESSION
9701
+ # ACTIVE COOK-IT SESSION
9482
9702
 
9483
9703
  Epic: ${state.epic_id}
9484
9704
  Phase: ${state.current_phase} (${state.phase_index}/5)
@@ -9510,9 +9730,9 @@ Critical lessons from past corrections:
9510
9730
  ${formattedLessons}
9511
9731
  `;
9512
9732
  }
9513
- const lfgSection = formatActiveLfgSection(root);
9514
- if (lfgSection !== null) {
9515
- output += lfgSection;
9733
+ const cookitSection = formatActiveCookitSection(root);
9734
+ if (cookitSection !== null) {
9735
+ output += cookitSection;
9516
9736
  }
9517
9737
  return output;
9518
9738
  }
@@ -9553,13 +9773,13 @@ function registerAuditCommands(program2) {
9553
9773
  const line = formatFinding(finding);
9554
9774
  switch (finding.severity) {
9555
9775
  case "error":
9556
- console.log(chalk4.red(line));
9776
+ console.log(chalk5.red(line));
9557
9777
  break;
9558
9778
  case "warning":
9559
- console.log(chalk4.yellow(line));
9779
+ console.log(chalk5.yellow(line));
9560
9780
  break;
9561
9781
  default:
9562
- console.log(chalk4.blue(line));
9782
+ console.log(chalk5.blue(line));
9563
9783
  break;
9564
9784
  }
9565
9785
  }
@@ -9701,13 +9921,13 @@ function registerRulesCommands(program2) {
9701
9921
  const line = formatViolation(result.rule, violation);
9702
9922
  switch (result.rule.severity) {
9703
9923
  case "error":
9704
- console.log(chalk4.red(line));
9924
+ console.log(chalk5.red(line));
9705
9925
  break;
9706
9926
  case "warning":
9707
- console.log(chalk4.yellow(line));
9927
+ console.log(chalk5.yellow(line));
9708
9928
  break;
9709
9929
  default:
9710
- console.log(chalk4.blue(line));
9930
+ console.log(chalk5.blue(line));
9711
9931
  break;
9712
9932
  }
9713
9933
  }
@@ -9873,7 +10093,7 @@ async function runVerifyGates(epicId, options = {}) {
9873
10093
  const allPassed = checks.every((check) => check.status === "pass");
9874
10094
  if (allPassed) {
9875
10095
  const state = getPhaseState(repoRoot);
9876
- if (state !== null && state.lfg_active && state.gates_passed.includes("final")) {
10096
+ if (state !== null && state.cookit_active && state.gates_passed.includes("final")) {
9877
10097
  cleanPhaseState(repoRoot);
9878
10098
  }
9879
10099
  }
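Note the double condition in the hunk above: phase state is cleaned only when every gate check passes and the `final` gate has already been recorded, presumably so an interrupted run keeps its state. In isolation, with the dependencies injected:

```js
// Clean cook-it phase state only after a fully green run whose final gate
// has already been recorded (mirrors the guard in runVerifyGates above).
function maybeCleanPhaseState(checks, getPhaseState, cleanPhaseState, repoRoot) {
  if (!checks.every((c) => c.status === "pass")) return;
  const state = getPhaseState(repoRoot);
  if (state !== null && state.cookit_active && state.gates_passed.includes("final")) {
    cleanPhaseState(repoRoot);
  }
}
```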
@@ -9914,7 +10134,39 @@ function registerVerifyGatesCommand(program2) {
9914
10134
  }
9915
10135
 
9916
10136
  // src/changelog-data.ts
9917
- var CHANGELOG_RECENT = `## [1.6.0] - 2026-03-02
10137
+ var CHANGELOG_RECENT = `## [1.6.2] - 2026-03-05
10138
+
10139
+ ### Fixed
10140
+
10141
+ - **Cook-it session banner**: The cook-it skill now instructs Claude to print the "Claude the Cooker" ASCII chef banner at the very start of every cooking session.
10142
+
10143
+ ## [1.6.1] - 2026-03-05
10144
+
10145
+ ### Changed
10146
+
10147
+ - **Renamed brainstorm phase to spec-dev**: The \`/compound:brainstorm\` slash command is now \`/compound:spec-dev\`. The phase focuses on structured specification development using EARS notation, Mermaid diagrams, and Socratic dialogue rather than open-ended brainstorming. Old \`brainstorm.md\` command files are auto-cleaned during upgrade.
10148
+ - **Integration test stability**: Reduced integration test parallelism (\`maxForks: 1\`) and increased timeouts to 60s to eliminate non-deterministic ETIMEDOUT failures under load.
10149
+
10150
+ ### Added
10151
+
10152
+ - **Spec reference file**: \`.claude/skills/compound/spec-dev/references/spec-guide.md\` provides quick-reference material for EARS patterns, Mermaid diagram selection, NL ambiguity detection, and trade-off documentation frameworks. Installed automatically during \`ca setup\`.
10153
+ - **Hook error visibility**: Hook runners now log errors to stderr when the \`CA_DEBUG\` environment variable is set, instead of silently swallowing all failures.
10154
+ - **check-plan stdin safety**: \`ca check-plan\` now enforces a 30-second timeout and a 1MB size limit when reading from stdin, preventing hangs in CI/CD environments.
10155
+ - **Embed lock expiry**: Embedding lock files now expire after 1 hour as a safety valve against zombie processes holding locks indefinitely.
10156
+ - **Phase-state backward compatibility**: Legacy \`lfg_active\` field in phase state files is automatically migrated to \`cookit_active\` on read.
10157
+ - **clean-lessons scope messaging**: \`ca clean-lessons\` now reports when non-lesson items are excluded from analysis.
10158
+
10159
+ ### Fixed
10160
+
10161
+ - **Missing spec-guide.md**: The reference file was declared in skill templates and CHANGELOG but never generated during setup. Now installed alongside phase skills.
10162
+ - **Upgrade cleanup for lfg.md**: Added \`lfg.md\` to the deprecated commands list so \`ca setup --update\` removes stale \`lfg\` command files from upgraded repos.
10163
+ - **Docs template terminology**: WORKFLOW.md template now uses "Spec Dev" instead of "Brainstorm" for Phase 1.
10164
+ - **Test file naming**: Renamed \`brainstorm-phase.test.ts\` to \`spec-dev-phase.test.ts\` to match the refactored phase name.
10165
+ - **Library bundle cleanup**: Moved CLI-only re-exports (\`registerWatchCommand\`, \`registerLoopCommands\`) out of the library barrel to eliminate unused import warnings in \`dist/index.js\`.
10166
+ - **plan.test.ts embedding guard**: Added \`skipIf(skipEmbedding)\` to an unguarded test that calls \`retrieveForPlan\` without mocking.
10167
+ - **Agent template test count**: Updated setup.test.ts to expect 9 agent templates (was 8), matching the actual AGENT_TEMPLATES count after \`lessons-reviewer.md\` was added.
10168
+
10169
+ ## [1.6.0] - 2026-03-02
9918
10170
 
9919
10171
  ### Added
9920
10172
 
@@ -9971,62 +10223,7 @@ var CHANGELOG_RECENT = `## [1.6.0] - 2026-03-02
9971
10223
  - **Eliminate redundant JSONL parsing**: \`searchVector()\` and \`findSimilarLessons()\` now use \`readAllFromSqlite()\` after \`syncIfNeeded()\` instead of re-parsing the JSONL file
9972
10224
  - **Float32Array consistency**: Lesson embedding path now keeps \`Float32Array\` from node-llama-cpp instead of converting via \`Array.from()\` (4x memory savings per vector)
9973
10225
  - **Pre-warm lesson embedding cache**: \`ca init\` now pre-computes embeddings for all lessons with missing or stale cache entries, eliminating cold-start latency on first search
9974
- - **Graceful embedding fallback**: \`ca search\` falls back to keyword-only search on runtime embedding failures instead of crashing
9975
-
9976
- ## [1.5.0] - 2026-02-24
9977
-
9978
- ### Added
9979
-
9980
- - **Gemini CLI compatibility adapter**: \`ca setup gemini\` scaffolds \`.gemini/\` directory with hook scripts, TOML slash commands, and inlined skills -- bridging compound-agent to work with Google's Gemini CLI via the Adapter Pattern
9981
- - **Gemini hooks**: Maps SessionStart, BeforeAgent, BeforeTool, AfterTool to compound-agent's existing hook pipeline (\`ca prime\`, \`ca hooks run user-prompt\`, \`ca hooks run phase-guard\`, \`ca hooks run post-tool-success\`)
9982
- - **Gemini TOML commands**: Auto-generates \`.gemini/commands/compound/*.toml\` using \`@{path}\` file injection to maintain a single source of truth with Claude commands
9983
- - **Gemini skills proxying**: Inlines phase and agent role skill content into \`.gemini/skills/\` with YAML frontmatter
9984
- - **23 integration tests** for the Gemini adapter covering hooks, settings.json, TOML commands, skills, and dry-run mode
9985
-
9986
- ### Fixed
9987
-
9988
- - **Gemini hook stderr leak**: Corrected \`2>&1 > /dev/null\` (leaks stderr to stdout, corrupting JSON) to \`> /dev/null 2>&1\`
9989
- - **Gemini TOML file injection syntax**: Changed \`@path\` to \`@{path}\` (Gemini CLI requires curly braces)
9990
- - **Gemini skill file injection**: Skills now inline content instead of using \`@{path}\` which only works in TOML prompt fields, not SKILL.md
9991
- - **Gemini phase guard always allowing**: Hook now checks \`ca hooks run phase-guard\` exit code and returns structured \`{"decision": "deny"}\` on failure (exit 0, not exit 2, so Gemini parses the reason from stdout)
9992
- - **Gemini BeforeTool matcher incomplete**: Added \`create_file\` to BeforeTool and AfterTool matchers alongside \`replace\` and \`write_file\`
9993
- - **TOML description escaping**: \`parseDescription\` now escapes \`\\\` and \`"\` to prevent malformed TOML output
9994
- - **Flaky embedding test**: Added 15s timeout to \`isModelUsable\` test
9995
-
9996
- ## [1.4.4] - 2026-02-23
9997
-
9998
- ### Added
9999
-
10000
- - **Security arc with P0-P3 severity model**: Security-reviewer promoted from generic OWASP checker to mandatory core-4 reviewer with P0 (blocks merge), P1 (requires ack), P2 (should fix), P3 (nice to have) classification
10001
- - **5 on-demand security specialist skills**: \`/security-injection\`, \`/security-secrets\`, \`/security-auth\`, \`/security-data\`, \`/security-deps\` -- spawned by security-reviewer via SendMessage within the review AgentTeam for deep trace analysis
10002
- - **6 security reference docs** (\`docs/research/security/\`): overview, injection-patterns, secrets-checklist, auth-patterns, data-exposure, dependency-security -- distilled from the secure-coding-failure PhD survey into actionable agent guides
10003
- - **Native addon build injection** (\`scripts/postinstall.mjs\`): Postinstall script auto-patches consumer \`package.json\` with \`pnpm.onlyBuiltDependencies\` config for \`better-sqlite3\` and \`node-llama-cpp\`. Handles indent preservation, BOM stripping, atomic writes
10004
- - **CLI preflight diagnostics** (\`src/cli-preflight.ts\`): Catches native module load failures before commands run, prints PM-specific fix instructions (pnpm: 3 options; npm/yarn: rebuild + build tool hints)
10005
- - **\`ca doctor\` pnpm check**: Verifies \`onlyBuiltDependencies\` is configured correctly for pnpm projects, recognizes wildcard \`["*"]\` as valid
10006
- - **Escalation-wiring tests**: 7 new tests verifying security-reviewer mentions all 5 specialists, each specialist declares "Spawned by security-reviewer", P0 documented as merge-blocking, each specialist has \`npx ca knowledge\` and references correct research doc
10007
- - **better-sqlite3 injection patterns**: Added project-specific \`db.exec()\` vs \`db.prepare().run()\` examples to \`injection-patterns.md\`
10008
-
10009
- ### Fixed
10010
-
10011
- - **Noisy \`node-llama-cpp\` warnings on headless Linux**: Vulkan binary fallback and \`special_eos_id\` tokenizer warnings no longer print during \`ca search\` / \`ca knowledge\` -- GPU auto-detection preserved via \`progressLogs: false\` + \`logLevel: error\`
10012
- - **Resource leak in \`isModelUsable()\`**: \`Llama\` and \`LlamaModel\` instances are now properly disposed after the preflight usability check
10013
- - **Wildcard \`onlyBuiltDependencies\`**: Doctor and postinstall now recognize \`["*"]\` as fully configured (no false positive)
10014
- - **Infinity loop marker injection**: \`--model\` validated against shell metacharacters; grep patterns anchored (\`^EPIC_COMPLETE\`, \`^EPIC_FAILED\`) to prevent false-positive matches from prompt echo in logs
10015
- - **Template-to-deployed SKILL.md drift**: Backported all deployed specialist improvements (output fields, collaboration notes, \`npx ca knowledge\` lines) into source templates so \`ca setup --update\` no longer regresses
10016
- - **SSRF citations**: 3 OWASP references in \`secure-coding-failure.md\` corrected from A01 (Broken Access Control) to A10 (SSRF)
10017
- - **Stale verification docs**: Exit criteria updated from 6 to 8 categories (added Security Clear + Workflow Gates); closed-loop review process updated with security check in Stage 4 flowchart
10018
- - **Broken dual-path reference** in \`subagent-pipeline.md\`: Now documents both \`docs/research/security/\` (source repo) and \`docs/compound/research/security/\` (consumer repos)
10019
- - **Incomplete OWASP mapping** in \`overview.md\`: Completed from 5/10 to 10/10 (added A04, A05, A07, A08, A09)
10020
-
10021
- ### Changed
10022
-
10023
- - **\`getLlama()\` initialization hardened**: Both call sites (\`nomic.ts\`, \`model.ts\`) now pass \`build: 'never'\` to prevent silent compilation from source on exotic platforms; set \`NODE_LLAMA_CPP_DEBUG=true\` to re-enable verbose output
10024
- - **Review skill wired to security arc**: P0 added to severity overview, security specialist skills listed as on-demand members, quality criteria include P0/P1 checks
10025
- - **WORKFLOW template**: Severity classification updated from P1/P2/P3 to P0-P3 with "Fix all P0/P1 findings"
10026
- - **Zero-findings instruction**: All 6 security templates (reviewer + 5 specialists) now include "return CLEAR" instruction when no findings detected
10027
- - **Scope-limiting instruction**: \`security-injection\` prioritizes files with interpreter sinks over pure data/config for large diffs (500+ lines)
10028
- - **Non-web context**: \`security-auth\` includes step for CLI/API-only projects without web routes
10029
- - **Graceful audit skip**: \`security-deps\` handles missing \`pnpm audit\` / \`pip-audit\` gracefully instead of failing`;
10226
+ - **Graceful embedding fallback**: \`ca search\` falls back to keyword-only search on runtime embedding failures instead of crashing`;
10030
10227
 
10031
10228
  // src/commands/about.ts
10032
10229
  function registerAboutCommand(program2) {
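The 1.6.1 notes above mention that a legacy `lfg_active` field is migrated to `cookit_active` when phase state is read. That shim is not visible in these hunks, so the following is only a sketch of what such a read-time migration might look like; the `normalizePhaseState` name is hypothetical:

```js
// Hypothetical sketch (not from the bundle): upgrade a legacy phase-state
// object as it is read, so repos written by pre-1.6.1 versions keep working.
function normalizePhaseState(raw) {
  if (raw && raw.lfg_active !== undefined && raw.cookit_active === undefined) {
    raw.cookit_active = raw.lfg_active; // migrate the renamed field
    delete raw.lfg_active;
  }
  return raw;
}
```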
@@ -10214,6 +10411,9 @@ async function cleanLessonsAction() {
10214
10411
  await syncIfNeeded(repoRoot);
10215
10412
  const { items } = await readMemoryItems(repoRoot);
10216
10413
  const activeItems = items.filter((item) => !item.invalidatedAt && item.type === "lesson");
10414
+ if (items.length > activeItems.length) {
10415
+ console.log(`Analyzing ${activeItems.length} lesson-type items only (${items.length - activeItems.length} non-lesson items excluded).`);
10416
+ }
10217
10417
  const pairs = await findDuplicatePairs(repoRoot, activeItems);
10218
10418
  if (pairs.length === 0) {
10219
10419
  console.log("No similar lessons found. Your lesson database is clean.");
@@ -10508,115 +10708,430 @@ function registerCaptureCommands(program2) {
10508
10708
  await handleCapture(this, options);
10509
10709
  });
10510
10710
  }
10511
- var LOOP_EPIC_ID_PATTERN = /^[a-zA-Z0-9_.-]+$/;
10512
- var MODEL_PATTERN = /^[a-zA-Z0-9_.:/-]+$/;
10513
- function buildScriptHeader(timestamp, maxRetries, model, epicIds) {
10514
- return `#!/usr/bin/env bash
10515
- # Infinity Loop - Generated by: ca loop
10516
- # Date: ${timestamp}
10517
- # Autonomously processes beads epics via Claude Code sessions.
10518
- #
10519
- # Usage:
10520
- # ./infinity-loop.sh
10521
- # LOOP_DRY_RUN=1 ./infinity-loop.sh # Preview without executing
10522
-
10523
- set -euo pipefail
10524
-
10525
- # Config
10526
- MAX_RETRIES=${maxRetries}
10527
- MODEL="${model}"
10528
- EPIC_IDS="${epicIds}"
10529
- LOG_DIR="agent_logs"
10530
-
10531
- # Helpers
10532
- timestamp() { date '+%Y-%m-%d_%H-%M-%S'; }
10533
- log() { echo "[$(timestamp)] $*"; }
10534
- die() { log "FATAL: $*"; exit 1; }
10535
-
10536
- command -v claude >/dev/null || die "claude CLI required"
10537
- command -v bd >/dev/null || die "bd (beads) CLI required"
10538
-
10539
- # Detect JSON parser: prefer jq, fall back to python3
10540
- HAS_JQ=false
10541
- command -v jq >/dev/null 2>&1 && HAS_JQ=true
10542
- if [ "$HAS_JQ" = false ]; then
10543
- command -v python3 >/dev/null 2>&1 || die "jq or python3 required for JSON parsing"
10544
- fi
10545
-
10546
- # parse_json() - extract a value from JSON stdin
10547
- # Uses jq (primary) with python3 fallback
10548
- # Auto-unwraps single-element arrays (bd show --json returns [...])
10549
- # Usage: echo '[{"status":"open"}]' | parse_json '.status'
10550
- parse_json() {
10551
- local filter="$1"
10552
- if [ "$HAS_JQ" = true ]; then
10553
- jq -r "if type == \\"array\\" then .[0] else . end | $filter"
10554
- else
10555
- python3 -c "
10556
- import sys, json
10557
- data = json.load(sys.stdin)
10558
- if isinstance(data, list):
10559
- data = data[0] if data else {}
10560
- f = '$filter'.strip('.')
10561
- parts = [p for p in f.split('.') if p]
10562
- v = data
10563
- try:
10564
- for p in parts:
10565
- v = v[p]
10566
- except (KeyError, IndexError, TypeError):
10567
- v = None
10568
- print('' if v is None else v)
10569
- "
10570
- fi
10711
+ init_storage();
10712
+ init_search2();
10713
+ function parseLimitOrNull(rawLimit, optionName, commandName) {
10714
+ try {
10715
+ return parseLimit(rawLimit, optionName);
10716
+ } catch (err) {
10717
+ const message = err instanceof Error ? err.message : `Invalid ${optionName}`;
10718
+ console.error(formatError(commandName, "INVALID_LIMIT", message, `Use --${optionName} with a positive integer`));
10719
+ return null;
10720
+ }
10571
10721
  }
10572
-
10573
- mkdir -p "$LOG_DIR"
10574
- ` + buildEpicSelector() + buildPromptFunction();
10722
+ var MAX_STDIN_BYTES = 1048576;
10723
+ var STDIN_TIMEOUT_MS = 3e4;
10724
+ async function readPlanFromStdin() {
10725
+ const { stdin } = await import('process');
10726
+ if (!stdin.isTTY) {
10727
+ const chunks = [];
10728
+ let totalBytes = 0;
10729
+ const timeout = new Promise(
10730
+ (_, reject) => setTimeout(() => reject(new Error("stdin read timed out after 30s")), STDIN_TIMEOUT_MS)
10731
+ );
10732
+ const read = (async () => {
10733
+ for await (const chunk of stdin) {
10734
+ const buf = chunk;
10735
+ totalBytes += buf.length;
10736
+ if (totalBytes > MAX_STDIN_BYTES) {
10737
+ throw new Error(`stdin exceeds ${MAX_STDIN_BYTES} byte limit`);
10738
+ }
10739
+ chunks.push(buf);
10740
+ }
10741
+ return Buffer.concat(chunks).toString("utf-8").trim();
10742
+ })();
10743
+ try {
10744
+ return await Promise.race([read, timeout]);
10745
+ } catch (err) {
10746
+ console.error(`Warning: ${err instanceof Error ? err.message : String(err)}`);
10747
+ return void 0;
10748
+ }
10749
+ }
10750
+ return void 0;
10575
10751
  }
10576
- function buildEpicSelector() {
10577
- return `
10578
- get_next_epic() {
10579
- if [ -n "$EPIC_IDS" ]; then
10580
- # From explicit list, find first still-open epic not yet processed
10581
- for epic_id in $EPIC_IDS; do
10582
- case " $PROCESSED " in *" $epic_id "*) continue ;; esac
10583
- local status
10584
- status=$(bd show "$epic_id" --json 2>/dev/null | parse_json '.status' 2>/dev/null || echo "")
10585
- if [ "$status" = "open" ]; then
10586
- echo "$epic_id"
10587
- return 0
10588
- fi
10589
- done
10590
- return 1
10591
- else
10592
- # Dynamic: get next ready epic from dependency graph, filtering processed
10593
- local epic_id
10594
- if [ "$HAS_JQ" = true ]; then
10595
- epic_id=$(bd list --type=epic --ready --json --limit=10 2>/dev/null | jq -r '.[].id' 2>/dev/null | while read -r id; do
10596
- case " $PROCESSED " in *" $id "*) continue ;; esac
10597
- echo "$id"
10598
- break
10599
- done)
10600
- else
10601
- epic_id=$(bd list --type=epic --ready --json --limit=10 2>/dev/null | python3 -c "
10602
- import sys, json
10603
- processed = set('$PROCESSED'.split())
10604
- items = json.load(sys.stdin)
10605
- for item in items:
10606
- if item['id'] not in processed:
10607
- print(item['id'])
10608
- break" 2>/dev/null || echo "")
10609
- fi
10610
- if [ -z "$epic_id" ]; then
10611
- return 1
10612
- fi
10613
- echo "$epic_id"
10614
- return 0
10615
- fi
10752
+ function outputCheckPlanJson(lessons) {
10753
+ const jsonOutput = {
10754
+ lessons: lessons.map((l) => ({
10755
+ id: l.lesson.id,
10756
+ insight: l.lesson.insight,
10757
+ rankScore: l.finalScore ?? l.score,
10758
+ // Use finalScore if available, fallback to raw score
10759
+ source: l.lesson.source
10760
+ })),
10761
+ count: lessons.length
10762
+ };
10763
+ console.log(JSON.stringify(jsonOutput));
10616
10764
  }
10617
- `;
10765
+ function outputCheckPlanHuman(lessons, quiet) {
10766
+ console.log("## Lessons Check\n");
10767
+ console.log("Relevant to your plan:\n");
10768
+ lessons.forEach((item, i) => {
10769
+ const num = i + 1;
10770
+ console.log(`${num}. ${chalk5.bold(`[${item.lesson.id}]`)} ${item.lesson.insight}`);
10771
+ console.log(` - Source: ${item.lesson.source}`);
10772
+ console.log();
10773
+ });
10774
+ if (!quiet) {
10775
+ console.log("---");
10776
+ console.log("Consider these lessons while implementing.");
10777
+ }
10618
10778
  }
10619
- function buildPromptFunction() {
10779
+ function formatSource2(source) {
10780
+ return source.replace(/_/g, " ");
10781
+ }
10782
+ function outputSessionLessonsHuman(lessons, quiet) {
10783
+ console.log("## Lessons from Past Sessions\n");
10784
+ console.log("These lessons were captured from previous corrections and should inform your work:\n");
10785
+ lessons.forEach((lesson, i) => {
10786
+ const num = i + 1;
10787
+ const date = lesson.created.slice(0, ISO_DATE_PREFIX_LENGTH);
10788
+ const tagsDisplay = lesson.tags.length > 0 ? ` (${lesson.tags.join(", ")})` : "";
10789
+ console.log(`${num}. **${lesson.insight}**${tagsDisplay}`);
10790
+ console.log(` Learned: ${date} via ${formatSource2(lesson.source)}`);
10791
+ console.log();
10792
+ });
10793
+ if (!quiet) {
10794
+ console.log("Consider these lessons when planning and implementing tasks.");
10795
+ }
10796
+ }
10797
+ async function searchAction(cmd, query, options) {
10798
+ const repoRoot = getRepoRoot();
10799
+ const limit = parseLimitOrNull(options.limit, "limit", "search");
10800
+ if (limit === null) {
10801
+ process.exitCode = 1;
10802
+ return;
10803
+ }
10804
+ const { verbose, quiet } = getGlobalOpts(cmd);
10805
+ await syncIfNeeded(repoRoot);
10806
+ let results;
10807
+ if (isModelAvailable()) {
10808
+ try {
10809
+ const candidateLimit = limit * CANDIDATE_MULTIPLIER;
10810
+ const [vectorResults, keywordResults] = await Promise.all([
10811
+ searchVector(repoRoot, query, { limit: candidateLimit }),
10812
+ searchKeywordScored(repoRoot, query, candidateLimit)
10813
+ ]);
10814
+ const merged = mergeHybridResults(vectorResults, keywordResults, { minScore: MIN_HYBRID_SCORE });
10815
+ const ranked = rankLessons(merged);
10816
+ results = ranked.slice(0, limit).map((r) => r.lesson);
10817
+ } catch {
10818
+ results = await searchKeyword(repoRoot, query, limit);
10819
+ }
10820
+ } else {
10821
+ results = await searchKeyword(repoRoot, query, limit);
10822
+ }
10823
+ if (results.length > 0) {
10824
+ incrementRetrievalCount(repoRoot, results.map((lesson) => lesson.id));
10825
+ }
10826
+ if (results.length === 0) {
10827
+ console.log('No lessons match your search. Try a different query or use "list" to see all lessons.');
10828
+ return;
10829
+ }
10830
+ if (!quiet) {
10831
+ out.info(`Found ${results.length} lesson(s):
10832
+ `);
10833
+ }
10834
+ for (const lesson of results) {
10835
+ console.log(`[${chalk5.cyan(lesson.id)}] ${lesson.insight}`);
10836
+ console.log(` Trigger: ${lesson.trigger}`);
10837
+ if (verbose && lesson.context) {
10838
+ console.log(` Context: ${lesson.context.tool} - ${lesson.context.intent}`);
10839
+ console.log(` Created: ${lesson.created}`);
10840
+ }
10841
+ if (lesson.tags.length > 0) {
10842
+ console.log(` Tags: ${lesson.tags.join(", ")}`);
10843
+ }
10844
+ console.log();
10845
+ }
10846
+ }
10847
+ async function listAction(cmd, options) {
10848
+ const repoRoot = getRepoRoot();
10849
+ const limit = parseLimitOrNull(options.limit, "limit", "list");
10850
+ if (limit === null) {
10851
+ process.exitCode = 1;
10852
+ return;
10853
+ }
10854
+ const { verbose, quiet } = getGlobalOpts(cmd);
10855
+ const { items, skippedCount } = await readMemoryItems(repoRoot);
10856
+ const filteredItems = options.invalidated ? items.filter((i) => i.invalidatedAt) : items;
10857
+ if (filteredItems.length === 0) {
10858
+ if (options.invalidated) {
10859
+ console.log("No invalidated lessons found.");
10860
+ } else {
10861
+ console.log('No lessons found. Get started with: learn "Your first lesson"');
10862
+ }
10863
+ if (skippedCount > 0) {
10864
+ out.warn(`${skippedCount} corrupted lesson(s) skipped.`);
10865
+ }
10866
+ return;
10867
+ }
10868
+ const toShow = filteredItems.slice(0, limit);
10869
+ if (!quiet) {
10870
+ const label = options.invalidated ? "invalidated lesson(s)" : "item(s)";
10871
+ out.info(`Showing ${toShow.length} of ${filteredItems.length} ${label}:
10872
+ `);
10873
+ }
10874
+ for (const item of toShow) {
10875
+ const invalidMarker = item.invalidatedAt ? chalk5.red("[INVALID] ") : "";
10876
+ console.log(`[${chalk5.cyan(item.id)}] ${invalidMarker}${item.insight}`);
10877
+ if (verbose) {
10878
+ console.log(` Type: ${item.type} | Source: ${item.source}`);
10879
+ console.log(` Created: ${item.created}`);
10880
+ if (item.context) {
10881
+ console.log(` Context: ${item.context.tool} - ${item.context.intent}`);
10882
+ }
10883
+ if (item.invalidatedAt) {
10884
+ console.log(` Invalidated: ${item.invalidatedAt}`);
10885
+ if (item.invalidationReason) {
10886
+ console.log(` Reason: ${item.invalidationReason}`);
10887
+ }
10888
+ }
10889
+ } else {
10890
+ console.log(` Type: ${item.type} | Source: ${item.source}`);
10891
+ }
10892
+ if (item.tags.length > 0) {
10893
+ console.log(` Tags: ${item.tags.join(", ")}`);
10894
+ }
10895
+ console.log();
10896
+ }
10897
+ if (skippedCount > 0) {
10898
+ out.warn(`${skippedCount} corrupted lesson(s) skipped.`);
10899
+ }
10900
+ }
10901
+ async function loadSessionAction(cmd, options) {
10902
+ const repoRoot = getRepoRoot();
10903
+ const { quiet } = getGlobalOpts(cmd);
10904
+ const lessons = await loadSessionLessons(repoRoot);
10905
+ const { lessons: allLessons } = await readLessons(repoRoot);
10906
+ const totalCount = allLessons.length;
10907
+ if (options.json) {
10908
+ console.log(JSON.stringify({ lessons, count: lessons.length, totalCount }));
10909
+ return;
10910
+ }
10911
+ if (lessons.length === 0) {
10912
+ console.log("No high-severity lessons found.");
10913
+ return;
10914
+ }
10915
+ outputSessionLessonsHuman(lessons, quiet);
10916
+ if (totalCount > LESSON_COUNT_WARNING_THRESHOLD) {
10917
+ console.log("");
10918
+ out.info(`${totalCount} lessons in index. Consider \`ca compact\` to reduce context pollution.`);
10919
+ }
10920
+ const oldLessons = lessons.filter((l) => getLessonAgeDays(l) > AGE_FLAG_THRESHOLD_DAYS);
10921
+ if (oldLessons.length > 0) {
10922
+ console.log("");
10923
+ out.warn(`${oldLessons.length} lesson(s) are over ${AGE_FLAG_THRESHOLD_DAYS} days old. Review for continued validity.`);
10924
+ }
10925
+ }
10926
+ async function checkPlanAction(cmd, options) {
10927
+ const repoRoot = getRepoRoot();
10928
+ const limit = parseLimitOrNull(options.limit, "limit", "check-plan");
10929
+ if (limit === null) {
10930
+ process.exitCode = 1;
10931
+ return;
10932
+ }
10933
+ const { quiet } = getGlobalOpts(cmd);
10934
+ const planText = options.plan ?? await readPlanFromStdin();
10935
+ if (!planText) {
10936
+ console.error(formatError("check-plan", "NO_PLAN", "No plan provided", "Use --plan <text> or pipe text to stdin"));
10937
+ process.exitCode = 1;
10938
+ return;
10939
+ }
10940
+ await syncIfNeeded(repoRoot);
10941
+ if (!isModelAvailable()) {
10942
+ if (options.json) {
10943
+ console.log(JSON.stringify({
10944
+ lessons: [],
10945
+ count: 0,
10946
+ error: "Embedding model not found",
10947
+ action: "Run: npx ca download-model"
10948
+ }));
10949
+ } else {
10950
+ console.error(formatError("check-plan", "MODEL_UNAVAILABLE", "Embedding model not found", "Run: npx ca download-model"));
10951
+ }
10952
+ process.exitCode = 1;
10953
+ return;
10954
+ }
10955
+ try {
10956
+ const result = await retrieveForPlan(repoRoot, planText, limit);
10957
+ if (options.json) {
10958
+ outputCheckPlanJson(result.lessons);
10959
+ return;
10960
+ }
10961
+ if (result.lessons.length === 0) {
10962
+ console.log("No relevant lessons found for this plan.");
10963
+ return;
10964
+ }
10965
+ outputCheckPlanHuman(result.lessons, quiet);
10966
+ } catch (err) {
10967
+ const message = err instanceof Error ? err.message : "Unknown error";
10968
+ if (options.json) {
10969
+ console.log(JSON.stringify({
10970
+ lessons: [],
10971
+ count: 0,
10972
+ error: message
10973
+ }));
10974
+ } else {
10975
+ console.error(formatError("check-plan", "PLAN_CHECK_FAILED", message, "Check model installation and try again"));
10976
+ }
10977
+ process.exitCode = 1;
10978
+ }
10979
+ }
10980
+ function registerRetrievalCommands(program2) {
10981
+ program2.command("search <query>").description("Search lessons").option("-n, --limit <number>", "Maximum results", DEFAULT_SEARCH_LIMIT).action(async function(query, options) {
10982
+ await searchAction(this, query, options);
10983
+ });
10984
+ program2.command("list").description("List all lessons").option("-n, --limit <number>", "Maximum results", DEFAULT_LIST_LIMIT).option("--invalidated", "Show only invalidated lessons").action(async function(options) {
10985
+ await listAction(this, options);
10986
+ });
10987
+ program2.command("load-session").description("Load high-severity lessons for session context").option("--json", "Output as JSON").action(async function(options) {
10988
+ await loadSessionAction(this, options);
10989
+ });
10990
+ program2.command("check-plan").description("Check plan against relevant lessons").option("--plan <text>", "Plan text to check").option("--json", "Output as JSON").option("-n, --limit <number>", "Maximum results", DEFAULT_CHECK_PLAN_LIMIT).action(async function(options) {
10991
+ await checkPlanAction(this, options);
10992
+ });
10993
+ }
10994
+
10995
+ // src/commands/index.ts
10996
+ function registerSetupCommands(program2) {
10997
+ registerInitCommand(program2);
10998
+ registerHooksCommand(program2);
10999
+ const setupCommand = program2.command("setup");
11000
+ registerSetupAllCommand(setupCommand);
11001
+ registerClaudeSubcommand(setupCommand);
11002
+ registerGeminiSubcommand(setupCommand);
11003
+ registerDownloadModelCommand(program2);
11004
+ }
11005
+ function registerManagementCommands(program2) {
11006
+ registerInvalidationCommands(program2);
11007
+ registerMaintenanceCommands(program2);
11008
+ registerIOCommands(program2);
11009
+ registerPrimeCommand(program2);
11010
+ registerCrudCommands(program2);
11011
+ registerAuditCommands(program2);
11012
+ registerDoctorCommand(program2);
11013
+ registerReviewerCommand(program2);
11014
+ registerRulesCommands(program2);
11015
+ registerTestSummaryCommand(program2);
11016
+ registerVerifyGatesCommand(program2);
11017
+ registerAboutCommand(program2);
11018
+ registerKnowledgeCommand(program2);
11019
+ registerKnowledgeIndexCommand(program2);
11020
+ registerCleanLessonsCommand(program2);
11021
+ program2.command("worktree").description("(removed) Use Claude Code native worktree support").action(() => {
11022
+ console.error("ca worktree has been removed. Use Claude Code's native EnterWorktree support instead.");
11023
+ process.exitCode = 1;
11024
+ });
11025
+ }
11026
+ var LOOP_EPIC_ID_PATTERN = /^[a-zA-Z0-9_.-]+$/;
11027
+ var MODEL_PATTERN = /^[a-zA-Z0-9_.:/-]+$/;
11028
+ function buildScriptHeader(timestamp, maxRetries, model, epicIds) {
11029
+ return `#!/usr/bin/env bash
11030
+ # Infinity Loop - Generated by: ca loop
11031
+ # Date: ${timestamp}
11032
+ # Autonomously processes beads epics via Claude Code sessions.
11033
+ #
11034
+ # Usage:
11035
+ # ./infinity-loop.sh
11036
+ # LOOP_DRY_RUN=1 ./infinity-loop.sh # Preview without executing
11037
+
11038
+ set -euo pipefail
11039
+
11040
+ # Config
11041
+ MAX_RETRIES=${maxRetries}
11042
+ MODEL="${model}"
11043
+ EPIC_IDS="${epicIds}"
11044
+ LOG_DIR="agent_logs"
11045
+
11046
+ # Helpers
11047
+ timestamp() { date '+%Y-%m-%d_%H-%M-%S'; }
11048
+ log() { echo "[$(timestamp)] $*"; }
11049
+ die() { log "FATAL: $*"; exit 1; }
11050
+
11051
+ command -v claude >/dev/null || die "claude CLI required"
11052
+ command -v bd >/dev/null || die "bd (beads) CLI required"
11053
+
11054
+ # Detect JSON parser: prefer jq, fall back to python3
11055
+ HAS_JQ=false
11056
+ command -v jq >/dev/null 2>&1 && HAS_JQ=true
11057
+ if [ "$HAS_JQ" = false ]; then
11058
+ command -v python3 >/dev/null 2>&1 || die "jq or python3 required for JSON parsing"
11059
+ fi
11060
+
11061
+ # parse_json() - extract a value from JSON stdin
11062
+ # Uses jq (primary) with python3 fallback
11063
+ # Auto-unwraps single-element arrays (bd show --json returns [...])
11064
+ # Usage: echo '[{"status":"open"}]' | parse_json '.status'
11065
+ parse_json() {
11066
+ local filter="$1"
11067
+ if [ "$HAS_JQ" = true ]; then
11068
+ jq -r "if type == \\"array\\" then .[0] else . end | $filter"
11069
+ else
11070
+ python3 -c "
11071
+ import sys, json
11072
+ data = json.load(sys.stdin)
11073
+ if isinstance(data, list):
11074
+ data = data[0] if data else {}
11075
+ f = '$filter'.strip('.')
11076
+ parts = [p for p in f.split('.') if p]
11077
+ v = data
11078
+ try:
11079
+ for p in parts:
11080
+ v = v[p]
11081
+ except (KeyError, IndexError, TypeError):
11082
+ v = None
11083
+ print('' if v is None else v)
11084
+ "
11085
+ fi
11086
+ }
11087
+
11088
+ mkdir -p "$LOG_DIR"
11089
+ ` + buildEpicSelector() + buildPromptFunction();
11090
+ }
11091
+ function buildEpicSelector() {
11092
+ return `
11093
+ get_next_epic() {
11094
+ if [ -n "$EPIC_IDS" ]; then
11095
+ # From explicit list, find first still-open epic not yet processed
11096
+ for epic_id in $EPIC_IDS; do
11097
+ case " $PROCESSED " in *" $epic_id "*) continue ;; esac
11098
+ local status
11099
+ status=$(bd show "$epic_id" --json 2>/dev/null | parse_json '.status' 2>/dev/null || echo "")
11100
+ if [ "$status" = "open" ]; then
11101
+ echo "$epic_id"
11102
+ return 0
11103
+ fi
11104
+ done
11105
+ return 1
11106
+ else
11107
+ # Dynamic: get next ready epic from dependency graph, filtering processed
11108
+ local epic_id
11109
+ if [ "$HAS_JQ" = true ]; then
11110
+ epic_id=$(bd list --type=epic --ready --json --limit=10 2>/dev/null | jq -r '.[].id' 2>/dev/null | while read -r id; do
11111
+ case " $PROCESSED " in *" $id "*) continue ;; esac
11112
+ echo "$id"
11113
+ break
11114
+ done)
11115
+ else
11116
+ epic_id=$(bd list --type=epic --ready --json --limit=10 2>/dev/null | python3 -c "
11117
+ import sys, json
11118
+ processed = set('$PROCESSED'.split())
11119
+ items = json.load(sys.stdin)
11120
+ for item in items:
11121
+ if item['id'] not in processed:
11122
+ print(item['id'])
11123
+ break" 2>/dev/null || echo "")
11124
+ fi
11125
+ if [ -z "$epic_id" ]; then
11126
+ return 1
11127
+ fi
11128
+ echo "$epic_id"
11129
+ return 0
11130
+ fi
11131
+ }
11132
+ `;
11133
+ }
11134
+ function buildPromptFunction() {
10620
11135
  return `
10621
11136
  build_prompt() {
10622
11137
  local epic_id="$1"
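Earlier in this hunk, the new `readPlanFromStdin` bounds piped input in both time and size, per the 1.6.1 `check-plan` note. The same race-with-limits pattern as a self-contained sketch (constants as in the bundle; the `readBoundedStdin` name is illustrative):

```js
const MAX_STDIN_BYTES = 1048576; // 1MB cap, as in the bundle
const STDIN_TIMEOUT_MS = 30000;  // 30s cap

// Read all of stdin, but fail fast if the pipe stalls or the input is too large.
// Callers treat `undefined` as "no plan provided".
async function readBoundedStdin(stdin = process.stdin) {
  if (stdin.isTTY) return undefined; // nothing was piped in
  const chunks = [];
  let total = 0;
  let timer;
  const timeout = new Promise((_, reject) => {
    timer = setTimeout(
      () => reject(new Error("stdin read timed out after 30s")),
      STDIN_TIMEOUT_MS
    );
  });
  const read = (async () => {
    for await (const chunk of stdin) {
      total += chunk.length;
      if (total > MAX_STDIN_BYTES) {
        throw new Error(`stdin exceeds ${MAX_STDIN_BYTES} byte limit`);
      }
      chunks.push(chunk);
    }
    return Buffer.concat(chunks).toString("utf-8").trim();
  })();
  try {
    return await Promise.race([read, timeout]);
  } catch (err) {
    console.error(`Warning: ${err instanceof Error ? err.message : String(err)}`);
    return undefined;
  } finally {
    clearTimeout(timer); // unlike the bundle, clear the timer so the loop can exit promptly
  }
}
```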
@@ -10636,9 +11151,9 @@ Read the epic details carefully. Understand scope, acceptance criteria, and sub-
10636
11151
 
10637
11152
  ## Step 2: Execute the workflow
10638
11153
  Run the full compound workflow for this epic, starting from the plan phase
10639
- (brainstorm is already done -- the epic exists):
11154
+ (spec-dev is already done -- the epic exists):
10640
11155
 
10641
- /compound:lfg from plan -- Epic: $epic_id
11156
+ /compound:cook-it from plan -- Epic: $epic_id
10642
11157
 
10643
11158
  Work through all phases: plan, work, review, compound.
10644
11159
 
@@ -10788,540 +11303,244 @@ while true; do
10788
11303
  else
10789
11304
  FAILED=$((FAILED + 1))
10790
11305
  log "Epic $EPIC_ID failed after $((MAX_RETRIES + 1)) attempts. Stopping loop."
10791
- PROCESSED="$PROCESSED $EPIC_ID"
10792
- break
10793
- fi
10794
-
10795
- PROCESSED="$PROCESSED $EPIC_ID"
10796
- done
10797
-
10798
- log "Loop finished. Completed: $COMPLETED, Failed: $FAILED, Skipped: $SKIPPED"
10799
- [ $FAILED -eq 0 ] && exit 0 || exit 1`;
10800
- }
10801
- function validateOptions(options) {
10802
- if (!Number.isInteger(options.maxRetries) || options.maxRetries < 0) {
10803
- throw new Error(`Invalid maxRetries: must be a non-negative integer, got ${options.maxRetries}`);
10804
- }
10805
- if (!MODEL_PATTERN.test(options.model)) {
10806
- throw new Error(`Invalid model "${options.model}": must match ${MODEL_PATTERN}`);
10807
- }
10808
- if (options.epics) {
10809
- for (const id of options.epics) {
10810
- if (!LOOP_EPIC_ID_PATTERN.test(id)) {
10811
- throw new Error(`Invalid epic ID "${id}": must match ${LOOP_EPIC_ID_PATTERN}`);
10812
- }
10813
- }
10814
- }
10815
- }
10816
- function generateLoopScript(options) {
10817
- validateOptions(options);
10818
- const epicIds = options.epics?.join(" ") ?? "";
10819
- const timestamp = (/* @__PURE__ */ new Date()).toISOString();
10820
- return buildScriptHeader(timestamp, options.maxRetries, options.model, epicIds) + buildStreamExtractor() + buildMainLoop();
10821
- }
10822
- async function handleLoop(cmd, options) {
10823
- const outputPath = resolve(options.output ?? "./infinity-loop.sh");
10824
- if (existsSync(outputPath) && !options.force) {
10825
- out.error(`File already exists: ${outputPath}`);
10826
- out.info("Use --force to overwrite");
10827
- process.exitCode = 1;
10828
- return;
10829
- }
10830
- const maxRetries = Number(options.maxRetries ?? 1);
10831
- if (!Number.isInteger(maxRetries) || maxRetries < 0) {
10832
- out.error(`Invalid --max-retries: must be a non-negative integer, got "${options.maxRetries}"`);
10833
- process.exitCode = 1;
10834
- return;
10835
- }
10836
- let script;
10837
- try {
10838
- script = generateLoopScript({
10839
- epics: options.epics,
10840
- maxRetries,
10841
- model: options.model ?? "claude-opus-4-6"
10842
- });
10843
- } catch (err) {
10844
- out.error(err.message);
10845
- process.exitCode = 1;
10846
- return;
10847
- }
10848
- await mkdir(dirname(outputPath), { recursive: true });
10849
- await writeFile(outputPath, script, "utf-8");
10850
- await chmod(outputPath, 493);
10851
- out.success(`Generated infinity loop script: ${outputPath}`);
10852
- out.info("Run it with: " + outputPath);
10853
- out.info("Preview with: LOOP_DRY_RUN=1 " + outputPath);
10854
- }
10855
- function registerLoopCommands(program2) {
10856
- program2.command("loop").description("Generate infinity loop script for epic tasks").option("--epics <ids...>", "Specific epic IDs to process").option("-o, --output <path>", "Output script path", "./infinity-loop.sh").option("--max-retries <n>", "Max retries per epic on failure", "1").option("--model <model>", "Claude model to use", "claude-opus-4-6").option("--force", "Overwrite existing script").action(async function(options) {
10857
- await handleLoop(this, options);
10858
- });
10859
- }
10860
- var EPIC_ID_PATTERN2 = /^[a-zA-Z0-9_.-]+$/;
10861
- function formatTime(timestamp) {
10862
- if (!timestamp) {
10863
- const now = /* @__PURE__ */ new Date();
10864
- return now.toTimeString().slice(0, 8);
10865
- }
10866
- try {
10867
- return new Date(timestamp).toTimeString().slice(0, 8);
10868
- } catch {
10869
- return (/* @__PURE__ */ new Date()).toTimeString().slice(0, 8);
10870
- }
10871
- }
10872
- function formatNumber(n) {
10873
- return n.toLocaleString();
10874
- }
10875
- function formatStreamEvent(event) {
10876
- const time = chalk4.dim(formatTime(event.timestamp));
10877
- switch (event.type) {
10878
- case "content_block_start": {
10879
- if (event.content_block?.type === "tool_use") {
10880
- const name = event.content_block.name ?? "unknown";
10881
- return `${time} ${chalk4.cyan("TOOL")} ${name}`;
10882
- }
10883
- if (event.content_block?.type === "thinking") {
10884
- return `${time} ${chalk4.magenta("THINK")} thinking...`;
10885
- }
10886
- return null;
10887
- }
10888
- case "content_block_delta": {
10889
- if (event.delta?.type === "text_delta") {
10890
- const text = event.delta.text ?? "";
10891
- const truncated = text.length > 60 ? text.slice(0, 57) + "..." : text;
10892
- return `${time} ${chalk4.blue("TEXT")} ${truncated.replace(/\n/g, " ")}`;
10893
- }
10894
- return null;
10895
- }
10896
- case "message_delta": {
10897
- const usage = event.usage;
10898
- if (usage?.output_tokens) {
10899
- return `${time} ${chalk4.dim("TOKENS")} ${formatNumber(usage.output_tokens)} out (final)`;
10900
- }
10901
- return null;
10902
- }
10903
- case "message_start": {
10904
- if (event.message?.usage) {
10905
- const { input_tokens, output_tokens } = event.message.usage;
10906
- const inTok = input_tokens ? formatNumber(input_tokens) : "?";
10907
- const outTok = output_tokens ? formatNumber(output_tokens) : "?";
10908
- return `${time} ${chalk4.dim("TOKENS")} ${inTok} in / ${outTok} out`;
10909
- }
10910
- return null;
10911
- }
10912
- case "result": {
10913
- const text = typeof event.result === "string" ? event.result : "";
10914
- const markers = ["EPIC_COMPLETE", "EPIC_FAILED", "HUMAN_REQUIRED"];
10915
- const found = markers.find((m) => text.includes(m));
10916
- if (found) {
10917
- const markerLine = text.split("\n").find((l) => l.includes(found)) ?? found;
10918
- const display = markerLine.length > 120 ? markerLine.slice(0, 117) + "..." : markerLine;
10919
- return `${time} ${chalk4.yellow.bold("MARKER")} ${display}`;
10920
- }
10921
- return null;
10922
- }
10923
- default:
10924
- return null;
10925
- }
10926
- }
10927
- function findLatestTraceFile(logDir) {
10928
- if (!existsSync(logDir)) return null;
10929
- const latestPath = join(logDir, ".latest");
10930
- if (existsSync(latestPath)) {
10931
- try {
10932
- const target = readlinkSync(latestPath);
10933
- const resolved = resolve(logDir, target);
10934
- if (existsSync(resolved)) return resolved;
10935
- } catch {
10936
- }
10937
- }
10938
- try {
10939
- const files = readdirSync(logDir).filter((f) => f.startsWith("trace_") && f.endsWith(".jsonl")).sort().reverse();
10940
- const first = files[0];
10941
- if (first) return join(logDir, first);
10942
- } catch {
10943
- }
10944
- return null;
10945
- }
10946
- function processLine(line) {
10947
- const trimmed = line.trim();
10948
- if (!trimmed) return;
10949
- try {
10950
- const event = JSON.parse(trimmed);
10951
- const formatted = formatStreamEvent(event);
10952
- if (formatted) {
10953
- console.log(formatted);
10954
- }
10955
- } catch {
10956
- }
11306
+ PROCESSED="$PROCESSED $EPIC_ID"
11307
+ break
11308
+ fi
11309
+
11310
+ PROCESSED="$PROCESSED $EPIC_ID"
11311
+ done
11312
+
11313
+ log "Loop finished. Completed: $COMPLETED, Failed: $FAILED, Skipped: $SKIPPED"
11314
+ [ $FAILED -eq 0 ] && exit 0 || exit 1`;
10957
11315
  }
10958
- async function tailFile(filePath, follow) {
10959
- if (follow) {
10960
- const child = spawn("tail", ["-f", "-n", "+1", filePath], { stdio: ["ignore", "pipe", "ignore"] });
10961
- const rl2 = createInterface({ input: child.stdout });
10962
- rl2.on("line", processLine);
10963
- const cleanup2 = () => {
10964
- child.kill("SIGTERM");
10965
- };
10966
- process.on("SIGINT", cleanup2);
10967
- process.on("SIGTERM", cleanup2);
10968
- return new Promise((done) => {
10969
- child.on("close", () => {
10970
- process.off("SIGINT", cleanup2);
10971
- process.off("SIGTERM", cleanup2);
10972
- done();
10973
- });
10974
- });
10975
- }
10976
- const stream = createReadStream(filePath, { encoding: "utf-8" });
10977
- const rl = createInterface({ input: stream });
10978
- try {
10979
- for await (const line of rl) {
10980
- processLine(line);
10981
- }
10982
- } finally {
10983
- rl.close();
10984
- stream.destroy();
11316
+ function validateOptions(options) {
11317
+ if (!Number.isInteger(options.maxRetries) || options.maxRetries < 0) {
11318
+ throw new Error(`Invalid maxRetries: must be a non-negative integer, got ${options.maxRetries}`);
10985
11319
  }
10986
- }
10987
- async function handleWatch(cmd, options) {
10988
- let logDir;
10989
- try {
10990
- logDir = join(getRepoRoot(), "agent_logs");
10991
- } catch {
10992
- logDir = resolve("agent_logs");
11320
+ if (!MODEL_PATTERN.test(options.model)) {
11321
+ throw new Error(`Invalid model "${options.model}": must match ${MODEL_PATTERN}`);
10993
11322
  }
10994
- const follow = options.follow !== false;
10995
- let traceFile = null;
10996
- if (options.epic) {
10997
- if (!EPIC_ID_PATTERN2.test(options.epic)) {
10998
- out.error(`Invalid epic ID: ${options.epic}`);
10999
- process.exitCode = 1;
11000
- return;
11001
- }
11002
- if (existsSync(logDir)) {
11003
- try {
11004
- const files = readdirSync(logDir).filter((f) => f.startsWith(`trace_${options.epic}`) && f.endsWith(".jsonl")).sort().reverse();
11005
- const first = files[0];
11006
- if (first) traceFile = join(logDir, first);
11007
- } catch {
11323
+ if (options.epics) {
11324
+ for (const id of options.epics) {
11325
+ if (!LOOP_EPIC_ID_PATTERN.test(id)) {
11326
+ throw new Error(`Invalid epic ID "${id}": must match ${LOOP_EPIC_ID_PATTERN}`);
11008
11327
  }
11009
11328
  }
11010
- if (!traceFile) {
11011
- out.error(`No trace file found for epic: ${options.epic}`);
11012
- process.exitCode = 1;
11013
- return;
11014
- }
11015
- } else {
11016
- traceFile = findLatestTraceFile(logDir);
11017
- if (!traceFile) {
11018
- out.info("No active trace found. Run `ca loop` to generate a loop script first.");
11019
- process.exitCode = 0;
11020
- return;
11021
- }
11022
- }
11023
- out.info(`Watching: ${traceFile}`);
11024
- await tailFile(traceFile, follow);
11025
- }
11026
- function registerWatchCommand(program2) {
11027
- program2.command("watch").description("Tail and pretty-print live trace from infinity loop sessions").option("--epic <id>", "Watch a specific epic trace").option("--no-follow", "Print existing trace and exit (no live tail)").action(async function(options) {
11028
- await handleWatch(this, options);
11029
- });
11030
- }
11031
- init_storage();
11032
- init_search2();
11033
- function parseLimitOrNull(rawLimit, optionName, commandName) {
11034
- try {
11035
- return parseLimit(rawLimit, optionName);
11036
- } catch (err) {
11037
- const message = err instanceof Error ? err.message : `Invalid ${optionName}`;
11038
- console.error(formatError(commandName, "INVALID_LIMIT", message, `Use --${optionName} with a positive integer`));
11039
- return null;
11040
- }
11041
- }
11042
- async function readPlanFromStdin() {
11043
- const { stdin } = await import('process');
11044
- if (!stdin.isTTY) {
11045
- const chunks = [];
11046
- for await (const chunk of stdin) {
11047
- chunks.push(chunk);
11048
- }
11049
- return Buffer.concat(chunks).toString("utf-8").trim();
11050
- }
11051
- return void 0;
11052
- }
11053
- function outputCheckPlanJson(lessons) {
11054
- const jsonOutput = {
11055
- lessons: lessons.map((l) => ({
11056
- id: l.lesson.id,
11057
- insight: l.lesson.insight,
11058
- rankScore: l.finalScore ?? l.score,
11059
- // Use finalScore if available, fallback to raw score
11060
- source: l.lesson.source
11061
- })),
11062
- count: lessons.length
11063
- };
11064
- console.log(JSON.stringify(jsonOutput));
11065
- }
11066
- function outputCheckPlanHuman(lessons, quiet) {
11067
- console.log("## Lessons Check\n");
11068
- console.log("Relevant to your plan:\n");
11069
- lessons.forEach((item, i) => {
11070
- const num = i + 1;
11071
- console.log(`${num}. ${chalk4.bold(`[${item.lesson.id}]`)} ${item.lesson.insight}`);
11072
- console.log(` - Source: ${item.lesson.source}`);
11073
- console.log();
11074
- });
11075
- if (!quiet) {
11076
- console.log("---");
11077
- console.log("Consider these lessons while implementing.");
11078
11329
  }
11079
11330
  }
11080
- function formatSource2(source) {
11081
- return source.replace(/_/g, " ");
11082
- }
11083
- function outputSessionLessonsHuman(lessons, quiet) {
11084
- console.log("## Lessons from Past Sessions\n");
11085
- console.log("These lessons were captured from previous corrections and should inform your work:\n");
11086
- lessons.forEach((lesson, i) => {
11087
- const num = i + 1;
11088
- const date = lesson.created.slice(0, ISO_DATE_PREFIX_LENGTH);
11089
- const tagsDisplay = lesson.tags.length > 0 ? ` (${lesson.tags.join(", ")})` : "";
11090
- console.log(`${num}. **${lesson.insight}**${tagsDisplay}`);
11091
- console.log(` Learned: ${date} via ${formatSource2(lesson.source)}`);
11092
- console.log();
11093
- });
11094
- if (!quiet) {
11095
- console.log("Consider these lessons when planning and implementing tasks.");
11096
- }
11331
+ function generateLoopScript(options) {
11332
+ validateOptions(options);
11333
+ const epicIds = options.epics?.join(" ") ?? "";
11334
+ const timestamp = (/* @__PURE__ */ new Date()).toISOString();
11335
+ return buildScriptHeader(timestamp, options.maxRetries, options.model, epicIds) + buildStreamExtractor() + buildMainLoop();
11097
11336
  }
11098
- async function searchAction(cmd, query, options) {
11099
- const repoRoot = getRepoRoot();
11100
- const limit = parseLimitOrNull(options.limit, "limit", "search");
11101
- if (limit === null) {
11337
+ async function handleLoop(cmd, options) {
11338
+ const outputPath = resolve(options.output ?? "./infinity-loop.sh");
11339
+ if (existsSync(outputPath) && !options.force) {
11340
+ out.error(`File already exists: ${outputPath}`);
11341
+ out.info("Use --force to overwrite");
11102
11342
  process.exitCode = 1;
11103
11343
  return;
11104
11344
  }
11105
- const { verbose, quiet } = getGlobalOpts(cmd);
11106
- await syncIfNeeded(repoRoot);
11107
- let results;
11108
- if (isModelAvailable()) {
11109
- try {
11110
- const candidateLimit = limit * CANDIDATE_MULTIPLIER;
11111
- const [vectorResults, keywordResults] = await Promise.all([
11112
- searchVector(repoRoot, query, { limit: candidateLimit }),
11113
- searchKeywordScored(repoRoot, query, candidateLimit)
11114
- ]);
11115
- const merged = mergeHybridResults(vectorResults, keywordResults, { minScore: MIN_HYBRID_SCORE });
11116
- const ranked = rankLessons(merged);
11117
- results = ranked.slice(0, limit).map((r) => r.lesson);
11118
- } catch {
11119
- results = await searchKeyword(repoRoot, query, limit);
11120
- }
11121
- } else {
11122
- results = await searchKeyword(repoRoot, query, limit);
11123
- }
11124
- if (results.length > 0) {
11125
- incrementRetrievalCount(repoRoot, results.map((lesson) => lesson.id));
11345
+ const maxRetries = Number(options.maxRetries ?? 1);
11346
+ if (!Number.isInteger(maxRetries) || maxRetries < 0) {
11347
+ out.error(`Invalid --max-retries: must be a non-negative integer, got "${options.maxRetries}"`);
11348
+ process.exitCode = 1;
11349
+ return;
11126
11350
  }
11127
- if (results.length === 0) {
11128
- console.log('No lessons match your search. Try a different query or use "list" to see all lessons.');
11351
+ let script;
11352
+ try {
11353
+ script = generateLoopScript({
11354
+ epics: options.epics,
11355
+ maxRetries,
11356
+ model: options.model ?? "claude-opus-4-6"
11357
+ });
11358
+ } catch (err) {
11359
+ out.error(err.message);
11360
+ process.exitCode = 1;
11129
11361
  return;
11130
11362
  }
11131
- if (!quiet) {
11132
- out.info(`Found ${results.length} lesson(s):
11133
- `);
11363
+ await mkdir(dirname(outputPath), { recursive: true });
11364
+ await writeFile(outputPath, script, "utf-8");
11365
+ await chmod(outputPath, 493);
11366
+ out.success(`Generated infinity loop script: ${outputPath}`);
11367
+ out.info("Run it with: " + outputPath);
11368
+ out.info("Preview with: LOOP_DRY_RUN=1 " + outputPath);
11369
+ }
11370
+ function registerLoopCommands(program2) {
11371
+ program2.command("loop").description("Generate infinity loop script for epic tasks").option("--epics <ids...>", "Specific epic IDs to process").option("-o, --output <path>", "Output script path", "./infinity-loop.sh").option("--max-retries <n>", "Max retries per epic on failure", "1").option("--model <model>", "Claude model to use", "claude-opus-4-6").option("--force", "Overwrite existing script").action(async function(options) {
11372
+ await handleLoop(this, options);
11373
+ });
11374
+ }
11375
+ var EPIC_ID_PATTERN2 = /^[a-zA-Z0-9_.-]+$/;
+ function formatTime(timestamp) {
+   if (!timestamp) {
+     const now = /* @__PURE__ */ new Date();
+     return now.toTimeString().slice(0, 8);
    }
-   for (const lesson of results) {
-     console.log(`[${chalk4.cyan(lesson.id)}] ${lesson.insight}`);
-     console.log(` Trigger: ${lesson.trigger}`);
-     if (verbose && lesson.context) {
-       console.log(` Context: ${lesson.context.tool} - ${lesson.context.intent}`);
-       console.log(` Created: ${lesson.created}`);
-     }
-     if (lesson.tags.length > 0) {
-       console.log(` Tags: ${lesson.tags.join(", ")}`);
-     }
-     console.log();
+   try {
+     return new Date(timestamp).toTimeString().slice(0, 8);
+   } catch {
+     return (/* @__PURE__ */ new Date()).toTimeString().slice(0, 8);
    }
  }
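
formatTime leans on the fixed "HH:MM:SS GMT..." prefix of Date#toTimeString; a minimal illustration, assuming a standard Node runtime:

  // toTimeString() begins "HH:MM:SS", so slice(0, 8) is the local clock time:
  new Date("2024-05-01T03:04:05").toTimeString().slice(0, 8); // "03:04:05" (local time)
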
- async function listAction(cmd, options) {
-   const repoRoot = getRepoRoot();
-   const limit = parseLimitOrNull(options.limit, "limit", "list");
-   if (limit === null) {
-     process.exitCode = 1;
-     return;
-   }
-   const { verbose, quiet } = getGlobalOpts(cmd);
-   const { items, skippedCount } = await readMemoryItems(repoRoot);
-   const filteredItems = options.invalidated ? items.filter((i) => i.invalidatedAt) : items;
-   if (filteredItems.length === 0) {
-     if (options.invalidated) {
-       console.log("No invalidated lessons found.");
-     } else {
-       console.log('No lessons found. Get started with: learn "Your first lesson"');
+ function formatNumber(n) {
+   return n.toLocaleString();
+ }
+ function formatStreamEvent(event) {
+   const time = chalk5.dim(formatTime(event.timestamp));
+   switch (event.type) {
+     case "content_block_start": {
+       if (event.content_block?.type === "tool_use") {
+         const name = event.content_block.name ?? "unknown";
+         return `${time} ${chalk5.cyan("TOOL")} ${name}`;
+       }
+       if (event.content_block?.type === "thinking") {
+         return `${time} ${chalk5.magenta("THINK")} thinking...`;
+       }
+       return null;
      }
-     if (skippedCount > 0) {
-       out.warn(`${skippedCount} corrupted lesson(s) skipped.`);
+     case "content_block_delta": {
+       if (event.delta?.type === "text_delta") {
+         const text = event.delta.text ?? "";
+         const truncated = text.length > 60 ? text.slice(0, 57) + "..." : text;
+         return `${time} ${chalk5.blue("TEXT")} ${truncated.replace(/\n/g, " ")}`;
+       }
+       return null;
      }
-     return;
-   }
-   const toShow = filteredItems.slice(0, limit);
-   if (!quiet) {
-     const label = options.invalidated ? "invalidated lesson(s)" : "item(s)";
-     out.info(`Showing ${toShow.length} of ${filteredItems.length} ${label}:
- `);
-   }
-   for (const item of toShow) {
-     const invalidMarker = item.invalidatedAt ? chalk4.red("[INVALID] ") : "";
-     console.log(`[${chalk4.cyan(item.id)}] ${invalidMarker}${item.insight}`);
-     if (verbose) {
-       console.log(` Type: ${item.type} | Source: ${item.source}`);
-       console.log(` Created: ${item.created}`);
-       if (item.context) {
-         console.log(` Context: ${item.context.tool} - ${item.context.intent}`);
+     case "message_delta": {
+       const usage = event.usage;
+       if (usage?.output_tokens) {
+         return `${time} ${chalk5.dim("TOKENS")} ${formatNumber(usage.output_tokens)} out (final)`;
        }
-       if (item.invalidatedAt) {
-         console.log(` Invalidated: ${item.invalidatedAt}`);
-         if (item.invalidationReason) {
-           console.log(` Reason: ${item.invalidationReason}`);
-         }
+       return null;
+     }
+     case "message_start": {
+       if (event.message?.usage) {
+         const { input_tokens, output_tokens } = event.message.usage;
+         const inTok = input_tokens ? formatNumber(input_tokens) : "?";
+         const outTok = output_tokens ? formatNumber(output_tokens) : "?";
+         return `${time} ${chalk5.dim("TOKENS")} ${inTok} in / ${outTok} out`;
        }
-     } else {
-       console.log(` Type: ${item.type} | Source: ${item.source}`);
+       return null;
      }
-     if (item.tags.length > 0) {
-       console.log(` Tags: ${item.tags.join(", ")}`);
+     case "result": {
+       const text = typeof event.result === "string" ? event.result : "";
+       const markers = ["EPIC_COMPLETE", "EPIC_FAILED", "HUMAN_REQUIRED"];
+       const found = markers.find((m) => text.includes(m));
+       if (found) {
+         const markerLine = text.split("\n").find((l) => l.includes(found)) ?? found;
+         const display = markerLine.length > 120 ? markerLine.slice(0, 117) + "..." : markerLine;
+         return `${time} ${chalk5.yellow.bold("MARKER")} ${display}`;
+       }
+       return null;
      }
-     console.log();
-   }
-   if (skippedCount > 0) {
-     out.warn(`${skippedCount} corrupted lesson(s) skipped.`);
+     default:
+       return null;
    }
  }
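
The switch above matches Anthropic-style stream events plus a final "result" record; illustrative inputs and outputs (payload values invented, chalk coloring omitted):

  // A tool_use block start:
  formatStreamEvent({ type: "content_block_start", content_block: { type: "tool_use", name: "Bash" } });
  //   -> "12:00:00 TOOL Bash" (no timestamp field, so the current local time is shown)
  // A text delta, flattened to one line and capped at 60 chars:
  formatStreamEvent({ type: "content_block_delta", delta: { type: "text_delta", text: "hello\nworld" } });
  //   -> "12:00:00 TEXT hello world"
  // A result record carrying one of the loop's sentinel markers:
  formatStreamEvent({ type: "result", result: "EPIC_COMPLETE: all gates green" });
  //   -> "12:00:00 MARKER EPIC_COMPLETE: all gates green"
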
- async function loadSessionAction(cmd, options) {
-   const repoRoot = getRepoRoot();
-   const { quiet } = getGlobalOpts(cmd);
-   const lessons = await loadSessionLessons(repoRoot);
-   const { lessons: allLessons } = await readLessons(repoRoot);
-   const totalCount = allLessons.length;
-   if (options.json) {
-     console.log(JSON.stringify({ lessons, count: lessons.length, totalCount }));
-     return;
-   }
-   if (lessons.length === 0) {
-     console.log("No high-severity lessons found.");
-     return;
-   }
-   outputSessionLessonsHuman(lessons, quiet);
-   if (totalCount > LESSON_COUNT_WARNING_THRESHOLD) {
-     console.log("");
-     out.info(`${totalCount} lessons in index. Consider \`ca compact\` to reduce context pollution.`);
+ function findLatestTraceFile(logDir) {
+   if (!existsSync(logDir)) return null;
+   const latestPath = join(logDir, ".latest");
+   if (existsSync(latestPath)) {
+     try {
+       const target = readlinkSync(latestPath);
+       const resolved = resolve(logDir, target);
+       if (existsSync(resolved)) return resolved;
+     } catch {
+     }
    }
-   const oldLessons = lessons.filter((l) => getLessonAgeDays(l) > AGE_FLAG_THRESHOLD_DAYS);
-   if (oldLessons.length > 0) {
-     console.log("");
-     out.warn(`${oldLessons.length} lesson(s) are over ${AGE_FLAG_THRESHOLD_DAYS} days old. Review for continued validity.`);
+   try {
+     const files = readdirSync(logDir).filter((f) => f.startsWith("trace_") && f.endsWith(".jsonl")).sort().reverse();
+     const first = files[0];
+     if (first) return join(logDir, first);
+   } catch {
    }
+   return null;
  }
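
Picking the newest trace with .sort().reverse() assumes trace filenames embed a lexicographically ordered timestamp; a sketch under that assumption (the naming scheme comes from the generated loop script, which this diff does not show):

  // Zero-padded, ISO-style timestamps sort correctly as plain strings:
  ["trace_epicA_20240501-100000.jsonl", "trace_epicA_20240502-093000.jsonl"]
    .sort().reverse()[0]; // the 20240502 file, i.e. the most recent trace
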
- async function checkPlanAction(cmd, options) {
-   const repoRoot = getRepoRoot();
-   const limit = parseLimitOrNull(options.limit, "limit", "check-plan");
-   if (limit === null) {
-     process.exitCode = 1;
-     return;
+ function processLine(line) {
+   const trimmed = line.trim();
+   if (!trimmed) return;
+   try {
+     const event = JSON.parse(trimmed);
+     const formatted = formatStreamEvent(event);
+     if (formatted) {
+       console.log(formatted);
+     }
+   } catch {
    }
-   const { quiet } = getGlobalOpts(cmd);
-   const planText = options.plan ?? await readPlanFromStdin();
-   if (!planText) {
-     console.error(formatError("check-plan", "NO_PLAN", "No plan provided", "Use --plan <text> or pipe text to stdin"));
-     process.exitCode = 1;
-     return;
+ }
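
One behavioral note on processLine, sketched as comments:

  // JSON.parse throws on blank or partially flushed lines (common while tailing
  // a live file); the empty catch drops them, so only complete JSONL events print.
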
+ async function tailFile(filePath, follow) {
+   if (follow) {
+     const child = spawn("tail", ["-f", "-n", "+1", filePath], { stdio: ["ignore", "pipe", "ignore"] });
+     const rl2 = createInterface({ input: child.stdout });
+     rl2.on("line", processLine);
+     const cleanup2 = () => {
+       child.kill("SIGTERM");
+     };
+     process.on("SIGINT", cleanup2);
+     process.on("SIGTERM", cleanup2);
+     return new Promise((done) => {
+       child.on("close", () => {
+         process.off("SIGINT", cleanup2);
+         process.off("SIGTERM", cleanup2);
+         done();
+       });
+     });
    }
-   await syncIfNeeded(repoRoot);
-   if (!isModelAvailable()) {
-     if (options.json) {
-       console.log(JSON.stringify({
-         lessons: [],
-         count: 0,
-         error: "Embedding model not found",
-         action: "Run: npx ca download-model"
-       }));
-     } else {
-       console.error(formatError("check-plan", "MODEL_UNAVAILABLE", "Embedding model not found", "Run: npx ca download-model"));
+   const stream = createReadStream(filePath, { encoding: "utf-8" });
+   const rl = createInterface({ input: stream });
+   try {
+     for await (const line of rl) {
+       processLine(line);
      }
-     process.exitCode = 1;
-     return;
+   } finally {
+     rl.close();
+     stream.destroy();
    }
  }
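
The follow path shells out to the system tail; the flags, assuming a POSIX tail on PATH (on platforms without one, such as stock Windows, the spawn would fail):

  // tail -f -n +1 <file>: "-n +1" starts at line 1, replaying the whole existing
  // trace, then "-f" keeps following appends until the SIGINT/SIGTERM cleanup fires.
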
+ async function handleWatch(cmd, options) {
+   let logDir;
    try {
-     const result = await retrieveForPlan(repoRoot, planText, limit);
-     if (options.json) {
-       outputCheckPlanJson(result.lessons);
+     logDir = join(getRepoRoot(), "agent_logs");
+   } catch {
+     logDir = resolve("agent_logs");
+   }
+   const follow = options.follow !== false;
+   let traceFile = null;
+   if (options.epic) {
+     if (!EPIC_ID_PATTERN2.test(options.epic)) {
+       out.error(`Invalid epic ID: ${options.epic}`);
+       process.exitCode = 1;
        return;
      }
-     if (result.lessons.length === 0) {
-       console.log("No relevant lessons found for this plan.");
+     if (existsSync(logDir)) {
+       try {
+         const files = readdirSync(logDir).filter((f) => f.startsWith(`trace_${options.epic}`) && f.endsWith(".jsonl")).sort().reverse();
+         const first = files[0];
+         if (first) traceFile = join(logDir, first);
+       } catch {
+       }
+     }
+     if (!traceFile) {
+       out.error(`No trace file found for epic: ${options.epic}`);
+       process.exitCode = 1;
        return;
      }
-     outputCheckPlanHuman(result.lessons, quiet);
-   } catch (err) {
-     const message = err instanceof Error ? err.message : "Unknown error";
-     if (options.json) {
-       console.log(JSON.stringify({
-         lessons: [],
-         count: 0,
-         error: message
-       }));
-     } else {
-       console.error(formatError("check-plan", "PLAN_CHECK_FAILED", message, "Check model installation and try again"));
+   } else {
+     traceFile = findLatestTraceFile(logDir);
+     if (!traceFile) {
+       out.info("No active trace found. Run `ca loop` to generate a loop script first.");
+       process.exitCode = 0;
+       return;
      }
-     process.exitCode = 1;
    }
+   out.info(`Watching: ${traceFile}`);
+   await tailFile(traceFile, follow);
  }
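
The options.follow !== false check pairs with commander's negated-option convention, which the watch command below relies on:

  // .option("--no-follow", ...) makes commander default options.follow to true
  // and set it to false only when the flag is passed; no separate --follow exists.
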
- function registerRetrievalCommands(program2) {
-   program2.command("search <query>").description("Search lessons").option("-n, --limit <number>", "Maximum results", DEFAULT_SEARCH_LIMIT).action(async function(query, options) {
-     await searchAction(this, query, options);
-   });
-   program2.command("list").description("List all lessons").option("-n, --limit <number>", "Maximum results", DEFAULT_LIST_LIMIT).option("--invalidated", "Show only invalidated lessons").action(async function(options) {
-     await listAction(this, options);
-   });
-   program2.command("load-session").description("Load high-severity lessons for session context").option("--json", "Output as JSON").action(async function(options) {
-     await loadSessionAction(this, options);
-   });
-   program2.command("check-plan").description("Check plan against relevant lessons").option("--plan <text>", "Plan text to check").option("--json", "Output as JSON").option("-n, --limit <number>", "Maximum results", DEFAULT_CHECK_PLAN_LIMIT).action(async function(options) {
-     await checkPlanAction(this, options);
-   });
- }
-
- // src/commands/index.ts
- function registerSetupCommands(program2) {
-   registerInitCommand(program2);
-   registerHooksCommand(program2);
-   const setupCommand = program2.command("setup");
-   registerSetupAllCommand(setupCommand);
-   registerClaudeSubcommand(setupCommand);
-   registerGeminiSubcommand(setupCommand);
-   registerDownloadModelCommand(program2);
- }
- function registerManagementCommands(program2) {
-   registerInvalidationCommands(program2);
-   registerMaintenanceCommands(program2);
-   registerIOCommands(program2);
-   registerPrimeCommand(program2);
-   registerCrudCommands(program2);
-   registerAuditCommands(program2);
-   registerDoctorCommand(program2);
-   registerReviewerCommand(program2);
-   registerRulesCommands(program2);
-   registerTestSummaryCommand(program2);
-   registerVerifyGatesCommand(program2);
-   registerAboutCommand(program2);
-   registerKnowledgeCommand(program2);
-   registerKnowledgeIndexCommand(program2);
-   registerCleanLessonsCommand(program2);
-   program2.command("worktree").description("(removed) Use Claude Code native worktree support").action(() => {
-     console.error("ca worktree has been removed. Use Claude Code's native EnterWorktree support instead.");
-     process.exitCode = 1;
+ function registerWatchCommand(program2) {
+   program2.command("watch").description("Tail and pretty-print live trace from infinity loop sessions").option("--epic <id>", "Watch a specific epic trace").option("--no-follow", "Print existing trace and exit (no live tail)").action(async function(options) {
+     await handleWatch(this, options);
    });
  }
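
Together with loop, the new command supports invocations like these (epic ID illustrative):

  // npx ca watch                    -> follow the newest trace in <repo>/agent_logs
  // npx ca watch --epic auth-rework -> follow the latest trace for that epic
  // npx ca watch --no-follow        -> print the existing trace, then exit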