shipwright-cli 3.2.0 → 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/code-reviewer.md +2 -0
- package/.claude/agents/devops-engineer.md +2 -0
- package/.claude/agents/doc-fleet-agent.md +2 -0
- package/.claude/agents/pipeline-agent.md +2 -0
- package/.claude/agents/shell-script-specialist.md +2 -0
- package/.claude/agents/test-specialist.md +2 -0
- package/.claude/hooks/agent-crash-capture.sh +32 -0
- package/.claude/hooks/post-tool-use.sh +3 -2
- package/.claude/hooks/pre-tool-use.sh +35 -3
- package/README.md +4 -4
- package/claude-code/hooks/config-change.sh +18 -0
- package/claude-code/hooks/instructions-reloaded.sh +7 -0
- package/claude-code/hooks/worktree-create.sh +25 -0
- package/claude-code/hooks/worktree-remove.sh +20 -0
- package/config/code-constitution.json +130 -0
- package/dashboard/middleware/auth.ts +134 -0
- package/dashboard/middleware/constants.ts +21 -0
- package/dashboard/public/index.html +2 -6
- package/dashboard/public/styles.css +100 -97
- package/dashboard/routes/auth.ts +38 -0
- package/dashboard/server.ts +66 -25
- package/dashboard/services/config.ts +26 -0
- package/dashboard/services/db.ts +118 -0
- package/dashboard/src/canvas/pixel-agent.ts +298 -0
- package/dashboard/src/canvas/pixel-sprites.ts +440 -0
- package/dashboard/src/canvas/shipyard-effects.ts +367 -0
- package/dashboard/src/canvas/shipyard-scene.ts +616 -0
- package/dashboard/src/canvas/submarine-layout.ts +267 -0
- package/dashboard/src/components/header.ts +8 -7
- package/dashboard/src/core/router.ts +1 -0
- package/dashboard/src/design/submarine-theme.ts +253 -0
- package/dashboard/src/main.ts +2 -0
- package/dashboard/src/types/api.ts +2 -1
- package/dashboard/src/views/activity.ts +2 -1
- package/dashboard/src/views/shipyard.ts +39 -0
- package/dashboard/types/index.ts +166 -0
- package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
- package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
- package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
- package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
- package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
- package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
- package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
- package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
- package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
- package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
- package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
- package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
- package/docs/research/RESEARCH_INDEX.md +439 -0
- package/docs/research/RESEARCH_SOURCES.md +440 -0
- package/docs/research/RESEARCH_SUMMARY.txt +275 -0
- package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
- package/package.json +2 -2
- package/scripts/lib/adaptive-model.sh +427 -0
- package/scripts/lib/adaptive-timeout.sh +316 -0
- package/scripts/lib/audit-trail.sh +309 -0
- package/scripts/lib/auto-recovery.sh +471 -0
- package/scripts/lib/bandit-selector.sh +431 -0
- package/scripts/lib/bootstrap.sh +104 -2
- package/scripts/lib/causal-graph.sh +455 -0
- package/scripts/lib/compat.sh +126 -0
- package/scripts/lib/compound-audit.sh +337 -0
- package/scripts/lib/constitutional.sh +454 -0
- package/scripts/lib/context-budget.sh +359 -0
- package/scripts/lib/convergence.sh +594 -0
- package/scripts/lib/cost-optimizer.sh +634 -0
- package/scripts/lib/daemon-adaptive.sh +10 -0
- package/scripts/lib/daemon-dispatch.sh +106 -17
- package/scripts/lib/daemon-failure.sh +34 -4
- package/scripts/lib/daemon-patrol.sh +23 -2
- package/scripts/lib/daemon-poll-github.sh +361 -0
- package/scripts/lib/daemon-poll-health.sh +299 -0
- package/scripts/lib/daemon-poll.sh +27 -611
- package/scripts/lib/daemon-state.sh +112 -66
- package/scripts/lib/daemon-triage.sh +10 -0
- package/scripts/lib/dod-scorecard.sh +442 -0
- package/scripts/lib/error-actionability.sh +300 -0
- package/scripts/lib/formal-spec.sh +461 -0
- package/scripts/lib/helpers.sh +177 -4
- package/scripts/lib/intent-analysis.sh +409 -0
- package/scripts/lib/loop-convergence.sh +350 -0
- package/scripts/lib/loop-iteration.sh +682 -0
- package/scripts/lib/loop-progress.sh +48 -0
- package/scripts/lib/loop-restart.sh +185 -0
- package/scripts/lib/memory-effectiveness.sh +506 -0
- package/scripts/lib/mutation-executor.sh +352 -0
- package/scripts/lib/outcome-feedback.sh +521 -0
- package/scripts/lib/pipeline-cli.sh +336 -0
- package/scripts/lib/pipeline-commands.sh +1216 -0
- package/scripts/lib/pipeline-detection.sh +100 -2
- package/scripts/lib/pipeline-execution.sh +897 -0
- package/scripts/lib/pipeline-github.sh +28 -3
- package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
- package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
- package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
- package/scripts/lib/pipeline-intelligence.sh +100 -1136
- package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
- package/scripts/lib/pipeline-quality-checks.sh +17 -715
- package/scripts/lib/pipeline-quality-gates.sh +563 -0
- package/scripts/lib/pipeline-stages-build.sh +730 -0
- package/scripts/lib/pipeline-stages-delivery.sh +965 -0
- package/scripts/lib/pipeline-stages-intake.sh +1133 -0
- package/scripts/lib/pipeline-stages-monitor.sh +407 -0
- package/scripts/lib/pipeline-stages-review.sh +1022 -0
- package/scripts/lib/pipeline-stages.sh +59 -2929
- package/scripts/lib/pipeline-state.sh +36 -5
- package/scripts/lib/pipeline-util.sh +487 -0
- package/scripts/lib/policy-learner.sh +438 -0
- package/scripts/lib/process-reward.sh +493 -0
- package/scripts/lib/project-detect.sh +649 -0
- package/scripts/lib/quality-profile.sh +334 -0
- package/scripts/lib/recruit-commands.sh +885 -0
- package/scripts/lib/recruit-learning.sh +739 -0
- package/scripts/lib/recruit-roles.sh +648 -0
- package/scripts/lib/reward-aggregator.sh +458 -0
- package/scripts/lib/rl-optimizer.sh +362 -0
- package/scripts/lib/root-cause.sh +427 -0
- package/scripts/lib/scope-enforcement.sh +445 -0
- package/scripts/lib/session-restart.sh +493 -0
- package/scripts/lib/skill-memory.sh +300 -0
- package/scripts/lib/skill-registry.sh +775 -0
- package/scripts/lib/spec-driven.sh +476 -0
- package/scripts/lib/test-helpers.sh +18 -7
- package/scripts/lib/test-holdout.sh +429 -0
- package/scripts/lib/test-optimizer.sh +511 -0
- package/scripts/shipwright-file-suggest.sh +45 -0
- package/scripts/skills/adversarial-quality.md +61 -0
- package/scripts/skills/api-design.md +44 -0
- package/scripts/skills/architecture-design.md +50 -0
- package/scripts/skills/brainstorming.md +43 -0
- package/scripts/skills/data-pipeline.md +44 -0
- package/scripts/skills/deploy-safety.md +64 -0
- package/scripts/skills/documentation.md +38 -0
- package/scripts/skills/frontend-design.md +45 -0
- package/scripts/skills/generated/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
- package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
- package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
- package/scripts/skills/generated/cli-version-management.md +29 -0
- package/scripts/skills/generated/collection-system-validation.md +99 -0
- package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
- package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
- package/scripts/skills/generated/test-parallelization-detection.md +65 -0
- package/scripts/skills/observability.md +79 -0
- package/scripts/skills/performance.md +48 -0
- package/scripts/skills/pr-quality.md +49 -0
- package/scripts/skills/product-thinking.md +43 -0
- package/scripts/skills/security-audit.md +49 -0
- package/scripts/skills/systematic-debugging.md +40 -0
- package/scripts/skills/testing-strategy.md +47 -0
- package/scripts/skills/two-stage-review.md +52 -0
- package/scripts/skills/validation-thoroughness.md +55 -0
- package/scripts/sw +9 -3
- package/scripts/sw-activity.sh +9 -2
- package/scripts/sw-adaptive.sh +2 -1
- package/scripts/sw-adversarial.sh +2 -1
- package/scripts/sw-architecture-enforcer.sh +3 -1
- package/scripts/sw-auth.sh +12 -2
- package/scripts/sw-autonomous.sh +5 -1
- package/scripts/sw-changelog.sh +4 -1
- package/scripts/sw-checkpoint.sh +2 -1
- package/scripts/sw-ci.sh +5 -1
- package/scripts/sw-cleanup.sh +4 -26
- package/scripts/sw-code-review.sh +10 -4
- package/scripts/sw-connect.sh +2 -1
- package/scripts/sw-context.sh +2 -1
- package/scripts/sw-cost.sh +48 -3
- package/scripts/sw-daemon.sh +66 -9
- package/scripts/sw-dashboard.sh +3 -1
- package/scripts/sw-db.sh +59 -16
- package/scripts/sw-decide.sh +8 -2
- package/scripts/sw-decompose.sh +360 -17
- package/scripts/sw-deps.sh +4 -1
- package/scripts/sw-developer-simulation.sh +4 -1
- package/scripts/sw-discovery.sh +325 -2
- package/scripts/sw-doc-fleet.sh +4 -1
- package/scripts/sw-docs-agent.sh +3 -1
- package/scripts/sw-docs.sh +2 -1
- package/scripts/sw-doctor.sh +453 -2
- package/scripts/sw-dora.sh +4 -1
- package/scripts/sw-durable.sh +4 -3
- package/scripts/sw-e2e-orchestrator.sh +17 -16
- package/scripts/sw-eventbus.sh +7 -1
- package/scripts/sw-evidence.sh +364 -12
- package/scripts/sw-feedback.sh +550 -9
- package/scripts/sw-fix.sh +20 -1
- package/scripts/sw-fleet-discover.sh +6 -2
- package/scripts/sw-fleet-viz.sh +4 -1
- package/scripts/sw-fleet.sh +5 -1
- package/scripts/sw-github-app.sh +16 -3
- package/scripts/sw-github-checks.sh +3 -2
- package/scripts/sw-github-deploy.sh +3 -2
- package/scripts/sw-github-graphql.sh +18 -7
- package/scripts/sw-guild.sh +5 -1
- package/scripts/sw-heartbeat.sh +5 -30
- package/scripts/sw-hello.sh +67 -0
- package/scripts/sw-hygiene.sh +6 -1
- package/scripts/sw-incident.sh +265 -1
- package/scripts/sw-init.sh +18 -2
- package/scripts/sw-instrument.sh +10 -2
- package/scripts/sw-intelligence.sh +42 -6
- package/scripts/sw-jira.sh +5 -1
- package/scripts/sw-launchd.sh +2 -1
- package/scripts/sw-linear.sh +4 -1
- package/scripts/sw-logs.sh +4 -1
- package/scripts/sw-loop.sh +432 -1128
- package/scripts/sw-memory.sh +356 -2
- package/scripts/sw-mission-control.sh +6 -1
- package/scripts/sw-model-router.sh +481 -26
- package/scripts/sw-otel.sh +13 -4
- package/scripts/sw-oversight.sh +14 -5
- package/scripts/sw-patrol-meta.sh +334 -0
- package/scripts/sw-pipeline-composer.sh +5 -1
- package/scripts/sw-pipeline-vitals.sh +2 -1
- package/scripts/sw-pipeline.sh +53 -2664
- package/scripts/sw-pm.sh +12 -5
- package/scripts/sw-pr-lifecycle.sh +2 -1
- package/scripts/sw-predictive.sh +7 -1
- package/scripts/sw-prep.sh +185 -2
- package/scripts/sw-ps.sh +5 -25
- package/scripts/sw-public-dashboard.sh +15 -3
- package/scripts/sw-quality.sh +2 -1
- package/scripts/sw-reaper.sh +8 -25
- package/scripts/sw-recruit.sh +156 -2303
- package/scripts/sw-regression.sh +19 -12
- package/scripts/sw-release-manager.sh +3 -1
- package/scripts/sw-release.sh +4 -1
- package/scripts/sw-remote.sh +3 -1
- package/scripts/sw-replay.sh +7 -1
- package/scripts/sw-retro.sh +158 -1
- package/scripts/sw-review-rerun.sh +3 -1
- package/scripts/sw-scale.sh +10 -3
- package/scripts/sw-security-audit.sh +6 -1
- package/scripts/sw-self-optimize.sh +6 -3
- package/scripts/sw-session.sh +9 -3
- package/scripts/sw-setup.sh +3 -1
- package/scripts/sw-stall-detector.sh +406 -0
- package/scripts/sw-standup.sh +15 -7
- package/scripts/sw-status.sh +3 -1
- package/scripts/sw-strategic.sh +4 -1
- package/scripts/sw-stream.sh +7 -1
- package/scripts/sw-swarm.sh +18 -6
- package/scripts/sw-team-stages.sh +13 -6
- package/scripts/sw-templates.sh +5 -29
- package/scripts/sw-testgen.sh +7 -1
- package/scripts/sw-tmux-pipeline.sh +4 -1
- package/scripts/sw-tmux-role-color.sh +2 -0
- package/scripts/sw-tmux-status.sh +1 -1
- package/scripts/sw-tmux.sh +3 -1
- package/scripts/sw-trace.sh +3 -1
- package/scripts/sw-tracker-github.sh +3 -0
- package/scripts/sw-tracker-jira.sh +3 -0
- package/scripts/sw-tracker-linear.sh +3 -0
- package/scripts/sw-tracker.sh +3 -1
- package/scripts/sw-triage.sh +2 -1
- package/scripts/sw-upgrade.sh +3 -1
- package/scripts/sw-ux.sh +5 -2
- package/scripts/sw-webhook.sh +3 -1
- package/scripts/sw-widgets.sh +3 -1
- package/scripts/sw-worktree.sh +15 -3
- package/scripts/test-skill-injection.sh +1233 -0
- package/templates/pipelines/autonomous.json +27 -3
- package/templates/pipelines/cost-aware.json +34 -8
- package/templates/pipelines/deployed.json +12 -0
- package/templates/pipelines/enterprise.json +12 -0
- package/templates/pipelines/fast.json +6 -0
- package/templates/pipelines/full.json +27 -3
- package/templates/pipelines/hotfix.json +6 -0
- package/templates/pipelines/standard.json +12 -0
- package/templates/pipelines/tdd.json +12 -0
|
@@ -0,0 +1,1133 @@
|
|
|
1
|
+
# pipeline-stages-intake.sh — intake, plan, design stages
# Source from pipeline-stages.sh. Requires all pipeline globals and dependencies.
[[ -n "${_PIPELINE_STAGES_INTAKE_LOADED:-}" ]] && return 0
_PIPELINE_STAGES_INTAKE_LOADED=1

# Load intent analysis module for acceptance criteria generation.
# SCRIPT_DIR is normally inherited from pipeline-stages.sh and points at
# scripts/ (the parent of lib/): later stages use "${SCRIPT_DIR}/sw-context.sh"
# and this line uses "${SCRIPT_DIR}/lib/intent-analysis.sh".
# FIX: the previous fallback resolved to this file's OWN directory
# (scripts/lib), which made the source path scripts/lib/lib/intent-analysis.sh
# — a nonexistent file whose load failure was silently masked by "|| true".
# Fall back to the parent directory instead so a standalone source works too.
SCRIPT_DIR="${SCRIPT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
source "${SCRIPT_DIR}/lib/intent-analysis.sh" 2>/dev/null || true
|
|
10
|
+
# stage_intake — first pipeline stage: resolve the goal, classify the task,
# create a work branch, and seed pipeline artifacts.
# Globals read:    ISSUE_NUMBER, TEST_CMD, ARTIFACTS_DIR, color vars (BOLD/DIM/CYAN/RESET)
# Globals written: CURRENT_STAGE_ID, GOAL, ISSUE_BODY, ISSUE_LABELS (exported),
#                  ISSUE_MILESTONE, ISSUE_ASSIGNEES, GITHUB_ISSUE, TASK_TYPE,
#                  TEST_CMD, GIT_BRANCH, SPEC_DIR, HOLDOUT_DIR
# Depends on helpers sourced elsewhere: detect_project_lang, gh_get_issue_meta,
# gh_assign_self, gh_add_labels, detect_task_type, template_for_type,
# branch_prefix_for_type, gh_build_progress_body, gh_post_progress,
# save_artifact, log_stage, info/success/error.
# Returns: 1 only when the issue title cannot be fetched; 0 otherwise
# (most sub-steps are deliberately best-effort).
stage_intake() {
  CURRENT_STAGE_ID="intake"
  local project_lang
  project_lang=$(detect_project_lang)
  info "Project: ${BOLD}$project_lang${RESET}"

  # 1. Fetch issue metadata if --issue provided
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local meta
    meta=$(gh_get_issue_meta "$ISSUE_NUMBER")

    # Validate JSON before processing
    if [[ -n "$meta" ]] && jq empty <<< "$meta" 2>/dev/null; then
      # Each field is extracted defensively: a jq failure leaves the
      # variable empty rather than aborting the stage.
      GOAL=$(echo "$meta" | jq -r '.title // ""' 2>/dev/null) || GOAL=""
      ISSUE_BODY=$(echo "$meta" | jq -r '.body // ""' 2>/dev/null) || ISSUE_BODY=""
      ISSUE_LABELS=$(echo "$meta" | jq -r '[.labels[].name] | join(",")' 2>/dev/null) || ISSUE_LABELS=""
      ISSUE_MILESTONE=$(echo "$meta" | jq -r '.milestone.title // ""' 2>/dev/null || true)
      ISSUE_ASSIGNEES=$(echo "$meta" | jq -r '[.assignees[].login] | join(",")' 2>/dev/null || true)
      # Normalize the literal string "null" that jq can emit for missing fields.
      [[ "$ISSUE_MILESTONE" == "null" ]] && ISSUE_MILESTONE=""
      [[ "$ISSUE_LABELS" == "null" ]] && ISSUE_LABELS=""
      # Export for use by intelligence skip functions
      export ISSUE_LABELS
    else
      # Fallback: just get title
      GOAL=$(gh issue view "$ISSUE_NUMBER" --json title --jq '.title' 2>/dev/null) || {
        error "Failed to fetch issue #$ISSUE_NUMBER"
        return 1
      }
    fi

    GITHUB_ISSUE="#$ISSUE_NUMBER"
    info "Issue #$ISSUE_NUMBER: ${BOLD}$GOAL${RESET}"

    if [[ -n "$ISSUE_LABELS" ]]; then
      info "Labels: ${DIM}$ISSUE_LABELS${RESET}"
    fi
    if [[ -n "$ISSUE_MILESTONE" ]]; then
      info "Milestone: ${DIM}$ISSUE_MILESTONE${RESET}"
    fi

    # Self-assign
    gh_assign_self "$ISSUE_NUMBER"

    # Add in-progress label
    gh_add_labels "$ISSUE_NUMBER" "pipeline/in-progress"

    # 1a. Generate acceptance criteria from issue intent analysis
    # (only when intent-analysis.sh was successfully sourced at load time)
    if type analyze_intent >/dev/null 2>&1; then
      info "Analyzing issue intent and generating acceptance criteria..."
      if analyze_intent "$GOAL" "$ISSUE_BODY" "$ISSUE_LABELS" "$ARTIFACTS_DIR" 2>/dev/null; then
        local criteria_preview
        # Preview is only used as a non-empty sentinel that criteria exist.
        criteria_preview=$(format_acceptance_criteria_for_prompt "$ARTIFACTS_DIR" 2>/dev/null | head -10 || true)
        if [[ -n "$criteria_preview" ]]; then
          info "Acceptance criteria generated: ${DIM}acceptance-criteria.json${RESET}"
        fi
      fi
    fi
  fi

  # 2. Detect task type
  TASK_TYPE=$(detect_task_type "$GOAL")
  local suggested_template
  suggested_template=$(template_for_type "$TASK_TYPE")
  info "Detected: ${BOLD}$TASK_TYPE${RESET} → team template: ${CYAN}$suggested_template${RESET}"

  # 3. Auto-detect test command if not provided
  if [[ -z "$TEST_CMD" ]]; then
    TEST_CMD=$(detect_test_cmd)
    if [[ -n "$TEST_CMD" ]]; then
      info "Auto-detected test: ${DIM}$TEST_CMD${RESET}"
    fi
  fi

  # 4. Create branch with smart prefix
  # Slug: lowercase, non-alphanumerics collapsed to single dashes, capped
  # at 40 chars, trailing dash stripped, issue number appended.
  # NOTE(review): a goal starting with a symbol yields a LEADING dash in the
  # slug (e.g. "feat/-foo") — confirm whether that is acceptable upstream.
  local prefix
  prefix=$(branch_prefix_for_type "$TASK_TYPE")
  local slug
  slug=$(echo "$GOAL" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/--*/-/g' | cut -c1-40)
  slug="${slug%-}"
  [[ -n "$ISSUE_NUMBER" ]] && slug="${slug}-${ISSUE_NUMBER}"
  GIT_BRANCH="${prefix}/${slug}"

  # Reuse the branch if it already exists; checkout failure is tolerated
  # (we may already be on it).
  git checkout -b "$GIT_BRANCH" 2>/dev/null || {
    info "Branch $GIT_BRANCH exists, checking out"
    git checkout "$GIT_BRANCH" 2>/dev/null || true
  }
  success "Branch: ${BOLD}$GIT_BRANCH${RESET}"

  # 5. Post initial progress comment on GitHub issue
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local body
    body=$(gh_build_progress_body)
    gh_post_progress "$ISSUE_NUMBER" "$body"
  fi

  # 6. Save artifacts
  # jq -n builds intake.json from the collected globals; on jq failure an
  # empty string is saved and the "|| true" keeps the stage alive.
  save_artifact "intake.json" "$(jq -n \
    --arg goal "$GOAL" --arg type "$TASK_TYPE" \
    --arg template "$suggested_template" --arg branch "$GIT_BRANCH" \
    --arg issue "${GITHUB_ISSUE:-}" --arg lang "$project_lang" \
    --arg test_cmd "${TEST_CMD:-}" --arg labels "${ISSUE_LABELS:-}" \
    --arg milestone "${ISSUE_MILESTONE:-}" --arg body "${ISSUE_BODY:-}" \
    '{goal:$goal, type:$type, template:$template, branch:$branch,
      issue:$issue, language:$lang, test_cmd:$test_cmd,
      labels:$labels, milestone:$milestone, body:$body}' 2>/dev/null)" || true

  # 7. AI-powered skill analysis (replaces static classification when available)
  if type skill_analyze_issue >/dev/null 2>&1; then
    local _intel_json=""
    [[ -f "$ARTIFACTS_DIR/intelligence-analysis.json" ]] && _intel_json=$(cat "$ARTIFACTS_DIR/intelligence-analysis.json" 2>/dev/null || true)

    if skill_analyze_issue "$GOAL" "${ISSUE_BODY:-}" "${ISSUE_LABELS:-}" "$ARTIFACTS_DIR" "$_intel_json" 2>/dev/null; then
      info "Skill analysis: AI-powered skill plan written to skill-plan.json"
      # INTELLIGENCE_ISSUE_TYPE and INTELLIGENCE_COMPLEXITY are updated by skill_analyze_issue
    else
      info "Skill analysis: LLM unavailable — using label-based classification"
    fi
  fi

  # 8. Generate structured specification (dark factory: spec-driven development)
  if type spec_generate >/dev/null 2>&1; then
    SPEC_DIR="${ARTIFACTS_DIR}/specs"
    local spec_file
    spec_file=$(spec_generate "$GOAL" "${ISSUE_BODY:-}" "${ISSUE_NUMBER:-}" "" "$project_lang" 2>/dev/null) || true
    if [[ -n "$spec_file" && -f "$spec_file" ]]; then
      info "Spec generated: ${DIM}$(basename "$spec_file")${RESET}"
      save_artifact "spec.json" "$(cat "$spec_file")" || true
    fi
  fi

  # 9. Partition tests for holdout validation (dark factory: test-as-holdout)
  # HOLDOUT_VISIBLE_COUNT / HOLDOUT_SEALED_COUNT are presumably set by
  # holdout_partition / holdout_seal — defined elsewhere; verify.
  if type holdout_partition >/dev/null 2>&1; then
    HOLDOUT_DIR="${ARTIFACTS_DIR}/test-holdout"
    if holdout_partition "." "$project_lang" 2>/dev/null; then
      holdout_seal "." 2>/dev/null || true
      info "Test holdout: ${HOLDOUT_VISIBLE_COUNT:-0} visible, ${HOLDOUT_SEALED_COUNT:-0} sealed"
    fi
  fi

  # Multi-line summary recorded for the stage log / audit trail.
  log_stage "intake" "Goal: $GOAL
Type: $TASK_TYPE → template: $suggested_template
Branch: $GIT_BRANCH
Language: $project_lang
Test cmd: ${TEST_CMD:-none detected}
Issue type: ${INTELLIGENCE_ISSUE_TYPE:-backend}"
}
|
|
156
|
+
|
|
157
|
+
# ─── Spec Generation Stage (dark factory: spec-driven development) ──────────
# Runs between intake and plan. Enhances auto-generated spec with Claude,
# adding missing acceptance criteria, edge cases, security requirements,
# and affected files estimation.
#
# stage_spec_generation — ensure ${ARTIFACTS_DIR}/spec.json exists, optionally
# enhance it via the `claude` CLI, validate it, and emit richness metrics.
# Globals read:    SPEC_DRIVEN_ENABLED, ARTIFACTS_DIR, GOAL, ISSUE_BODY,
#                  ISSUE_LABELS, ISSUE_NUMBER, MODEL
# Globals written: CURRENT_STAGE_ID, SPEC_DIR
# Returns: 0 always (spec enhancement is strictly best-effort).
stage_spec_generation() {
  CURRENT_STAGE_ID="spec_generation"

  # Check if spec-driven is disabled via env or daemon config
  if [[ "${SPEC_DRIVEN_ENABLED:-true}" == "false" ]]; then
    info "Spec-driven development disabled — skipping spec generation"
    return 0
  fi

  local spec_file="${ARTIFACTS_DIR}/spec.json"

  # If no spec was generated at intake, generate a basic one now
  if [[ ! -f "$spec_file" ]]; then
    if type spec_generate >/dev/null 2>&1 && [[ -n "${GOAL:-}" ]]; then
      local project_lang
      project_lang=$(detect_project_lang 2>/dev/null || echo "unknown")
      SPEC_DIR="${ARTIFACTS_DIR}/specs"
      local generated
      generated=$(spec_generate "$GOAL" "${ISSUE_BODY:-}" "${ISSUE_NUMBER:-}" "" "$project_lang" 2>/dev/null) || true
      if [[ -n "$generated" && -f "$generated" ]]; then
        # Copy the generated spec into the canonical artifact location.
        save_artifact "spec.json" "$(cat "$generated")" || true
        spec_file="${ARTIFACTS_DIR}/spec.json"
      fi
    fi
  fi

  if [[ ! -f "$spec_file" ]]; then
    warn "No spec available — skipping spec generation enhancement"
    log_stage "spec_generation" "Skipped: no spec available"
    return 0
  fi

  # Enhance spec with Claude if available
  if command -v claude >/dev/null 2>&1; then
    info "Enhancing specification with AI analysis..."

    local current_spec
    current_spec=$(cat "$spec_file")

    # Prompt asks the model to return the SAME schema, enriched — the
    # validation below depends on that contract.
    local enhance_prompt="You are a specification analyst. Given the following auto-generated specification and issue context, enhance it by:

1. Adding missing acceptance criteria (each must have 'criterion', 'testable' (bool), and 'verification_method' fields)
2. Adding edge cases (each must have 'scenario' and 'expected_behavior' fields)
3. Adding security requirements (list of strings)
4. Estimating affected files (list of file path strings)

Return ONLY valid JSON matching the same schema as the input spec. Do not add commentary.

## Current Spec
${current_spec}

## Issue Context
Goal: ${GOAL:-}
Body: ${ISSUE_BODY:-}
Labels: ${ISSUE_LABELS:-}
"

    local enhanced_spec=""
    local spec_model="${MODEL:-sonnet}"
    # NOTE(review): 'claude --output-format json' may wrap the reply in a
    # result envelope (e.g. {"result": "..."}), in which case the schema
    # check below would reject every enhancement and silently keep the
    # original spec — confirm against the installed CLI version.
    enhanced_spec=$(claude -p "$enhance_prompt" --model "$spec_model" --output-format json 2>/dev/null) || true

    if [[ -n "$enhanced_spec" ]]; then
      # Validate the enhanced spec is valid JSON with required fields
      if echo "$enhanced_spec" | jq -e '.version and .title and .goals' >/dev/null 2>&1; then
        # Preserve metadata from original, update timestamp
        enhanced_spec=$(echo "$enhanced_spec" | jq --arg ts "$(now_iso)" '
          .metadata.enhanced_at = $ts |
          .metadata.enhanced = true
        ' 2>/dev/null) || enhanced_spec=""

        if [[ -n "$enhanced_spec" ]]; then
          save_artifact "spec.json" "$enhanced_spec" || true
          success "Spec enhanced with AI analysis"
        fi
      else
        warn "AI-enhanced spec failed validation — keeping original"
      fi
    else
      info "Claude unavailable or returned empty — keeping original spec"
    fi
  else
    info "Claude CLI not available — keeping original spec"
  fi

  # Validate final spec
  if type spec_validate >/dev/null 2>&1; then
    if ! spec_validate "$spec_file" 2>/dev/null; then
      warn "Spec validation failed after enhancement"
      # Retry: regenerate from scratch if validation failed
      # NOTE(review): the regenerated spec is saved but NOT re-validated —
      # confirm whether a second spec_validate pass is intended here.
      if type spec_generate >/dev/null 2>&1 && [[ -n "${GOAL:-}" ]]; then
        info "Retrying spec generation from scratch..."
        local project_lang
        project_lang=$(detect_project_lang 2>/dev/null || echo "unknown")
        SPEC_DIR="${ARTIFACTS_DIR}/specs"
        local regen
        regen=$(spec_generate "$GOAL" "${ISSUE_BODY:-}" "${ISSUE_NUMBER:-}" "" "$project_lang" 2>/dev/null) || true
        if [[ -n "$regen" && -f "$regen" ]]; then
          save_artifact "spec.json" "$(cat "$regen")" || true
          success "Spec regenerated successfully"
        fi
      fi
    fi
  fi

  # Count spec richness for logging
  # (jq's `null | length` is 0, so missing arrays count as zero)
  local criteria_count edge_count security_count affected_count
  criteria_count=$(jq '.acceptance_criteria | length' "$spec_file" 2>/dev/null || echo "0")
  edge_count=$(jq '.edge_cases | length' "$spec_file" 2>/dev/null || echo "0")
  security_count=$(jq '.security_requirements | length' "$spec_file" 2>/dev/null || echo "0")
  affected_count=$(jq '.affected_files | length' "$spec_file" 2>/dev/null || echo "0")

  # Publish stage metrics on the event bus.
  emit_event "spec_generation.completed" \
    "issue=${ISSUE_NUMBER:-0}" \
    "criteria=$criteria_count" \
    "edge_cases=$edge_count" \
    "security=$security_count" \
    "affected_files=$affected_count"

  log_stage "spec_generation" "Spec: ${criteria_count} criteria, ${edge_count} edge cases, ${security_count} security reqs, ${affected_count} affected files"
}
|
|
281
|
+
|
|
282
|
+
stage_plan() {
|
|
283
|
+
CURRENT_STAGE_ID="plan"
|
|
284
|
+
# Consume retry context if this is a retry attempt
|
|
285
|
+
local _retry_ctx="${ARTIFACTS_DIR}/.retry-context-plan.md"
|
|
286
|
+
local _retry_hints=""
|
|
287
|
+
if [[ -s "$_retry_ctx" ]]; then
|
|
288
|
+
_retry_hints=$(cat "$_retry_ctx" 2>/dev/null || true)
|
|
289
|
+
rm -f "$_retry_ctx" # consumed
|
|
290
|
+
fi
|
|
291
|
+
local plan_file="$ARTIFACTS_DIR/plan.md"
|
|
292
|
+
|
|
293
|
+
if ! command -v claude >/dev/null 2>&1; then
|
|
294
|
+
error "Claude CLI not found — cannot generate plan"
|
|
295
|
+
return 1
|
|
296
|
+
fi
|
|
297
|
+
|
|
298
|
+
info "Generating implementation plan..."
|
|
299
|
+
|
|
300
|
+
# Dark factory: inject RL approach suggestions based on past episodes
|
|
301
|
+
if type rl_suggest_approach >/dev/null 2>&1; then
|
|
302
|
+
local _rl_lang="${INTELLIGENCE_LANGUAGE:-unknown}"
|
|
303
|
+
local _rl_complexity="${INTELLIGENCE_COMPLEXITY:-medium}"
|
|
304
|
+
local _rl_type="${INTELLIGENCE_ISSUE_TYPE:-feature}"
|
|
305
|
+
local _rl_suggestions
|
|
306
|
+
_rl_suggestions=$(rl_suggest_approach "$_rl_lang" "$_rl_type" "$_rl_complexity" 2>/dev/null || true)
|
|
307
|
+
if [[ -n "$_rl_suggestions" ]]; then
|
|
308
|
+
info "RL suggests: $(echo "$_rl_suggestions" | head -1)"
|
|
309
|
+
fi
|
|
310
|
+
fi
|
|
311
|
+
|
|
312
|
+
# ── Gather context bundle (if context engine available) ──
|
|
313
|
+
local context_script="${SCRIPT_DIR}/sw-context.sh"
|
|
314
|
+
if [[ -x "$context_script" ]]; then
|
|
315
|
+
"$context_script" gather --goal "$GOAL" --stage plan 2>/dev/null || true
|
|
316
|
+
fi
|
|
317
|
+
|
|
318
|
+
# Gather rich architecture context (call-graph, dependencies)
|
|
319
|
+
local arch_context=""
|
|
320
|
+
if type gather_architecture_context &>/dev/null; then
|
|
321
|
+
arch_context=$(gather_architecture_context "${PROJECT_ROOT:-.}" 2>/dev/null || true)
|
|
322
|
+
fi
|
|
323
|
+
|
|
324
|
+
# Build rich prompt with all available context
|
|
325
|
+
local plan_prompt="You are an autonomous development agent. Analyze this codebase and create a detailed implementation plan.
|
|
326
|
+
|
|
327
|
+
## Goal
|
|
328
|
+
${GOAL}
|
|
329
|
+
"
|
|
330
|
+
|
|
331
|
+
# Add issue context
|
|
332
|
+
if [[ -n "$ISSUE_BODY" ]]; then
|
|
333
|
+
plan_prompt="${plan_prompt}
|
|
334
|
+
## Issue Description
|
|
335
|
+
${ISSUE_BODY}
|
|
336
|
+
"
|
|
337
|
+
fi
|
|
338
|
+
|
|
339
|
+
# Inject architecture context (import graph, modules, test map)
|
|
340
|
+
if [[ -n "$arch_context" ]]; then
|
|
341
|
+
arch_context=$(prune_context_section "architecture" "$arch_context" 5000)
|
|
342
|
+
plan_prompt="${plan_prompt}
|
|
343
|
+
## Architecture Context
|
|
344
|
+
${arch_context}
|
|
345
|
+
"
|
|
346
|
+
fi
|
|
347
|
+
|
|
348
|
+
# Inject context bundle from context engine (if available)
|
|
349
|
+
local _context_bundle="${ARTIFACTS_DIR}/context-bundle.md"
|
|
350
|
+
if [[ -f "$_context_bundle" ]]; then
|
|
351
|
+
local _cb_content
|
|
352
|
+
_cb_content=$(cat "$_context_bundle" 2>/dev/null | head -100 || true)
|
|
353
|
+
_cb_content=$(prune_context_section "context-bundle" "$_cb_content" 8000)
|
|
354
|
+
if [[ -n "$_cb_content" ]]; then
|
|
355
|
+
plan_prompt="${plan_prompt}
|
|
356
|
+
## Pipeline Context
|
|
357
|
+
${_cb_content}
|
|
358
|
+
"
|
|
359
|
+
fi
|
|
360
|
+
fi
|
|
361
|
+
|
|
362
|
+
# Inject intelligence memory context for similar past plans
|
|
363
|
+
if type intelligence_search_memory >/dev/null 2>&1; then
|
|
364
|
+
local plan_memory
|
|
365
|
+
plan_memory=$(intelligence_search_memory "plan stage for ${TASK_TYPE:-feature}: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
|
|
366
|
+
if [[ -n "$plan_memory" && "$plan_memory" != *'"results":[]'* && "$plan_memory" != *'"error"'* ]]; then
|
|
367
|
+
local memory_summary
|
|
368
|
+
memory_summary=$(echo "$plan_memory" | jq -r '.results[]? | "- \(.)"' 2>/dev/null | head -10 || true)
|
|
369
|
+
memory_summary=$(prune_context_section "memory" "$memory_summary" 10000)
|
|
370
|
+
if [[ -n "$memory_summary" ]]; then
|
|
371
|
+
plan_prompt="${plan_prompt}
|
|
372
|
+
## Historical Context (from previous pipelines)
|
|
373
|
+
Previous similar issues were planned as:
|
|
374
|
+
${memory_summary}
|
|
375
|
+
"
|
|
376
|
+
fi
|
|
377
|
+
fi
|
|
378
|
+
fi
|
|
379
|
+
|
|
380
|
+
# Self-aware pipeline: inject hint when plan stage has been failing recently
|
|
381
|
+
local plan_hint
|
|
382
|
+
plan_hint=$(get_stage_self_awareness_hint "plan" 2>/dev/null || true)
|
|
383
|
+
if [[ -n "$plan_hint" ]]; then
|
|
384
|
+
plan_prompt="${plan_prompt}
|
|
385
|
+
## Self-Assessment (recent plan stage performance)
|
|
386
|
+
${plan_hint}
|
|
387
|
+
"
|
|
388
|
+
fi
|
|
389
|
+
|
|
390
|
+
# Inject retry context from previous failed attempt
|
|
391
|
+
if [[ -n "$_retry_hints" ]]; then
|
|
392
|
+
plan_prompt="${plan_prompt}
|
|
393
|
+
## Previous Attempt Analysis (RETRY)
|
|
394
|
+
This stage previously failed. Analysis of the failure:
|
|
395
|
+
${_retry_hints}
|
|
396
|
+
|
|
397
|
+
Use this analysis to avoid repeating the same mistake. Address the root cause in your approach.
|
|
398
|
+
"
|
|
399
|
+
fi
|
|
400
|
+
|
|
401
|
+
# Inject cross-pipeline discoveries (from other concurrent/similar pipelines)
|
|
402
|
+
if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
|
|
403
|
+
local plan_discoveries
|
|
404
|
+
plan_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.json" 2>/dev/null | head -20 || true)
|
|
405
|
+
plan_discoveries=$(prune_context_section "discoveries" "$plan_discoveries" 3000)
|
|
406
|
+
if [[ -n "$plan_discoveries" ]]; then
|
|
407
|
+
plan_prompt="${plan_prompt}
|
|
408
|
+
## Discoveries from Other Pipelines
|
|
409
|
+
${plan_discoveries}
|
|
410
|
+
"
|
|
411
|
+
fi
|
|
412
|
+
fi
|
|
413
|
+
|
|
414
|
+
# Inject architecture patterns from intelligence layer
|
|
415
|
+
local repo_hash_plan
|
|
416
|
+
repo_hash_plan=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
|
|
417
|
+
local arch_file_plan="${HOME}/.shipwright/memory/${repo_hash_plan}/architecture.json"
|
|
418
|
+
if [[ -f "$arch_file_plan" ]]; then
|
|
419
|
+
local arch_patterns
|
|
420
|
+
arch_patterns=$(jq -r '
|
|
421
|
+
"Language: \(.language // "unknown")",
|
|
422
|
+
"Framework: \(.framework // "unknown")",
|
|
423
|
+
"Patterns: \((.patterns // []) | join(", "))",
|
|
424
|
+
"Rules: \((.rules // []) | join("; "))"
|
|
425
|
+
' "$arch_file_plan" 2>/dev/null || true)
|
|
426
|
+
arch_patterns=$(prune_context_section "intelligence" "$arch_patterns" 5000)
|
|
427
|
+
if [[ -n "$arch_patterns" ]]; then
|
|
428
|
+
plan_prompt="${plan_prompt}
|
|
429
|
+
## Architecture Patterns
|
|
430
|
+
${arch_patterns}
|
|
431
|
+
"
|
|
432
|
+
fi
|
|
433
|
+
fi
|
|
434
|
+
|
|
435
|
+
# Inject skill prompts based on issue type classification
|
|
436
|
+
# Prefer adaptive selection when available (combines body analysis + complexity weighting)
|
|
437
|
+
if type skill_select_adaptive >/dev/null 2>&1; then
|
|
438
|
+
local _skill_files _skill_prompts
|
|
439
|
+
_skill_files=$(skill_select_adaptive "${INTELLIGENCE_ISSUE_TYPE:-backend}" "plan" "${ISSUE_BODY:-}" "${INTELLIGENCE_COMPLEXITY:-5}" 2>/dev/null || true)
|
|
440
|
+
if [[ -n "$_skill_files" ]]; then
|
|
441
|
+
# Load content from skill files
|
|
442
|
+
_skill_prompts=$(while IFS= read -r _path; do
|
|
443
|
+
[[ -z "$_path" ]] && continue
|
|
444
|
+
[[ -f "$_path" ]] && cat "$_path" 2>/dev/null
|
|
445
|
+
done <<< "$_skill_files")
|
|
446
|
+
if [[ -n "$_skill_prompts" ]]; then
|
|
447
|
+
_skill_prompts=$(prune_context_section "skills" "$_skill_prompts" 8000)
|
|
448
|
+
plan_prompt="${plan_prompt}
|
|
449
|
+
## Skill Guidance (${INTELLIGENCE_ISSUE_TYPE:-backend} issue)
|
|
450
|
+
${_skill_prompts}
|
|
451
|
+
"
|
|
452
|
+
fi
|
|
453
|
+
fi
|
|
454
|
+
elif type skill_load_prompts >/dev/null 2>&1; then
|
|
455
|
+
# Fallback to static selection
|
|
456
|
+
local _skill_prompts
|
|
457
|
+
_skill_prompts=$(skill_load_prompts "${INTELLIGENCE_ISSUE_TYPE:-backend}" "plan" 2>/dev/null || true)
|
|
458
|
+
if [[ -n "$_skill_prompts" ]]; then
|
|
459
|
+
_skill_prompts=$(prune_context_section "skills" "$_skill_prompts" 8000)
|
|
460
|
+
plan_prompt="${plan_prompt}
|
|
461
|
+
## Skill Guidance (${INTELLIGENCE_ISSUE_TYPE:-backend} issue)
|
|
462
|
+
${_skill_prompts}
|
|
463
|
+
"
|
|
464
|
+
fi
|
|
465
|
+
fi
|
|
466
|
+
|
|
467
|
+
# Task-type-specific guidance
|
|
468
|
+
case "${TASK_TYPE:-feature}" in
|
|
469
|
+
bug)
|
|
470
|
+
plan_prompt="${plan_prompt}
|
|
471
|
+
## Task Type: Bug Fix
|
|
472
|
+
Focus on: reproducing the bug, identifying root cause, minimal targeted fix, regression tests.
|
|
473
|
+
" ;;
|
|
474
|
+
refactor)
|
|
475
|
+
plan_prompt="${plan_prompt}
|
|
476
|
+
## Task Type: Refactor
|
|
477
|
+
Focus on: preserving all existing behavior, incremental changes, comprehensive test coverage.
|
|
478
|
+
" ;;
|
|
479
|
+
security)
|
|
480
|
+
plan_prompt="${plan_prompt}
|
|
481
|
+
## Task Type: Security
|
|
482
|
+
Focus on: threat modeling, OWASP top 10, input validation, authentication/authorization.
|
|
483
|
+
" ;;
|
|
484
|
+
esac
|
|
485
|
+
|
|
486
|
+
# Add project context
|
|
487
|
+
local project_lang
|
|
488
|
+
project_lang=$(detect_project_lang)
|
|
489
|
+
plan_prompt="${plan_prompt}
|
|
490
|
+
## Project Context
|
|
491
|
+
- Language: ${project_lang}
|
|
492
|
+
- Test command: ${TEST_CMD:-not configured}
|
|
493
|
+
- Task type: ${TASK_TYPE:-feature}
|
|
494
|
+
|
|
495
|
+
## Context Efficiency
|
|
496
|
+
- Batch independent tool calls in parallel when possible
|
|
497
|
+
- Read specific file sections (offset/limit) instead of entire large files
|
|
498
|
+
- Use targeted grep searches — avoid scanning entire codebases into context
|
|
499
|
+
- Delegate multi-file analysis to subagents when available
|
|
500
|
+
|
|
501
|
+
## Required Output
|
|
502
|
+
Create a Markdown plan with these sections:
|
|
503
|
+
|
|
504
|
+
### Files to Modify
|
|
505
|
+
List every file to create or modify with full paths.
|
|
506
|
+
|
|
507
|
+
### Implementation Steps
|
|
508
|
+
Numbered steps in order of execution. Be specific about what code to write.
|
|
509
|
+
|
|
510
|
+
### Task Checklist
|
|
511
|
+
A checkbox list of discrete tasks that can be tracked:
|
|
512
|
+
- [ ] Task 1: Description
|
|
513
|
+
- [ ] Task 2: Description
|
|
514
|
+
(Include 5-15 tasks covering the full implementation)
|
|
515
|
+
|
|
516
|
+
### Testing Approach
|
|
517
|
+
How to verify the implementation works.
|
|
518
|
+
|
|
519
|
+
### Definition of Done
|
|
520
|
+
Checklist of completion criteria.
|
|
521
|
+
"
|
|
522
|
+
|
|
523
|
+
# Inject acceptance criteria as constraints
|
|
524
|
+
if type format_acceptance_criteria_for_prompt >/dev/null 2>&1; then
|
|
525
|
+
local criteria_section
|
|
526
|
+
criteria_section=$(format_acceptance_criteria_for_prompt "$ARTIFACTS_DIR" 2>/dev/null || true)
|
|
527
|
+
if [[ -n "$criteria_section" ]]; then
|
|
528
|
+
plan_prompt="${plan_prompt}
|
|
529
|
+
## Acceptance Criteria (External Constraints)
|
|
530
|
+
${criteria_section}
|
|
531
|
+
"
|
|
532
|
+
fi
|
|
533
|
+
fi
|
|
534
|
+
|
|
535
|
+
# Inject mandatory failure mode analysis requirement
|
|
536
|
+
if type inject_failure_mode_analysis >/dev/null 2>&1; then
|
|
537
|
+
local quality_profile_path="${PROJECT_ROOT:-.}/.claude/quality-profile.json"
|
|
538
|
+
plan_prompt=$(inject_failure_mode_analysis "$plan_prompt" "$quality_profile_path" 2>/dev/null || echo "$plan_prompt")
|
|
539
|
+
fi
|
|
540
|
+
|
|
541
|
+
# Inject skill prompts — prefer AI-powered plan, fallback to adaptive, then static
|
|
542
|
+
local _skill_prompts=""
|
|
543
|
+
if type skill_load_from_plan >/dev/null 2>&1; then
|
|
544
|
+
_skill_prompts=$(skill_load_from_plan "plan" 2>/dev/null || true)
|
|
545
|
+
elif type skill_select_adaptive >/dev/null 2>&1; then
|
|
546
|
+
local _skill_files
|
|
547
|
+
_skill_files=$(skill_select_adaptive "${INTELLIGENCE_ISSUE_TYPE:-backend}" "plan" "${ISSUE_BODY:-}" "${INTELLIGENCE_COMPLEXITY:-5}" 2>/dev/null || true)
|
|
548
|
+
if [[ -n "$_skill_files" ]]; then
|
|
549
|
+
_skill_prompts=$(while IFS= read -r _path; do
|
|
550
|
+
[[ -z "$_path" || ! -f "$_path" ]] && continue
|
|
551
|
+
cat "$_path" 2>/dev/null
|
|
552
|
+
done <<< "$_skill_files")
|
|
553
|
+
fi
|
|
554
|
+
elif type skill_load_prompts >/dev/null 2>&1; then
|
|
555
|
+
_skill_prompts=$(skill_load_prompts "${INTELLIGENCE_ISSUE_TYPE:-backend}" "plan" 2>/dev/null || true)
|
|
556
|
+
fi
|
|
557
|
+
if [[ -n "$_skill_prompts" ]]; then
|
|
558
|
+
_skill_prompts=$(prune_context_section "skills" "$_skill_prompts" 8000)
|
|
559
|
+
plan_prompt="${plan_prompt}
|
|
560
|
+
## Skill Guidance (${INTELLIGENCE_ISSUE_TYPE:-backend} issue, AI-selected)
|
|
561
|
+
${_skill_prompts}
|
|
562
|
+
"
|
|
563
|
+
fi
|
|
564
|
+
|
|
565
|
+
# Guard total prompt size
|
|
566
|
+
plan_prompt=$(guard_prompt_size "plan" "$plan_prompt")
|
|
567
|
+
|
|
568
|
+
local plan_model
|
|
569
|
+
plan_model=$(jq -r --arg id "plan" '(.stages[] | select(.id == $id) | .config.model) // .defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
570
|
+
[[ -n "$MODEL" ]] && plan_model="$MODEL"
|
|
571
|
+
[[ -z "$plan_model" || "$plan_model" == "null" ]] && plan_model="opus"
|
|
572
|
+
# Intelligence model routing (when no explicit CLI --model override)
|
|
573
|
+
if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
|
|
574
|
+
plan_model="$CLAUDE_MODEL"
|
|
575
|
+
fi
|
|
576
|
+
|
|
577
|
+
local _token_log="${ARTIFACTS_DIR}/.claude-tokens-plan.log"
|
|
578
|
+
local _plan_flags
|
|
579
|
+
_plan_flags="$(_pipeline_claude_flags "plan" "$plan_model")"
|
|
580
|
+
# shellcheck disable=SC2086
|
|
581
|
+
claude --print $_plan_flags --max-turns "$(_smart_int "max_turns.pipeline_stage" 25)" --dangerously-skip-permissions \
|
|
582
|
+
"$plan_prompt" < /dev/null > "$plan_file" 2>"$_token_log" || true
|
|
583
|
+
parse_claude_tokens "$_token_log"
|
|
584
|
+
|
|
585
|
+
# Claude may write to disk via tools instead of stdout — rescue those files
|
|
586
|
+
local _plan_rescue
|
|
587
|
+
for _plan_rescue in "${PROJECT_ROOT}/PLAN.md" "${PROJECT_ROOT}/plan.md" \
|
|
588
|
+
"${PROJECT_ROOT}/implementation-plan.md"; do
|
|
589
|
+
if [[ -s "$_plan_rescue" ]] && [[ $(wc -l < "$plan_file" 2>/dev/null | xargs) -lt 10 ]]; then
|
|
590
|
+
info "Plan written to ${_plan_rescue} via tools — adopting as plan artifact"
|
|
591
|
+
cat "$_plan_rescue" >> "$plan_file"
|
|
592
|
+
rm -f "$_plan_rescue"
|
|
593
|
+
break
|
|
594
|
+
fi
|
|
595
|
+
done
|
|
596
|
+
|
|
597
|
+
if [[ ! -s "$plan_file" ]]; then
|
|
598
|
+
error "Plan generation failed — empty output"
|
|
599
|
+
return 1
|
|
600
|
+
fi
|
|
601
|
+
|
|
602
|
+
# Validate plan content — detect API/CLI errors masquerading as plans
|
|
603
|
+
local _plan_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
|
|
604
|
+
_plan_fatal="${_plan_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
|
|
605
|
+
if grep -qiE "$_plan_fatal" "$plan_file" 2>/dev/null; then
|
|
606
|
+
error "Plan stage produced API/CLI error instead of a plan: $(head -1 "$plan_file" | cut -c1-100)"
|
|
607
|
+
return 1
|
|
608
|
+
fi
|
|
609
|
+
|
|
610
|
+
local line_count
|
|
611
|
+
line_count=$(wc -l < "$plan_file" | xargs)
|
|
612
|
+
if [[ "$line_count" -lt 3 ]]; then
|
|
613
|
+
error "Plan too short (${line_count} lines) — likely an error, not a real plan"
|
|
614
|
+
return 1
|
|
615
|
+
fi
|
|
616
|
+
info "Plan saved: ${DIM}$plan_file${RESET} (${line_count} lines)"
|
|
617
|
+
|
|
618
|
+
# Extract task checklist for GitHub issue and task tracking
|
|
619
|
+
local checklist
|
|
620
|
+
checklist=$(sed -n '/### Task Checklist/,/^###/p' "$plan_file" 2>/dev/null | \
|
|
621
|
+
grep '^\s*- \[' | head -20)
|
|
622
|
+
|
|
623
|
+
if [[ -z "$checklist" ]]; then
|
|
624
|
+
# Fallback: extract any checkbox lines
|
|
625
|
+
checklist=$(grep '^\s*- \[' "$plan_file" 2>/dev/null | head -20)
|
|
626
|
+
fi
|
|
627
|
+
|
|
628
|
+
# Write local task file for Claude Code build stage
|
|
629
|
+
if [[ -n "$checklist" ]]; then
|
|
630
|
+
cat > "$TASKS_FILE" <<TASKS_EOF
|
|
631
|
+
# Pipeline Tasks — ${GOAL}
|
|
632
|
+
|
|
633
|
+
## Implementation Checklist
|
|
634
|
+
${checklist}
|
|
635
|
+
|
|
636
|
+
## Context
|
|
637
|
+
- Pipeline: ${PIPELINE_NAME}
|
|
638
|
+
- Branch: ${GIT_BRANCH}
|
|
639
|
+
- Issue: ${GITHUB_ISSUE:-none}
|
|
640
|
+
- Generated: $(now_iso)
|
|
641
|
+
TASKS_EOF
|
|
642
|
+
info "Task list: ${DIM}$TASKS_FILE${RESET} ($(echo "$checklist" | wc -l | xargs) tasks)"
|
|
643
|
+
fi
|
|
644
|
+
|
|
645
|
+
# Post plan + task checklist to GitHub issue
|
|
646
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
647
|
+
local plan_summary
|
|
648
|
+
plan_summary=$(head -50 "$plan_file")
|
|
649
|
+
local gh_body="## 📋 Implementation Plan
|
|
650
|
+
|
|
651
|
+
<details>
|
|
652
|
+
<summary>Click to expand full plan (${line_count} lines)</summary>
|
|
653
|
+
|
|
654
|
+
${plan_summary}
|
|
655
|
+
|
|
656
|
+
</details>
|
|
657
|
+
"
|
|
658
|
+
if [[ -n "$checklist" ]]; then
|
|
659
|
+
gh_body="${gh_body}
|
|
660
|
+
## ✅ Task Checklist
|
|
661
|
+
${checklist}
|
|
662
|
+
"
|
|
663
|
+
fi
|
|
664
|
+
|
|
665
|
+
gh_body="${gh_body}
|
|
666
|
+
---
|
|
667
|
+
_Generated by \`shipwright pipeline\` at $(now_iso)_"
|
|
668
|
+
|
|
669
|
+
gh_comment_issue "$ISSUE_NUMBER" "$gh_body"
|
|
670
|
+
info "Plan posted to issue #$ISSUE_NUMBER"
|
|
671
|
+
fi
|
|
672
|
+
|
|
673
|
+
# Push plan to wiki
|
|
674
|
+
gh_wiki_page "Pipeline-Plan-${ISSUE_NUMBER:-inline}" "$(<"$plan_file")"
|
|
675
|
+
|
|
676
|
+
# Generate Claude Code task list
|
|
677
|
+
local cc_tasks_file="$PROJECT_ROOT/.claude/tasks.md"
|
|
678
|
+
if [[ -n "$checklist" ]]; then
|
|
679
|
+
cat > "$cc_tasks_file" <<CC_TASKS_EOF
|
|
680
|
+
# Tasks — ${GOAL}
|
|
681
|
+
|
|
682
|
+
## Status: In Progress
|
|
683
|
+
Pipeline: ${PIPELINE_NAME} | Branch: ${GIT_BRANCH}
|
|
684
|
+
|
|
685
|
+
## Checklist
|
|
686
|
+
${checklist}
|
|
687
|
+
|
|
688
|
+
## Notes
|
|
689
|
+
- Generated from pipeline plan at $(now_iso)
|
|
690
|
+
- Pipeline will update status as tasks complete
|
|
691
|
+
CC_TASKS_EOF
|
|
692
|
+
info "Claude Code tasks: ${DIM}$cc_tasks_file${RESET}"
|
|
693
|
+
fi
|
|
694
|
+
|
|
695
|
+
# Extract definition of done for quality gates
|
|
696
|
+
sed -n '/[Dd]efinition [Oo]f [Dd]one/,/^#/p' "$plan_file" | head -20 > "$ARTIFACTS_DIR/dod.md" 2>/dev/null || true
|
|
697
|
+
|
|
698
|
+
# ── Plan Validation Gate ──
|
|
699
|
+
# Ask Claude to validate the plan before proceeding
|
|
700
|
+
if command -v claude >/dev/null 2>&1 && [[ -s "$plan_file" ]]; then
|
|
701
|
+
# Pre-validate: check failure mode analysis is present and adequate
|
|
702
|
+
local failure_mode_status=""
|
|
703
|
+
if type get_failure_mode_validation_status >/dev/null 2>&1; then
|
|
704
|
+
failure_mode_status=$(get_failure_mode_validation_status "$plan_file" 2>/dev/null || echo "missing_section")
|
|
705
|
+
if [[ "$failure_mode_status" != "valid" ]]; then
|
|
706
|
+
warn "Plan missing adequate failure mode analysis (status: ${failure_mode_status})"
|
|
707
|
+
# Log this as a validation rejection reason
|
|
708
|
+
echo "missing_failure_analysis" > "$ARTIFACTS_DIR/.plan-failure-sig.txt"
|
|
709
|
+
fi
|
|
710
|
+
fi
|
|
711
|
+
|
|
712
|
+
local validation_attempts=0
|
|
713
|
+
local max_validation_attempts=2
|
|
714
|
+
local plan_valid=false
|
|
715
|
+
|
|
716
|
+
while [[ "$validation_attempts" -lt "$max_validation_attempts" ]]; do
|
|
717
|
+
validation_attempts=$((validation_attempts + 1))
|
|
718
|
+
info "Validating plan (attempt ${validation_attempts}/${max_validation_attempts})..."
|
|
719
|
+
|
|
720
|
+
# Build enriched validation prompt with learned context
|
|
721
|
+
local validation_extra=""
|
|
722
|
+
|
|
723
|
+
# Inject rejected plan history from memory
|
|
724
|
+
if type intelligence_search_memory >/dev/null 2>&1; then
|
|
725
|
+
local rejected_plans
|
|
726
|
+
rejected_plans=$(intelligence_search_memory "rejected plan validation failures for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
|
|
727
|
+
if [[ -n "$rejected_plans" ]]; then
|
|
728
|
+
validation_extra="${validation_extra}
|
|
729
|
+
## Previously Rejected Plans
|
|
730
|
+
These issues were found in past plan validations for similar tasks:
|
|
731
|
+
${rejected_plans}
|
|
732
|
+
"
|
|
733
|
+
fi
|
|
734
|
+
fi
|
|
735
|
+
|
|
736
|
+
# Inject repo conventions contextually
|
|
737
|
+
local claudemd="$PROJECT_ROOT/.claude/CLAUDE.md"
|
|
738
|
+
if [[ -f "$claudemd" ]]; then
|
|
739
|
+
local conventions_summary
|
|
740
|
+
conventions_summary=$(head -100 "$claudemd" 2>/dev/null | grep -E '^##|^-|^\*' | head -15 || true)
|
|
741
|
+
if [[ -n "$conventions_summary" ]]; then
|
|
742
|
+
validation_extra="${validation_extra}
|
|
743
|
+
## Repo Conventions
|
|
744
|
+
${conventions_summary}
|
|
745
|
+
"
|
|
746
|
+
fi
|
|
747
|
+
fi
|
|
748
|
+
|
|
749
|
+
# Inject complexity estimate
|
|
750
|
+
local complexity_hint=""
|
|
751
|
+
if [[ -n "${INTELLIGENCE_COMPLEXITY:-}" && "${INTELLIGENCE_COMPLEXITY:-0}" -gt 0 ]]; then
|
|
752
|
+
complexity_hint="This is estimated as complexity ${INTELLIGENCE_COMPLEXITY}/10. Plans for this complexity typically need ${INTELLIGENCE_COMPLEXITY} or more tasks."
|
|
753
|
+
fi
|
|
754
|
+
|
|
755
|
+
local validation_prompt="You are a plan validator. Review this implementation plan and determine if it is valid.
|
|
756
|
+
|
|
757
|
+
## Goal
|
|
758
|
+
${GOAL}
|
|
759
|
+
${complexity_hint:+
|
|
760
|
+
## Complexity Estimate
|
|
761
|
+
${complexity_hint}
|
|
762
|
+
}
|
|
763
|
+
## Plan
|
|
764
|
+
$(cat "$plan_file")
|
|
765
|
+
${validation_extra}
|
|
766
|
+
Evaluate:
|
|
767
|
+
1. Are all requirements from the goal addressed?
|
|
768
|
+
2. Is the plan decomposed into clear, achievable tasks?
|
|
769
|
+
3. Are the implementation steps specific enough to execute?
|
|
770
|
+
|
|
771
|
+
Respond with EXACTLY one of these on the first line:
|
|
772
|
+
VALID: true
|
|
773
|
+
VALID: false
|
|
774
|
+
|
|
775
|
+
Then explain your reasoning briefly."
|
|
776
|
+
|
|
777
|
+
local validation_model="${plan_model:-opus}"
|
|
778
|
+
local validation_result
|
|
779
|
+
local _val_flags
|
|
780
|
+
_val_flags="$(_pipeline_claude_flags "plan" "$validation_model")"
|
|
781
|
+
# shellcheck disable=SC2086
|
|
782
|
+
validation_result=$(claude --print --output-format text $_val_flags -p "$validation_prompt" < /dev/null 2>"${ARTIFACTS_DIR}/.claude-tokens-plan-validate.log" || true)
|
|
783
|
+
parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-plan-validate.log"
|
|
784
|
+
|
|
785
|
+
# Save validation result
|
|
786
|
+
echo "$validation_result" > "$ARTIFACTS_DIR/plan-validation.md"
|
|
787
|
+
|
|
788
|
+
if echo "$validation_result" | head -5 | grep -qi "VALID: true"; then
|
|
789
|
+
success "Plan validation passed"
|
|
790
|
+
plan_valid=true
|
|
791
|
+
break
|
|
792
|
+
fi
|
|
793
|
+
|
|
794
|
+
warn "Plan validation failed (attempt ${validation_attempts}/${max_validation_attempts})"
|
|
795
|
+
|
|
796
|
+
# Analyze failure mode to decide how to recover
|
|
797
|
+
local failure_mode="unknown"
|
|
798
|
+
local validation_lower
|
|
799
|
+
validation_lower=$(echo "$validation_result" | tr '[:upper:]' '[:lower:]')
|
|
800
|
+
if echo "$validation_lower" | grep -qE 'failure mode|risk.*analysis|fail|risk'; then
|
|
801
|
+
failure_mode="missing_failure_analysis"
|
|
802
|
+
elif echo "$validation_lower" | grep -qE 'requirements? unclear|goal.*vague|ambiguous|underspecified'; then
|
|
803
|
+
failure_mode="requirements_unclear"
|
|
804
|
+
elif echo "$validation_lower" | grep -qE 'insufficient detail|not specific|too high.level|missing.*steps|lacks.*detail'; then
|
|
805
|
+
failure_mode="insufficient_detail"
|
|
806
|
+
elif echo "$validation_lower" | grep -qE 'scope too (large|broad)|too many|overly complex|break.*down'; then
|
|
807
|
+
failure_mode="scope_too_large"
|
|
808
|
+
fi
|
|
809
|
+
|
|
810
|
+
emit_event "plan.validation_failure" \
|
|
811
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
812
|
+
"attempt=$validation_attempts" \
|
|
813
|
+
"failure_mode=$failure_mode"
|
|
814
|
+
|
|
815
|
+
# Track repeated failures — escalate if stuck in a loop
|
|
816
|
+
if [[ -f "$ARTIFACTS_DIR/.plan-failure-sig.txt" ]]; then
|
|
817
|
+
local prev_sig
|
|
818
|
+
prev_sig=$(cat "$ARTIFACTS_DIR/.plan-failure-sig.txt" 2>/dev/null || true)
|
|
819
|
+
if [[ "$failure_mode" == "$prev_sig" && "$failure_mode" != "unknown" ]]; then
|
|
820
|
+
warn "Same validation failure mode repeated ($failure_mode) — escalating"
|
|
821
|
+
emit_event "plan.validation_escalated" \
|
|
822
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
823
|
+
"failure_mode=$failure_mode"
|
|
824
|
+
break
|
|
825
|
+
fi
|
|
826
|
+
fi
|
|
827
|
+
echo "$failure_mode" > "$ARTIFACTS_DIR/.plan-failure-sig.txt"
|
|
828
|
+
|
|
829
|
+
if [[ "$validation_attempts" -lt "$max_validation_attempts" ]]; then
|
|
830
|
+
info "Regenerating plan with validation feedback (mode: ${failure_mode})..."
|
|
831
|
+
|
|
832
|
+
# Tailor regeneration prompt based on failure mode
|
|
833
|
+
local failure_guidance=""
|
|
834
|
+
case "$failure_mode" in
|
|
835
|
+
missing_failure_analysis)
|
|
836
|
+
failure_guidance="The validator found the failure mode analysis insufficient. The plan MUST include a Failure Mode Analysis section with at least 3 concrete failure modes specific to this codebase (not generic). For each major component, identify runtime failures, concurrency risks, scale risks, and rollback story. Reference specific architecture constraints and project technologies." ;;
|
|
837
|
+
requirements_unclear)
|
|
838
|
+
failure_guidance="The validator found the requirements unclear. Add more specific acceptance criteria, input/output examples, and concrete success metrics." ;;
|
|
839
|
+
insufficient_detail)
|
|
840
|
+
failure_guidance="The validator found the plan lacks detail. Break each task into smaller, more specific implementation steps with exact file paths and function names." ;;
|
|
841
|
+
scope_too_large)
|
|
842
|
+
failure_guidance="The validator found the scope too large. Focus on the minimal viable implementation and defer non-essential features to follow-up tasks." ;;
|
|
843
|
+
esac
|
|
844
|
+
|
|
845
|
+
local regen_prompt="${plan_prompt}
|
|
846
|
+
|
|
847
|
+
IMPORTANT: A previous plan was rejected by validation. Issues found:
|
|
848
|
+
$(echo "$validation_result" | tail -20)
|
|
849
|
+
${failure_guidance:+
|
|
850
|
+
GUIDANCE: ${failure_guidance}}
|
|
851
|
+
|
|
852
|
+
Fix these issues in the new plan."
|
|
853
|
+
|
|
854
|
+
local _regen_flags
|
|
855
|
+
_regen_flags="$(_pipeline_claude_flags "plan" "$plan_model")"
|
|
856
|
+
# shellcheck disable=SC2086
|
|
857
|
+
claude --print $_regen_flags --max-turns "$(_smart_int "max_turns.pipeline_stage" 25)" \
|
|
858
|
+
"$regen_prompt" < /dev/null > "$plan_file" 2>"$_token_log" || true
|
|
859
|
+
parse_claude_tokens "$_token_log"
|
|
860
|
+
|
|
861
|
+
line_count=$(wc -l < "$plan_file" | xargs)
|
|
862
|
+
info "Regenerated plan: ${DIM}$plan_file${RESET} (${line_count} lines)"
|
|
863
|
+
fi
|
|
864
|
+
done
|
|
865
|
+
|
|
866
|
+
if [[ "$plan_valid" != "true" ]]; then
|
|
867
|
+
warn "Plan validation did not pass after ${max_validation_attempts} attempts — proceeding anyway"
|
|
868
|
+
fi
|
|
869
|
+
|
|
870
|
+
emit_event "plan.validated" \
|
|
871
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
872
|
+
"valid=${plan_valid}" \
|
|
873
|
+
"attempts=${validation_attempts}"
|
|
874
|
+
fi
|
|
875
|
+
|
|
876
|
+
log_stage "plan" "Generated plan.md (${line_count} lines, $(echo "$checklist" | wc -l | xargs) tasks)"
|
|
877
|
+
}
|
|
878
|
+
|
|
879
|
+
stage_design() {
|
|
880
|
+
CURRENT_STAGE_ID="design"
|
|
881
|
+
# Consume retry context if this is a retry attempt
|
|
882
|
+
local _retry_ctx="${ARTIFACTS_DIR}/.retry-context-design.md"
|
|
883
|
+
local _design_retry_hints=""
|
|
884
|
+
if [[ -s "$_retry_ctx" ]]; then
|
|
885
|
+
_design_retry_hints=$(cat "$_retry_ctx" 2>/dev/null || true)
|
|
886
|
+
rm -f "$_retry_ctx"
|
|
887
|
+
fi
|
|
888
|
+
local plan_file="$ARTIFACTS_DIR/plan.md"
|
|
889
|
+
local design_file="$ARTIFACTS_DIR/design.md"
|
|
890
|
+
|
|
891
|
+
if [[ ! -s "$plan_file" ]]; then
|
|
892
|
+
warn "No plan found — skipping design stage"
|
|
893
|
+
return 0
|
|
894
|
+
fi
|
|
895
|
+
|
|
896
|
+
if ! command -v claude >/dev/null 2>&1; then
|
|
897
|
+
error "Claude CLI not found — cannot generate design"
|
|
898
|
+
return 1
|
|
899
|
+
fi
|
|
900
|
+
|
|
901
|
+
info "Generating Architecture Decision Record..."
|
|
902
|
+
|
|
903
|
+
# Gather rich architecture context (call-graph, dependencies)
|
|
904
|
+
local arch_struct_context=""
|
|
905
|
+
if type gather_architecture_context &>/dev/null; then
|
|
906
|
+
arch_struct_context=$(gather_architecture_context "${PROJECT_ROOT:-.}" 2>/dev/null || true)
|
|
907
|
+
fi
|
|
908
|
+
arch_struct_context=$(prune_context_section "architecture" "$arch_struct_context" 5000)
|
|
909
|
+
|
|
910
|
+
# Memory integration — inject context if memory system available
|
|
911
|
+
local memory_context=""
|
|
912
|
+
if type intelligence_search_memory >/dev/null 2>&1; then
|
|
913
|
+
local mem_dir="${HOME}/.shipwright/memory"
|
|
914
|
+
memory_context=$(intelligence_search_memory "design stage architecture patterns for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
|
|
915
|
+
fi
|
|
916
|
+
if [[ -z "$memory_context" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
|
|
917
|
+
memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "design" 2>/dev/null) || true
|
|
918
|
+
fi
|
|
919
|
+
memory_context=$(prune_context_section "memory" "$memory_context" 10000)
|
|
920
|
+
|
|
921
|
+
# Inject cross-pipeline discoveries for design stage
|
|
922
|
+
local design_discoveries=""
|
|
923
|
+
if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
|
|
924
|
+
design_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
|
|
925
|
+
fi
|
|
926
|
+
design_discoveries=$(prune_context_section "discoveries" "$design_discoveries" 3000)
|
|
927
|
+
|
|
928
|
+
# Inject architecture model patterns if available
|
|
929
|
+
local arch_context=""
|
|
930
|
+
local repo_hash
|
|
931
|
+
repo_hash=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
|
|
932
|
+
local arch_model_file="${HOME}/.shipwright/memory/${repo_hash}/architecture.json"
|
|
933
|
+
if [[ -f "$arch_model_file" ]]; then
|
|
934
|
+
local arch_patterns
|
|
935
|
+
arch_patterns=$(jq -r '
|
|
936
|
+
[.patterns // [] | .[] | "- \(.name // "unnamed"): \(.description // "no description")"] | join("\n")
|
|
937
|
+
' "$arch_model_file" 2>/dev/null) || true
|
|
938
|
+
local arch_layers
|
|
939
|
+
arch_layers=$(jq -r '
|
|
940
|
+
[.layers // [] | .[] | "- \(.name // "unnamed"): \(.path // "")"] | join("\n")
|
|
941
|
+
' "$arch_model_file" 2>/dev/null) || true
|
|
942
|
+
if [[ -n "$arch_patterns" || -n "$arch_layers" ]]; then
|
|
943
|
+
arch_context="Previous designs in this repo follow these patterns:
|
|
944
|
+
${arch_patterns:+Patterns:
|
|
945
|
+
${arch_patterns}
|
|
946
|
+
}${arch_layers:+Layers:
|
|
947
|
+
${arch_layers}}"
|
|
948
|
+
fi
|
|
949
|
+
fi
|
|
950
|
+
arch_context=$(prune_context_section "intelligence" "$arch_context" 5000)
|
|
951
|
+
|
|
952
|
+
# Inject rejected design approaches and anti-patterns from memory
|
|
953
|
+
local design_antipatterns=""
|
|
954
|
+
if type intelligence_search_memory >/dev/null 2>&1; then
|
|
955
|
+
local rejected_designs
|
|
956
|
+
rejected_designs=$(intelligence_search_memory "rejected design approaches anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
|
|
957
|
+
if [[ -n "$rejected_designs" ]]; then
|
|
958
|
+
rejected_designs=$(prune_context_section "antipatterns" "$rejected_designs" 5000)
|
|
959
|
+
design_antipatterns="
|
|
960
|
+
## Rejected Approaches (from past reviews)
|
|
961
|
+
These design approaches were rejected in past reviews. Avoid repeating them:
|
|
962
|
+
${rejected_designs}
|
|
963
|
+
"
|
|
964
|
+
fi
|
|
965
|
+
fi
|
|
966
|
+
|
|
967
|
+
# Build design prompt with plan + project context
|
|
968
|
+
local project_lang
|
|
969
|
+
project_lang=$(detect_project_lang)
|
|
970
|
+
|
|
971
|
+
local design_prompt="You are a senior software architect. Review the implementation plan below and produce an Architecture Decision Record (ADR).
|
|
972
|
+
|
|
973
|
+
## Goal
|
|
974
|
+
${GOAL}
|
|
975
|
+
|
|
976
|
+
## Implementation Plan
|
|
977
|
+
$(cat "$plan_file")
|
|
978
|
+
|
|
979
|
+
## Project Context
|
|
980
|
+
- Language: ${project_lang}
|
|
981
|
+
- Test command: ${TEST_CMD:-not configured}
|
|
982
|
+
- Task type: ${TASK_TYPE:-feature}
|
|
983
|
+
${arch_struct_context:+
|
|
984
|
+
## Architecture Context (import graph, modules, test map)
|
|
985
|
+
${arch_struct_context}
|
|
986
|
+
}${memory_context:+
|
|
987
|
+
## Historical Context (from memory)
|
|
988
|
+
${memory_context}
|
|
989
|
+
}${arch_context:+
|
|
990
|
+
## Architecture Model (from previous designs)
|
|
991
|
+
${arch_context}
|
|
992
|
+
}${design_antipatterns}${design_discoveries:+
|
|
993
|
+
## Discoveries from Other Pipelines
|
|
994
|
+
${design_discoveries}
|
|
995
|
+
}
|
|
996
|
+
## Required Output — Architecture Decision Record
|
|
997
|
+
|
|
998
|
+
Produce this EXACT format:
|
|
999
|
+
|
|
1000
|
+
# Design: ${GOAL}
|
|
1001
|
+
|
|
1002
|
+
## Context
|
|
1003
|
+
[What problem we're solving, constraints from the codebase]
|
|
1004
|
+
|
|
1005
|
+
## Decision
|
|
1006
|
+
[The chosen approach — be specific about patterns, data flow, error handling]
|
|
1007
|
+
|
|
1008
|
+
## Alternatives Considered
|
|
1009
|
+
1. [Alternative A] — Pros: ... / Cons: ...
|
|
1010
|
+
2. [Alternative B] — Pros: ... / Cons: ...
|
|
1011
|
+
|
|
1012
|
+
## Implementation Plan
|
|
1013
|
+
- Files to create: [list with full paths]
|
|
1014
|
+
- Files to modify: [list with full paths]
|
|
1015
|
+
- Dependencies: [new deps if any]
|
|
1016
|
+
- Risk areas: [fragile code, performance concerns]
|
|
1017
|
+
|
|
1018
|
+
## Validation Criteria
|
|
1019
|
+
- [ ] [How we'll know the design is correct — testable criteria]
|
|
1020
|
+
- [ ] [Additional validation items]
|
|
1021
|
+
|
|
1022
|
+
Be concrete and specific. Reference actual file paths in the codebase. Consider edge cases and failure modes."
|
|
1023
|
+
|
|
1024
|
+
# Inject skill prompts for design stage
|
|
1025
|
+
local _skill_prompts=""
|
|
1026
|
+
if type skill_load_from_plan >/dev/null 2>&1; then
|
|
1027
|
+
_skill_prompts=$(skill_load_from_plan "design" 2>/dev/null || true)
|
|
1028
|
+
elif type skill_select_adaptive >/dev/null 2>&1; then
|
|
1029
|
+
local _skill_files
|
|
1030
|
+
_skill_files=$(skill_select_adaptive "${INTELLIGENCE_ISSUE_TYPE:-backend}" "design" "${ISSUE_BODY:-}" "${INTELLIGENCE_COMPLEXITY:-5}" 2>/dev/null || true)
|
|
1031
|
+
if [[ -n "$_skill_files" ]]; then
|
|
1032
|
+
_skill_prompts=$(while IFS= read -r _path; do
|
|
1033
|
+
[[ -z "$_path" || ! -f "$_path" ]] && continue
|
|
1034
|
+
cat "$_path" 2>/dev/null
|
|
1035
|
+
done <<< "$_skill_files")
|
|
1036
|
+
fi
|
|
1037
|
+
elif type skill_load_prompts >/dev/null 2>&1; then
|
|
1038
|
+
_skill_prompts=$(skill_load_prompts "${INTELLIGENCE_ISSUE_TYPE:-backend}" "design" 2>/dev/null || true)
|
|
1039
|
+
fi
|
|
1040
|
+
if [[ -n "$_skill_prompts" ]]; then
|
|
1041
|
+
_skill_prompts=$(prune_context_section "skills" "$_skill_prompts" 8000)
|
|
1042
|
+
design_prompt="${design_prompt}
|
|
1043
|
+
## Skill Guidance (${INTELLIGENCE_ISSUE_TYPE:-backend} issue, AI-selected)
|
|
1044
|
+
${_skill_prompts}
|
|
1045
|
+
"
|
|
1046
|
+
fi
|
|
1047
|
+
|
|
1048
|
+
# Guard total prompt size
|
|
1049
|
+
design_prompt=$(guard_prompt_size "design" "$design_prompt")
|
|
1050
|
+
|
|
1051
|
+
local design_model
|
|
1052
|
+
design_model=$(jq -r --arg id "design" '(.stages[] | select(.id == $id) | .config.model) // .defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1053
|
+
[[ -n "$MODEL" ]] && design_model="$MODEL"
|
|
1054
|
+
[[ -z "$design_model" || "$design_model" == "null" ]] && design_model="opus"
|
|
1055
|
+
# Intelligence model routing (when no explicit CLI --model override)
|
|
1056
|
+
if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
|
|
1057
|
+
design_model="$CLAUDE_MODEL"
|
|
1058
|
+
fi
|
|
1059
|
+
|
|
1060
|
+
local _token_log="${ARTIFACTS_DIR}/.claude-tokens-design.log"
|
|
1061
|
+
local _design_flags
|
|
1062
|
+
_design_flags="$(_pipeline_claude_flags "design" "$design_model")"
|
|
1063
|
+
# shellcheck disable=SC2086
|
|
1064
|
+
claude --print $_design_flags --max-turns "$(_smart_int "max_turns.pipeline_stage" 25)" --dangerously-skip-permissions \
|
|
1065
|
+
"$design_prompt" < /dev/null > "$design_file" 2>"$_token_log" || true
|
|
1066
|
+
parse_claude_tokens "$_token_log"
|
|
1067
|
+
|
|
1068
|
+
# Claude may write to disk via tools instead of stdout — rescue those files
|
|
1069
|
+
local _design_rescue
|
|
1070
|
+
for _design_rescue in "${PROJECT_ROOT}/design-adr.md" "${PROJECT_ROOT}/design.md" \
|
|
1071
|
+
"${PROJECT_ROOT}/ADR.md" "${PROJECT_ROOT}/DESIGN.md"; do
|
|
1072
|
+
if [[ -s "$_design_rescue" ]] && [[ $(wc -l < "$design_file" 2>/dev/null | xargs) -lt 10 ]]; then
|
|
1073
|
+
info "Design written to ${_design_rescue} via tools — adopting as design artifact"
|
|
1074
|
+
cat "$_design_rescue" >> "$design_file"
|
|
1075
|
+
rm -f "$_design_rescue"
|
|
1076
|
+
break
|
|
1077
|
+
fi
|
|
1078
|
+
done
|
|
1079
|
+
|
|
1080
|
+
if [[ ! -s "$design_file" ]]; then
|
|
1081
|
+
error "Design generation failed — empty output"
|
|
1082
|
+
return 1
|
|
1083
|
+
fi
|
|
1084
|
+
|
|
1085
|
+
# Validate design content — detect API/CLI errors masquerading as designs
|
|
1086
|
+
local _design_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
|
|
1087
|
+
_design_fatal="${_design_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
|
|
1088
|
+
if grep -qiE "$_design_fatal" "$design_file" 2>/dev/null; then
|
|
1089
|
+
error "Design stage produced API/CLI error instead of a design: $(head -1 "$design_file" | cut -c1-100)"
|
|
1090
|
+
return 1
|
|
1091
|
+
fi
|
|
1092
|
+
|
|
1093
|
+
local line_count
|
|
1094
|
+
line_count=$(wc -l < "$design_file" | xargs)
|
|
1095
|
+
if [[ "$line_count" -lt 3 ]]; then
|
|
1096
|
+
error "Design too short (${line_count} lines) — likely an error, not a real design"
|
|
1097
|
+
return 1
|
|
1098
|
+
fi
|
|
1099
|
+
info "Design saved: ${DIM}$design_file${RESET} (${line_count} lines)"
|
|
1100
|
+
|
|
1101
|
+
# Extract file lists for build stage awareness
|
|
1102
|
+
local files_to_create files_to_modify
|
|
1103
|
+
files_to_create=$(sed -n '/Files to create/,/^-\|^#\|^$/p' "$design_file" 2>/dev/null | grep -E '^\s*-' | head -20 || true)
|
|
1104
|
+
files_to_modify=$(sed -n '/Files to modify/,/^-\|^#\|^$/p' "$design_file" 2>/dev/null | grep -E '^\s*-' | head -20 || true)
|
|
1105
|
+
|
|
1106
|
+
if [[ -n "$files_to_create" || -n "$files_to_modify" ]]; then
|
|
1107
|
+
info "Design scope: ${DIM}$(echo "$files_to_create $files_to_modify" | grep -c '^\s*-' || true) file(s)${RESET}"
|
|
1108
|
+
fi
|
|
1109
|
+
|
|
1110
|
+
# Post design to GitHub issue
|
|
1111
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1112
|
+
local design_summary
|
|
1113
|
+
design_summary=$(head -60 "$design_file")
|
|
1114
|
+
gh_comment_issue "$ISSUE_NUMBER" "## 📐 Architecture Decision Record
|
|
1115
|
+
|
|
1116
|
+
<details>
|
|
1117
|
+
<summary>Click to expand ADR (${line_count} lines)</summary>
|
|
1118
|
+
|
|
1119
|
+
${design_summary}
|
|
1120
|
+
|
|
1121
|
+
</details>
|
|
1122
|
+
|
|
1123
|
+
---
|
|
1124
|
+
_Generated by \`shipwright pipeline\` design stage at $(now_iso)_"
|
|
1125
|
+
fi
|
|
1126
|
+
|
|
1127
|
+
# Push design to wiki
|
|
1128
|
+
gh_wiki_page "Pipeline-Design-${ISSUE_NUMBER:-inline}" "$(<"$design_file")"
|
|
1129
|
+
|
|
1130
|
+
log_stage "design" "Generated design.md (${line_count} lines)"
|
|
1131
|
+
}
|
|
1132
|
+
|
|
1133
|
+
# ─── TDD: Generate tests before implementation ─────────────────────────────────
|