shipwright-cli 3.1.0 → 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/code-reviewer.md +2 -0
- package/.claude/agents/devops-engineer.md +2 -0
- package/.claude/agents/doc-fleet-agent.md +2 -0
- package/.claude/agents/pipeline-agent.md +2 -0
- package/.claude/agents/shell-script-specialist.md +2 -0
- package/.claude/agents/test-specialist.md +2 -0
- package/.claude/hooks/agent-crash-capture.sh +32 -0
- package/.claude/hooks/post-tool-use.sh +3 -2
- package/.claude/hooks/pre-tool-use.sh +35 -3
- package/README.md +22 -8
- package/claude-code/hooks/config-change.sh +18 -0
- package/claude-code/hooks/instructions-reloaded.sh +7 -0
- package/claude-code/hooks/worktree-create.sh +25 -0
- package/claude-code/hooks/worktree-remove.sh +20 -0
- package/config/code-constitution.json +130 -0
- package/config/defaults.json +25 -2
- package/config/policy.json +1 -1
- package/dashboard/middleware/auth.ts +134 -0
- package/dashboard/middleware/constants.ts +21 -0
- package/dashboard/public/index.html +8 -6
- package/dashboard/public/styles.css +176 -97
- package/dashboard/routes/auth.ts +38 -0
- package/dashboard/server.ts +117 -25
- package/dashboard/services/config.ts +26 -0
- package/dashboard/services/db.ts +118 -0
- package/dashboard/src/canvas/pixel-agent.ts +298 -0
- package/dashboard/src/canvas/pixel-sprites.ts +440 -0
- package/dashboard/src/canvas/shipyard-effects.ts +367 -0
- package/dashboard/src/canvas/shipyard-scene.ts +616 -0
- package/dashboard/src/canvas/submarine-layout.ts +267 -0
- package/dashboard/src/components/header.ts +8 -7
- package/dashboard/src/core/api.ts +5 -0
- package/dashboard/src/core/router.ts +1 -0
- package/dashboard/src/design/submarine-theme.ts +253 -0
- package/dashboard/src/main.ts +2 -0
- package/dashboard/src/types/api.ts +12 -1
- package/dashboard/src/views/activity.ts +2 -1
- package/dashboard/src/views/metrics.ts +69 -1
- package/dashboard/src/views/shipyard.ts +39 -0
- package/dashboard/types/index.ts +166 -0
- package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
- package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
- package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
- package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
- package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
- package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
- package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
- package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
- package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
- package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
- package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
- package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
- package/docs/research/RESEARCH_INDEX.md +439 -0
- package/docs/research/RESEARCH_SOURCES.md +440 -0
- package/docs/research/RESEARCH_SUMMARY.txt +275 -0
- package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
- package/package.json +2 -2
- package/scripts/lib/adaptive-model.sh +427 -0
- package/scripts/lib/adaptive-timeout.sh +316 -0
- package/scripts/lib/audit-trail.sh +309 -0
- package/scripts/lib/auto-recovery.sh +471 -0
- package/scripts/lib/bandit-selector.sh +431 -0
- package/scripts/lib/bootstrap.sh +104 -2
- package/scripts/lib/causal-graph.sh +455 -0
- package/scripts/lib/compat.sh +126 -0
- package/scripts/lib/compound-audit.sh +337 -0
- package/scripts/lib/constitutional.sh +454 -0
- package/scripts/lib/context-budget.sh +359 -0
- package/scripts/lib/convergence.sh +594 -0
- package/scripts/lib/cost-optimizer.sh +634 -0
- package/scripts/lib/daemon-adaptive.sh +14 -2
- package/scripts/lib/daemon-dispatch.sh +106 -17
- package/scripts/lib/daemon-failure.sh +34 -4
- package/scripts/lib/daemon-patrol.sh +25 -4
- package/scripts/lib/daemon-poll-github.sh +361 -0
- package/scripts/lib/daemon-poll-health.sh +299 -0
- package/scripts/lib/daemon-poll.sh +27 -611
- package/scripts/lib/daemon-state.sh +119 -66
- package/scripts/lib/daemon-triage.sh +10 -0
- package/scripts/lib/dod-scorecard.sh +442 -0
- package/scripts/lib/error-actionability.sh +300 -0
- package/scripts/lib/formal-spec.sh +461 -0
- package/scripts/lib/helpers.sh +180 -5
- package/scripts/lib/intent-analysis.sh +409 -0
- package/scripts/lib/loop-convergence.sh +350 -0
- package/scripts/lib/loop-iteration.sh +682 -0
- package/scripts/lib/loop-progress.sh +48 -0
- package/scripts/lib/loop-restart.sh +185 -0
- package/scripts/lib/memory-effectiveness.sh +506 -0
- package/scripts/lib/mutation-executor.sh +352 -0
- package/scripts/lib/outcome-feedback.sh +521 -0
- package/scripts/lib/pipeline-cli.sh +336 -0
- package/scripts/lib/pipeline-commands.sh +1216 -0
- package/scripts/lib/pipeline-detection.sh +101 -3
- package/scripts/lib/pipeline-execution.sh +897 -0
- package/scripts/lib/pipeline-github.sh +28 -3
- package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
- package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
- package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
- package/scripts/lib/pipeline-intelligence.sh +104 -1138
- package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
- package/scripts/lib/pipeline-quality-checks.sh +17 -711
- package/scripts/lib/pipeline-quality-gates.sh +563 -0
- package/scripts/lib/pipeline-stages-build.sh +730 -0
- package/scripts/lib/pipeline-stages-delivery.sh +965 -0
- package/scripts/lib/pipeline-stages-intake.sh +1133 -0
- package/scripts/lib/pipeline-stages-monitor.sh +407 -0
- package/scripts/lib/pipeline-stages-review.sh +1022 -0
- package/scripts/lib/pipeline-stages.sh +161 -2901
- package/scripts/lib/pipeline-state.sh +36 -5
- package/scripts/lib/pipeline-util.sh +487 -0
- package/scripts/lib/policy-learner.sh +438 -0
- package/scripts/lib/process-reward.sh +493 -0
- package/scripts/lib/project-detect.sh +649 -0
- package/scripts/lib/quality-profile.sh +334 -0
- package/scripts/lib/recruit-commands.sh +885 -0
- package/scripts/lib/recruit-learning.sh +739 -0
- package/scripts/lib/recruit-roles.sh +648 -0
- package/scripts/lib/reward-aggregator.sh +458 -0
- package/scripts/lib/rl-optimizer.sh +362 -0
- package/scripts/lib/root-cause.sh +427 -0
- package/scripts/lib/scope-enforcement.sh +445 -0
- package/scripts/lib/session-restart.sh +493 -0
- package/scripts/lib/skill-memory.sh +300 -0
- package/scripts/lib/skill-registry.sh +775 -0
- package/scripts/lib/spec-driven.sh +476 -0
- package/scripts/lib/test-helpers.sh +18 -7
- package/scripts/lib/test-holdout.sh +429 -0
- package/scripts/lib/test-optimizer.sh +511 -0
- package/scripts/shipwright-file-suggest.sh +45 -0
- package/scripts/skills/adversarial-quality.md +61 -0
- package/scripts/skills/api-design.md +44 -0
- package/scripts/skills/architecture-design.md +50 -0
- package/scripts/skills/brainstorming.md +43 -0
- package/scripts/skills/data-pipeline.md +44 -0
- package/scripts/skills/deploy-safety.md +64 -0
- package/scripts/skills/documentation.md +38 -0
- package/scripts/skills/frontend-design.md +45 -0
- package/scripts/skills/generated/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
- package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
- package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
- package/scripts/skills/generated/cli-version-management.md +29 -0
- package/scripts/skills/generated/collection-system-validation.md +99 -0
- package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
- package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
- package/scripts/skills/generated/test-parallelization-detection.md +65 -0
- package/scripts/skills/observability.md +79 -0
- package/scripts/skills/performance.md +48 -0
- package/scripts/skills/pr-quality.md +49 -0
- package/scripts/skills/product-thinking.md +43 -0
- package/scripts/skills/security-audit.md +49 -0
- package/scripts/skills/systematic-debugging.md +40 -0
- package/scripts/skills/testing-strategy.md +47 -0
- package/scripts/skills/two-stage-review.md +52 -0
- package/scripts/skills/validation-thoroughness.md +55 -0
- package/scripts/sw +9 -3
- package/scripts/sw-activity.sh +9 -8
- package/scripts/sw-adaptive.sh +8 -7
- package/scripts/sw-adversarial.sh +2 -1
- package/scripts/sw-architecture-enforcer.sh +3 -1
- package/scripts/sw-auth.sh +12 -2
- package/scripts/sw-autonomous.sh +5 -1
- package/scripts/sw-changelog.sh +4 -1
- package/scripts/sw-checkpoint.sh +2 -1
- package/scripts/sw-ci.sh +15 -6
- package/scripts/sw-cleanup.sh +4 -26
- package/scripts/sw-code-review.sh +45 -20
- package/scripts/sw-connect.sh +2 -1
- package/scripts/sw-context.sh +2 -1
- package/scripts/sw-cost.sh +107 -5
- package/scripts/sw-daemon.sh +71 -11
- package/scripts/sw-dashboard.sh +3 -1
- package/scripts/sw-db.sh +71 -20
- package/scripts/sw-decide.sh +8 -2
- package/scripts/sw-decompose.sh +360 -17
- package/scripts/sw-deps.sh +4 -1
- package/scripts/sw-developer-simulation.sh +4 -1
- package/scripts/sw-discovery.sh +378 -5
- package/scripts/sw-doc-fleet.sh +4 -1
- package/scripts/sw-docs-agent.sh +3 -1
- package/scripts/sw-docs.sh +2 -1
- package/scripts/sw-doctor.sh +453 -2
- package/scripts/sw-dora.sh +4 -1
- package/scripts/sw-durable.sh +12 -7
- package/scripts/sw-e2e-orchestrator.sh +17 -16
- package/scripts/sw-eventbus.sh +13 -4
- package/scripts/sw-evidence.sh +364 -12
- package/scripts/sw-feedback.sh +550 -9
- package/scripts/sw-fix.sh +20 -1
- package/scripts/sw-fleet-discover.sh +6 -2
- package/scripts/sw-fleet-viz.sh +9 -4
- package/scripts/sw-fleet.sh +5 -1
- package/scripts/sw-github-app.sh +18 -4
- package/scripts/sw-github-checks.sh +3 -2
- package/scripts/sw-github-deploy.sh +3 -2
- package/scripts/sw-github-graphql.sh +18 -7
- package/scripts/sw-guild.sh +5 -1
- package/scripts/sw-heartbeat.sh +5 -30
- package/scripts/sw-hello.sh +67 -0
- package/scripts/sw-hygiene.sh +10 -3
- package/scripts/sw-incident.sh +273 -5
- package/scripts/sw-init.sh +18 -2
- package/scripts/sw-instrument.sh +10 -2
- package/scripts/sw-intelligence.sh +44 -7
- package/scripts/sw-jira.sh +5 -1
- package/scripts/sw-launchd.sh +2 -1
- package/scripts/sw-linear.sh +4 -1
- package/scripts/sw-logs.sh +4 -1
- package/scripts/sw-loop.sh +436 -1076
- package/scripts/sw-memory.sh +357 -3
- package/scripts/sw-mission-control.sh +6 -1
- package/scripts/sw-model-router.sh +483 -27
- package/scripts/sw-otel.sh +15 -4
- package/scripts/sw-oversight.sh +14 -5
- package/scripts/sw-patrol-meta.sh +334 -0
- package/scripts/sw-pipeline-composer.sh +7 -1
- package/scripts/sw-pipeline-vitals.sh +12 -6
- package/scripts/sw-pipeline.sh +54 -2653
- package/scripts/sw-pm.sh +16 -8
- package/scripts/sw-pr-lifecycle.sh +2 -1
- package/scripts/sw-predictive.sh +17 -5
- package/scripts/sw-prep.sh +185 -2
- package/scripts/sw-ps.sh +5 -25
- package/scripts/sw-public-dashboard.sh +17 -4
- package/scripts/sw-quality.sh +14 -6
- package/scripts/sw-reaper.sh +8 -25
- package/scripts/sw-recruit.sh +156 -2303
- package/scripts/sw-regression.sh +19 -12
- package/scripts/sw-release-manager.sh +3 -1
- package/scripts/sw-release.sh +4 -1
- package/scripts/sw-remote.sh +3 -1
- package/scripts/sw-replay.sh +7 -1
- package/scripts/sw-retro.sh +158 -1
- package/scripts/sw-review-rerun.sh +3 -1
- package/scripts/sw-scale.sh +14 -5
- package/scripts/sw-security-audit.sh +6 -1
- package/scripts/sw-self-optimize.sh +173 -6
- package/scripts/sw-session.sh +9 -3
- package/scripts/sw-setup.sh +3 -1
- package/scripts/sw-stall-detector.sh +406 -0
- package/scripts/sw-standup.sh +15 -7
- package/scripts/sw-status.sh +3 -1
- package/scripts/sw-strategic.sh +14 -6
- package/scripts/sw-stream.sh +13 -4
- package/scripts/sw-swarm.sh +20 -7
- package/scripts/sw-team-stages.sh +13 -6
- package/scripts/sw-templates.sh +7 -31
- package/scripts/sw-testgen.sh +17 -6
- package/scripts/sw-tmux-pipeline.sh +4 -1
- package/scripts/sw-tmux-role-color.sh +2 -0
- package/scripts/sw-tmux-status.sh +1 -1
- package/scripts/sw-tmux.sh +37 -1
- package/scripts/sw-trace.sh +3 -1
- package/scripts/sw-tracker-github.sh +3 -0
- package/scripts/sw-tracker-jira.sh +3 -0
- package/scripts/sw-tracker-linear.sh +3 -0
- package/scripts/sw-tracker.sh +3 -1
- package/scripts/sw-triage.sh +3 -2
- package/scripts/sw-upgrade.sh +3 -1
- package/scripts/sw-ux.sh +5 -2
- package/scripts/sw-webhook.sh +5 -2
- package/scripts/sw-widgets.sh +9 -4
- package/scripts/sw-worktree.sh +15 -3
- package/scripts/test-skill-injection.sh +1233 -0
- package/templates/pipelines/autonomous.json +27 -3
- package/templates/pipelines/cost-aware.json +34 -8
- package/templates/pipelines/deployed.json +12 -0
- package/templates/pipelines/enterprise.json +12 -0
- package/templates/pipelines/fast.json +6 -0
- package/templates/pipelines/full.json +27 -3
- package/templates/pipelines/hotfix.json +6 -0
- package/templates/pipelines/standard.json +12 -0
- package/templates/pipelines/tdd.json +12 -0
|
@@ -0,0 +1,775 @@
|
|
|
1
|
+
# skill-registry.sh — Maps (issue_type, stage) → skill prompt fragment files
# Source from pipeline-stages.sh. Skills are prompt fragments in scripts/skills/*.md

# Include guard: no-op on repeat sourcing so helpers/vars are defined once.
[[ -n "${_SKILL_REGISTRY_LOADED:-}" ]] && return 0
_SKILL_REGISTRY_LOADED=1

# Resolve the skills directory relative to this file. Respects a caller-set
# SKILLS_DIR; otherwise canonicalizes ../skills next to this script, falling
# back to the unresolved relative path when the directory doesn't exist yet
# (the `cd … && pwd` fails and the `echo` branch keeps the raw path).
_skill_registry_base="$(dirname "${BASH_SOURCE[0]}")/../skills"
SKILLS_DIR="${SKILLS_DIR:-$( cd "$_skill_registry_base" 2>/dev/null && pwd || echo "$_skill_registry_base" )}"
unset _skill_registry_base
|
|
9
|
+
|
|
10
|
+
# skill_get_prompts — Returns newline-separated list of skill file paths for a given (issue_type, stage).
# $1: issue_type (frontend|backend|api|database|infrastructure|documentation|security|performance|refactor|testing)
# $2: stage (plan|design|build|review|compound_quality|pr|deploy|validate|monitor)
# Prints absolute paths to skill .md files, one per line. Skips missing files silently.
skill_get_prompts() {
  local issue_type="${1:-backend}" stage="${2:-plan}"
  local names=""

  # Single flat dispatch on "stage:issue_type". More specific patterns come
  # first; the "stage:*" arm supplies that stage's default skill set.
  case "${stage}:${issue_type}" in
    plan:frontend)                  names="brainstorming frontend-design product-thinking" ;;
    plan:api)                       names="brainstorming api-design" ;;
    plan:database)                  names="brainstorming data-pipeline" ;;
    plan:security)                  names="brainstorming security-audit" ;;
    plan:performance)               names="brainstorming performance" ;;
    plan:testing)                   names="testing-strategy" ;;
    plan:documentation)             names="documentation" ;;
    plan:*)                         names="brainstorming" ;;

    build:frontend)                 names="frontend-design" ;;
    build:api)                      names="api-design" ;;
    build:database)                 names="data-pipeline" ;;
    build:security)                 names="security-audit" ;;
    build:performance)              names="performance" ;;
    build:testing)                  names="testing-strategy" ;;
    build:documentation)            names="documentation" ;;
    build:*)                        names="" ;;

    review:documentation)           names="" ;;
    review:api|review:security)     names="two-stage-review security-audit" ;;
    review:*)                       names="two-stage-review" ;;

    design:frontend)                names="architecture-design frontend-design" ;;
    design:api)                     names="architecture-design api-design" ;;
    design:database)                names="architecture-design data-pipeline" ;;
    design:security)                names="architecture-design security-audit" ;;
    design:performance)             names="architecture-design performance" ;;
    design:documentation)           names="" ;;
    design:*)                       names="architecture-design" ;;

    compound_quality:frontend)      names="adversarial-quality testing-strategy" ;;
    compound_quality:api|compound_quality:security)
                                    names="adversarial-quality security-audit" ;;
    compound_quality:performance)   names="adversarial-quality performance" ;;
    compound_quality:documentation) names="" ;;
    compound_quality:*)             names="adversarial-quality" ;;

    pr:*)                           names="pr-quality" ;;

    deploy:api|deploy:security)     names="deploy-safety security-audit" ;;
    deploy:database)                names="deploy-safety data-pipeline" ;;
    deploy:documentation)           names="" ;;
    deploy:*)                       names="deploy-safety" ;;

    validate:api|validate:security) names="validation-thoroughness security-audit" ;;
    validate:documentation)         names="" ;;
    validate:*)                     names="validation-thoroughness" ;;

    monitor:performance)            names="observability performance" ;;
    monitor:documentation)          names="" ;;
    monitor:*)                      names="observability" ;;

    *)                              names="" ;;
  esac

  # Resolve names to files under SKILLS_DIR; silently drop missing ones.
  # Skill names never contain whitespace, so word-splitting $names is safe.
  local name
  for name in $names; do
    local candidate="${SKILLS_DIR}/${name}.md"
    if [[ -f "$candidate" ]]; then
      echo "$candidate"
    fi
  done
}
|
|
134
|
+
|
|
135
|
+
# skill_load_prompts — Concatenates all skill prompt fragments for a given (issue_type, stage).
# $1: issue_type
# $2: stage
# Prints the combined prompt text; prints an empty string if no skills match.
skill_load_prompts() {
  local kind="${1:-backend}" phase="${2:-plan}"
  local merged="" fragment text

  # skill_get_prompts emits one file path per line; fold every readable,
  # non-empty fragment into the merged prompt, separated by newlines.
  while IFS= read -r fragment; do
    [[ -z "$fragment" ]] && continue
    [[ -f "$fragment" ]] || continue
    text=$(cat "$fragment" 2>/dev/null || true)
    [[ -z "$text" ]] && continue
    merged="${merged}
${text}
"
  done < <(skill_get_prompts "$kind" "$phase")

  echo "$merged"
}
|
|
159
|
+
|
|
160
|
+
# skill_has_two_stage_review — Check if the issue type uses two-stage review.
# $1: issue_type
# Returns 0 (true) if two-stage review is active, 1 (false) otherwise.
skill_has_two_stage_review() {
  local kind="${1:-backend}"
  # True iff the review-stage skill set contains the two-stage-review fragment.
  skill_get_prompts "$kind" "review" | grep -q "two-stage-review" 2>/dev/null
}
|
|
169
|
+
|
|
170
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
171
|
+
# ADAPTIVE SKILL SELECTION ENHANCEMENTS (Level 2)
|
|
172
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
173
|
+
|
|
174
|
+
# skill_detect_from_body — Analyze issue body text to detect additional relevant skills.
# Globals:   SKILLS_DIR (read)
# $1: issue_body text
# $2: stage (default "plan")
# Outputs newline-separated additional skill file paths beyond label-based skills.
# Gracefully returns empty if no body provided; missing skill files are skipped.
skill_detect_from_body() {
  local body="${1:-}" stage="${2:-plan}"
  local skill_candidates=()

  [[ -z "$body" ]] && return 0

  # Lowercase once for case-insensitive matching. Declaration and assignment
  # are split so a failing command substitution is not masked by `local`.
  local body_lower
  body_lower=$(printf '%s' "$body" | tr '[:upper:]' '[:lower:]')

  # Keyword patterns → skill names (resolved to SKILLS_DIR/<skill>.md below).
  # printf is used instead of echo: a body starting with "-n"/"-e" would be
  # eaten as echo flags and silently skew the pattern matching.

  # Accessibility/UX patterns
  if printf '%s\n' "$body_lower" | grep -qE '(accessibility|a11y|wcag|aria|keyboard|screen.?reader|color.?blind|dyslexia|contrast)'; then
    skill_candidates+=(frontend-design)
  fi

  # Migration/Schema patterns
  if printf '%s\n' "$body_lower" | grep -qE '(migration|schema|database.?(refactor|redesign)|column|index|constraint)'; then
    skill_candidates+=(data-pipeline)
  fi

  # Security/Auth patterns
  if printf '%s\n' "$body_lower" | grep -qE '(security|auth|owasp|xss|injection|csrf|vulnerability|encryption|ssl|tls)'; then
    skill_candidates+=(security-audit)
  fi

  # Performance/Latency patterns
  if printf '%s\n' "$body_lower" | grep -qE '(performance|latency|slow|timeout|p95|p99|benchmark|memory.?leak|cache)'; then
    skill_candidates+=(performance)
  fi

  # API/REST/GraphQL patterns
  if printf '%s\n' "$body_lower" | grep -qE '(api|endpoint|rest|graphql|http|json|query|mutation)'; then
    skill_candidates+=(api-design)
  fi

  # Testing patterns
  if printf '%s\n' "$body_lower" | grep -qE '(test|coverage|unit|integration|e2e|mock|stub|fixture)'; then
    skill_candidates+=(testing-strategy)
  fi

  # Architecture/Design patterns
  if printf '%s\n' "$body_lower" | grep -qE '(architecture|component|module|layer|boundary|dependency|coupling|cohesion)'; then
    skill_candidates+=(architecture-design)
  fi

  # Debugging patterns (only injected for hands-on stages)
  if printf '%s\n' "$body_lower" | grep -qE '(debug|trace|log|monitor|observe|metric|alert)'; then
    [[ "$stage" == "build" || "$stage" == "test" ]] && skill_candidates+=(systematic-debugging)
  fi

  # Guard: expanding an empty array via "${arr[@]}" errors under `set -u` on
  # bash < 4.4; bail out early (same guard pattern as skill_get_prompts).
  [[ ${#skill_candidates[@]} -eq 0 ]] && return 0

  # Convert candidates to file paths, silently skipping missing files.
  local skill
  for skill in "${skill_candidates[@]}"; do
    local path="${SKILLS_DIR}/${skill}.md"
    if [[ -f "$path" ]]; then
      echo "$path"
    fi
  done
}
|
|
241
|
+
|
|
242
|
+
# skill_weight_by_complexity — Adjust skill set based on issue complexity.
# Globals:   SKILLS_DIR (read, only for complexity 8-10)
# $1: complexity (1-10, from INTELLIGENCE_COMPLEXITY; non-numeric → default 5)
# $2: skills (newline-separated file paths)
# Prints filtered skill paths:
#   - Complexity 1-3:  only essential skills (first skill in list)
#   - Complexity 4-7:  all standard skills (no change)
#   - Complexity 8-10: add cross-cutting concerns (security-audit, performance if not present)
skill_weight_by_complexity() {
  local complexity="${1:-5}" skills="${2:-}"

  [[ -z "$skills" ]] && return 0

  # Sanitize complexity. The old `$(printf '%d' … || echo 5)` idiom was buggy:
  # bash printf still emits a partial number before failing, so "9x" yielded
  # "95" (clamped to 10) instead of the default. Accept only pure digits and
  # force base-10 so leading zeros ("08") are not parsed as octal.
  if [[ "$complexity" =~ ^[0-9]+$ ]]; then
    complexity=$((10#$complexity))
  else
    complexity=5
  fi
  [[ "$complexity" -lt 1 ]] && complexity=1
  [[ "$complexity" -gt 10 ]] && complexity=10

  # Simple issues: only first (essential) skill
  if [[ "$complexity" -le 3 ]]; then
    echo "$skills" | head -1
    return 0
  fi

  # Standard complexity: return all skills as-is
  if [[ "$complexity" -le 7 ]]; then
    echo "$skills"
    return 0
  fi

  # Complex issues (8-10): emit all provided skills first …
  echo "$skills"

  # … then append security-audit if not already present (and the file exists)
  if ! echo "$skills" | grep -q "security-audit.md" 2>/dev/null; then
    local sec_path="${SKILLS_DIR}/security-audit.md"
    [[ -f "$sec_path" ]] && echo "$sec_path"
  fi

  # … and performance if not already present (and the file exists)
  if ! echo "$skills" | grep -q "performance.md" 2>/dev/null; then
    local perf_path="${SKILLS_DIR}/performance.md"
    [[ -f "$perf_path" ]] && echo "$perf_path"
  fi
}
|
|
287
|
+
|
|
288
|
+
# skill_select_adaptive — Intelligent skill selection combining all signals.
# $1: issue_type
# $2: stage
# $3: issue_body (optional)
# $4: complexity (optional, 1-10, default 5)
# Prints newline-separated skill file paths, deduplicated (sorted).
# Combines: static registry + body analysis + complexity weighting.
skill_select_adaptive() {
  local issue_type="${1:-backend}" stage="${2:-plan}"
  local body="${3:-}" complexity="${4:-5}"

  # Signal 1: static (issue_type, stage) registry.
  local registry_skills
  registry_skills=$(skill_get_prompts "$issue_type" "$stage")

  # Signal 2: keyword detection over the issue body, when one was supplied.
  local detected=""
  if [[ -n "$body" ]]; then
    detected=$(skill_detect_from_body "$body" "$stage")
  fi

  # Merge both signals, dropping duplicates and blank lines.
  local merged
  merged=$(printf '%s\n%s' "$registry_skills" "$detected" | sort -u | grep -v '^$')

  # Signal 3: trim or extend the set according to complexity.
  merged=$(skill_weight_by_complexity "$complexity" "$merged")

  # Complexity weighting may re-introduce duplicates; dedupe once more.
  merged=$(echo "$merged" | sort -u | grep -v '^$')

  echo "$merged"
}
|
|
321
|
+
|
|
322
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
323
|
+
# AI-POWERED SKILL SELECTION (Tier 1)
|
|
324
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
325
|
+
|
|
326
|
+
# Where AI-generated skills are written, and where their refinement patches
# live. Both derive from SKILLS_DIR, so set SKILLS_DIR before sourcing to
# relocate them. (REFINEMENTS_DIR is not referenced in this file's visible
# portion — presumably consumed by the skill-refinement flow; verify.)
GENERATED_SKILLS_DIR="${SKILLS_DIR}/generated"
REFINEMENTS_DIR="${GENERATED_SKILLS_DIR}/_refinements"
|
|
328
|
+
|
|
329
|
+
# skill_build_catalog — Build a compact skill index for the LLM router prompt.
# Globals:   SKILLS_DIR, GENERATED_SKILLS_DIR (read)
# $1: issue_type (optional — for memory context)
# $2: stage (optional — for memory context)
# Outputs: multi-line text, one skill per line with description and optional memory stats.
skill_build_catalog() {
  local issue_type="${1:-}" stage="${2:-}"
  local catalog=""
  local skill_file

  # Scan curated skills ("$SKILLS_DIR"/*.md; the generated/ subdir does not
  # match this glob, so it is only picked up by the tagged scan below).
  for skill_file in "$SKILLS_DIR"/*.md; do
    [[ ! -f "$skill_file" ]] && continue
    catalog="${catalog}
$(_skill_catalog_entry "$skill_file" "" "$issue_type" "$stage")"
  done

  # Scan AI-generated skills, tagged "[generated]" so the router can tell
  # them apart from curated ones.
  if [[ -d "$GENERATED_SKILLS_DIR" ]]; then
    for skill_file in "$GENERATED_SKILLS_DIR"/*.md; do
      [[ ! -f "$skill_file" ]] && continue
      catalog="${catalog}
$(_skill_catalog_entry "$skill_file" " [generated]" "$issue_type" "$stage")"
    done
  fi

  echo "$catalog"
}

# _skill_catalog_entry — Format one catalog line for a skill file (private helper).
# $1: skill file path   $2: tag suffix ("" or " [generated]")
# $3: issue_type (may be empty)   $4: stage (may be empty)
# Prints "- <name><tag>: <desc><memory_hint>" without a trailing newline.
_skill_catalog_entry() {
  local skill_file="$1" tag="$2" issue_type="$3" stage="$4"
  local name desc
  name=$(basename "$skill_file" .md)

  # Description: first meaningful line (skip headers, blanks, rules,
  # "**IMPORTANT" banners), truncated to 120 chars; fall back to the
  # de-hashed first line when nothing qualifies.
  desc=$(grep -v '^#\|^$\|^---\|^\*\*IMPORTANT' "$skill_file" 2>/dev/null | head -1 | cut -c1-120 || echo "")
  [[ -z "$desc" ]] && desc=$(head -1 "$skill_file" | sed 's/^#* *//' | cut -c1-120)

  # Optional success-rate hint from skill memory, only when both the context
  # args and the (optionally sourced) memory function are available.
  local memory_hint=""
  if [[ -n "$issue_type" && -n "$stage" ]] && type skill_memory_get_success_rate >/dev/null 2>&1; then
    local rate
    rate=$(skill_memory_get_success_rate "$issue_type" "$stage" "$name" 2>/dev/null || true)
    [[ -n "$rate" ]] && memory_hint=" [${rate}% success for ${issue_type}/${stage}]"
  fi

  printf -- '- %s%s: %s%s' "$name" "$tag" "$desc" "$memory_hint"
}
|
|
383
|
+
|
|
384
|
+
# skill_analyze_issue — LLM-powered skill selection and gap detection.
#
# Asks the LLM to classify the issue, pick skills per pipeline stage, and
# optionally generate brand-new skills when no existing one fits.
#
# Globals:
#   ARTIFACTS_DIR          (read — default artifacts location)
#   GENERATED_SKILLS_DIR   (written — new skill files land here)
#   INTELLIGENCE_ISSUE_TYPE, INTELLIGENCE_COMPLEXITY (exported from analysis)
# Arguments:
#   $1: issue_title
#   $2: issue_body
#   $3: issue_labels
#   $4: artifacts_dir (where to write skill-plan.json)
#   $5: intelligence_json (optional — reuse from intelligence_analyze_issue)
# Returns: 0 on success (skill-plan.json written), 1 on failure (caller should fallback)
# Requires: _intelligence_call_claude() from sw-intelligence.sh
skill_analyze_issue() {
  local title="${1:-}" body="${2:-}" labels="${3:-}"
  local artifacts_dir="${4:-${ARTIFACTS_DIR:-.claude/pipeline-artifacts}}"
  local intelligence_json="${5:-}"

  # Verify we have the LLM call function before doing any work.
  if ! type _intelligence_call_claude >/dev/null 2>&1; then
    return 1
  fi

  # Build the skill catalog; an empty catalog means there is nothing to route.
  local catalog
  catalog=$(skill_build_catalog "" "" 2>/dev/null || true)
  [[ -z "$catalog" ]] && return 1

  # Historical performance context (optional helper, may not be sourced).
  local memory_context=""
  if type skill_memory_get_recommendations >/dev/null 2>&1; then
    local recs
    recs=$(skill_memory_get_recommendations "backend" "plan" 2>/dev/null || true)
    [[ -n "$recs" ]] && memory_context="Historical skill performance: $recs"
  fi

  # Build the prompt
  local prompt
  prompt="You are a pipeline skill router. Analyze this GitHub issue and select the best skills for each pipeline stage.

## Issue
Title: ${title}
Labels: ${labels}
Body:
${body}

## Available Skills
${catalog}

${memory_context:+## Historical Context
$memory_context
}
${intelligence_json:+## Intelligence Analysis
$intelligence_json
}
## Pipeline Stages
Skills can be assigned to: plan, design, build, review, compound_quality, pr, deploy, validate, monitor

## Instructions
1. Classify the issue type (frontend|backend|api|database|infrastructure|documentation|security|performance|refactor|testing)
2. Select 1-4 skills per stage from the catalog. Only select skills relevant to that stage.
3. For each selected skill, write a one-sentence rationale explaining WHY this skill matters for THIS specific issue (not generic advice).
4. If the issue needs expertise not covered by any existing skill, generate a new skill with focused, actionable content (200-400 words).
5. Identify specific review focus areas and risk areas for this issue.

## Response Format (JSON only, no markdown)
{
  \"issue_type\": \"frontend\",
  \"confidence\": 0.92,
  \"secondary_domains\": [\"accessibility\", \"real-time\"],
  \"complexity_assessment\": {
    \"score\": 6,
    \"reasoning\": \"Brief explanation\"
  },
  \"skill_plan\": {
    \"plan\": [\"skill-name-1\", \"skill-name-2\"],
    \"design\": [\"skill-name\"],
    \"build\": [\"skill-name\"],
    \"review\": [\"skill-name\"],
    \"compound_quality\": [\"skill-name\"],
    \"pr\": [\"skill-name\"],
    \"deploy\": [\"skill-name\"],
    \"validate\": [],
    \"monitor\": []
  },
  \"skill_rationale\": {
    \"skill-name-1\": \"Why this skill matters for this specific issue\",
    \"skill-name-2\": \"Why this skill matters\"
  },
  \"generated_skills\": [
    {
      \"name\": \"new-skill-name\",
      \"reason\": \"Why no existing skill covers this\",
      \"content\": \"## Skill Title\\n\\nActionable guidance...\"
    }
  ],
  \"review_focus\": [\"specific area 1\", \"specific area 2\"],
  \"risk_areas\": [\"specific risk 1\"]
}"

  # Derive a stable cache key from the issue text. Hash computation is split
  # from the fallback on purpose: in the previous one-liner the
  # '|| echo ${RANDOM}' never fired, because a trailing 'cut' succeeds even
  # when md5sum is missing and its input is empty.
  local issue_hash
  issue_hash=$(echo "${title}${body}" | md5sum 2>/dev/null | cut -c1-16)
  [[ -z "$issue_hash" ]] && issue_hash="${RANDOM}"
  local cache_key="skill_analysis_${issue_hash}"

  # Call the LLM
  local result
  if ! result=$(_intelligence_call_claude "$prompt" "$cache_key" 3600 "haiku"); then
    return 1
  fi

  # Validate the response has the required top-level fields.
  local valid
  valid=$(echo "$result" | jq 'has("issue_type") and has("skill_plan") and has("skill_rationale")' 2>/dev/null || echo "false")
  if [[ "$valid" != "true" ]]; then
    warn "Skill analysis returned invalid JSON — falling back to static selection"
    return 1
  fi

  # Write skill-plan.json
  mkdir -p "$artifacts_dir"
  echo "$result" | jq '.' > "$artifacts_dir/skill-plan.json"

  # Save any generated skills to disk.
  local gen_count
  gen_count=$(echo "$result" | jq '.generated_skills | length' 2>/dev/null || echo "0")
  [[ "$gen_count" =~ ^[0-9]+$ ]] || gen_count=0  # guard against jq error text
  if (( gen_count > 0 )); then
    mkdir -p "$GENERATED_SKILLS_DIR"
    local i gen_name gen_content
    for (( i = 0; i < gen_count; i++ )); do
      gen_name=$(echo "$result" | jq -r ".generated_skills[$i].name" 2>/dev/null)
      gen_content=$(echo "$result" | jq -r ".generated_skills[$i].content" 2>/dev/null)
      [[ -z "$gen_name" || "$gen_name" == "null" ]] && continue
      [[ -z "$gen_content" || "$gen_content" == "null" ]] && continue
      # The name comes from LLM output: refuse anything that could escape
      # GENERATED_SKILLS_DIR (slashes or '..' path components).
      [[ "$gen_name" == */* || "$gen_name" == *..* ]] && continue
      # Only write if it doesn't already exist (don't overwrite improved versions).
      if [[ ! -f "$GENERATED_SKILLS_DIR/${gen_name}.md" ]]; then
        printf '%b\n' "$gen_content" > "$GENERATED_SKILLS_DIR/${gen_name}.md"
        info "Generated new skill: ${gen_name}"
      fi
    done
  fi

  # Update INTELLIGENCE_ISSUE_TYPE from analysis
  local analyzed_type
  analyzed_type=$(echo "$result" | jq -r '.issue_type // empty' 2>/dev/null)
  if [[ -n "$analyzed_type" ]]; then
    export INTELLIGENCE_ISSUE_TYPE="$analyzed_type"
  fi

  # Update INTELLIGENCE_COMPLEXITY from analysis
  local analyzed_complexity
  analyzed_complexity=$(echo "$result" | jq -r '.complexity_assessment.score // empty' 2>/dev/null)
  if [[ -n "$analyzed_complexity" ]]; then
    export INTELLIGENCE_COMPLEXITY="$analyzed_complexity"
  fi

  return 0
}
|
|
534
|
+
|
|
535
|
+
# skill_load_from_plan — Load skill content for a stage from skill-plan.json artifact.
# $1: stage (plan|design|build|review|compound_quality|pr|deploy|validate|monitor)
# Reads:   $ARTIFACTS_DIR/skill-plan.json (plus SKILLS_DIR / GENERATED_SKILLS_DIR /
#          REFINEMENTS_DIR for skill bodies and refinement patches)
# Outputs: combined prompt text on stdout — rationale header, then each
#          skill's content, then any accumulated refinement patch.
# Falls back to skill_select_adaptive() if skill-plan.json is missing.
skill_load_from_plan() {
  local stage="${1:-plan}"
  local plan_file="${ARTIFACTS_DIR}/skill-plan.json"

  # Fallback if no plan file: use the static adaptive selector when available.
  if [[ ! -f "$plan_file" ]]; then
    if type skill_select_adaptive >/dev/null 2>&1; then
      local _fallback_files _path
      _fallback_files=$(skill_select_adaptive "${INTELLIGENCE_ISSUE_TYPE:-backend}" "$stage" "${ISSUE_BODY:-}" "${INTELLIGENCE_COMPLEXITY:-5}" 2>/dev/null || true)
      if [[ -n "$_fallback_files" ]]; then
        while IFS= read -r _path; do
          [[ -z "$_path" || ! -f "$_path" ]] && continue
          cat "$_path" 2>/dev/null
        done <<< "$_fallback_files"
      fi
    fi
    return 0
  fi

  # Extract skill names for this stage. The stage is passed via --arg instead
  # of interpolating it into the jq program, so jq-significant characters in
  # it can never break (or inject into) the filter.
  local skill_names
  skill_names=$(jq -r --arg s "$stage" '.skill_plan[$s][]? // empty' "$plan_file" 2>/dev/null)
  [[ -z "$skill_names" ]] && return 0

  # Build a header explaining why each skill was chosen (AI-provided rationale).
  local rationale_header="### Why these skills were selected (AI-analyzed):
"
  local skill_name rat
  while IFS= read -r skill_name; do
    [[ -z "$skill_name" ]] && continue
    # --arg keeps names containing quotes/backslashes from breaking the filter.
    rat=$(jq -r --arg n "$skill_name" '.skill_rationale[$n] // empty' "$plan_file" 2>/dev/null)
    [[ -n "$rat" ]] && rationale_header="${rationale_header}- **${skill_name}**: ${rat}
"
  done <<< "$skill_names"

  # Output rationale header
  echo "$rationale_header"

  # Load each skill's content: the curated directory wins over generated.
  while IFS= read -r skill_name; do
    [[ -z "$skill_name" ]] && continue

    local skill_path=""
    if [[ -f "${SKILLS_DIR}/${skill_name}.md" ]]; then
      skill_path="${SKILLS_DIR}/${skill_name}.md"
    elif [[ -f "${GENERATED_SKILLS_DIR}/${skill_name}.md" ]]; then
      skill_path="${GENERATED_SKILLS_DIR}/${skill_name}.md"
    fi

    if [[ -n "$skill_path" ]]; then
      cat "$skill_path" 2>/dev/null
      echo ""

      # Append accumulated refinement learnings for this skill, if any.
      local refinement_path="${REFINEMENTS_DIR}/${skill_name}.patch.md"
      if [[ -f "$refinement_path" ]]; then
        echo ""
        cat "$refinement_path" 2>/dev/null
        echo ""
      fi
    fi
  done <<< "$skill_names"
}
|
|
609
|
+
|
|
610
|
+
# skill_analyze_outcome — LLM-powered outcome analysis and learning.
# $1: pipeline_result ("success" or "failure")
# $2: artifacts_dir
# $3: failed_stage (optional — only for failures)
# $4: error_context (optional — last N lines of error output)
# Reads:  $artifacts_dir/skill-plan.json, review artifacts
# Writes: $artifacts_dir/skill-outcome.json, refinement patches, lifecycle verdicts
# Returns: 0 on success, 1 on failure (caller falls back to boolean recording)
skill_analyze_outcome() {
  local pipeline_result="${1:-success}"
  local artifacts_dir="${2:-${ARTIFACTS_DIR:-.claude/pipeline-artifacts}}"
  local failed_stage="${3:-}"
  local error_context="${4:-}"

  local plan_file="$artifacts_dir/skill-plan.json"
  [[ ! -f "$plan_file" ]] && return 1

  if ! type _intelligence_call_claude >/dev/null 2>&1; then
    return 1
  fi

  # Gather context for analysis
  local skill_plan
  skill_plan=$(cat "$plan_file" 2>/dev/null)

  local review_feedback=""
  [[ -f "$artifacts_dir/review-results.log" ]] && review_feedback=$(tail -50 "$artifacts_dir/review-results.log" 2>/dev/null || true)

  local prompt
  prompt="You are a pipeline learning system. Analyze the outcome of this pipeline run and provide skill effectiveness feedback.

## Skill Plan Used
${skill_plan}

## Pipeline Result: ${pipeline_result}
${failed_stage:+Failed at stage: ${failed_stage}}
${error_context:+Error context:
${error_context}}
${review_feedback:+## Review Feedback
${review_feedback}}

## Instructions
1. For each skill in the plan, assess whether it was effective, partially effective, or ineffective.
2. Provide evidence for each verdict (what in the output shows the skill helped or didn't help).
3. Extract a one-sentence learning that would improve future use of this skill.
4. If any skill content could be improved, provide a specific refinement (one sentence to append).
5. For any generated skills, provide a lifecycle verdict: keep, keep_and_refine, or prune.

## Response Format (JSON only, no markdown)
{
  \"skill_effectiveness\": {
    \"skill-name\": {
      \"verdict\": \"effective|partially_effective|ineffective\",
      \"evidence\": \"What in the output shows this\",
      \"learning\": \"One-sentence takeaway for future runs\"
    }
  },
  \"refinements\": [
    {
      \"skill\": \"skill-name\",
      \"addition\": \"One sentence to append to this skill for future use\"
    }
  ],
  \"generated_skill_verdict\": {
    \"generated-skill-name\": \"keep|keep_and_refine|prune\"
  }
}"

  # Cache key: hash the plan + result. Computed in two steps so the RANDOM
  # fallback actually fires when md5sum is unavailable (a trailing 'cut'
  # succeeds on empty input, so the old inline '||' never triggered).
  local outcome_hash
  outcome_hash=$(echo "${skill_plan}${pipeline_result}" | md5sum 2>/dev/null | cut -c1-16)
  [[ -z "$outcome_hash" ]] && outcome_hash="${RANDOM}"
  local cache_key="skill_outcome_${outcome_hash}"

  local result
  if ! result=$(_intelligence_call_claude "$prompt" "$cache_key" 3600 "haiku"); then
    return 1
  fi

  # Validate response
  local valid
  valid=$(echo "$result" | jq 'has("skill_effectiveness")' 2>/dev/null || echo "false")
  if [[ "$valid" != "true" ]]; then
    return 1
  fi

  # Write outcome artifact
  echo "$result" | jq '.' > "$artifacts_dir/skill-outcome.json" 2>/dev/null || true

  # Apply refinements
  skill_apply_refinements "$artifacts_dir/skill-outcome.json" 2>/dev/null || true

  # Apply lifecycle verdicts for generated skills
  skill_apply_lifecycle_verdicts "$artifacts_dir/skill-outcome.json" 2>/dev/null || true

  # Record enriched outcomes to skill memory
  local issue_type
  issue_type=$(jq -r '.issue_type // "backend"' "$plan_file" 2>/dev/null)

  # NOTE: both while-loops below run in pipeline subshells; that is fine here
  # because they only call skill_memory_record and set no variables the
  # caller reads afterwards.
  echo "$result" | jq -r '.skill_effectiveness | to_entries[] | "\(.key) \(.value.verdict)"' 2>/dev/null | while read -r skill_name verdict; do
    [[ -z "$skill_name" ]] && continue
    local outcome="success"
    [[ "$verdict" == "ineffective" ]] && outcome="failure"
    [[ "$verdict" == "partially_effective" ]] && outcome="retry"

    # Record to every stage this skill was planned for. The name is passed
    # via --arg so quotes/backslashes in it cannot break the jq program.
    jq -r --arg n "$skill_name" '.skill_plan | to_entries[] | select(.value | index($n)) | .key' "$plan_file" 2>/dev/null | while read -r stage; do
      skill_memory_record "$issue_type" "$stage" "$skill_name" "$outcome" "1" 2>/dev/null || true
    done
  done

  return 0
}
|
|
718
|
+
|
|
719
|
+
# skill_apply_refinements — Write refinement patches from outcome analysis.
# Appends each suggested addition to $REFINEMENTS_DIR/<skill>.patch.md so
# learnings accumulate across runs (never overwrites).
# $1: path to skill-outcome.json
# Returns: 1 if the outcome file is missing, 0 otherwise.
skill_apply_refinements() {
  local outcome_file="${1:-}"
  [[ ! -f "$outcome_file" ]] && return 1

  mkdir -p "$REFINEMENTS_DIR"

  local ref_count
  ref_count=$(jq '.refinements | length' "$outcome_file" 2>/dev/null || echo "0")
  [[ "$ref_count" =~ ^[0-9]+$ ]] || ref_count=0  # guard against jq error text
  if (( ref_count == 0 )); then
    return 0
  fi

  local i skill_name addition patch_file
  for (( i = 0; i < ref_count; i++ )); do
    skill_name=$(jq -r ".refinements[$i].skill" "$outcome_file" 2>/dev/null)
    addition=$(jq -r ".refinements[$i].addition" "$outcome_file" 2>/dev/null)
    [[ -z "$skill_name" || "$skill_name" == "null" ]] && continue
    [[ -z "$addition" || "$addition" == "null" ]] && continue
    # Skill names originate from LLM output — keep every write inside
    # REFINEMENTS_DIR by rejecting slashes and '..' components.
    [[ "$skill_name" == */* || "$skill_name" == *..* ]] && continue

    patch_file="$REFINEMENTS_DIR/${skill_name}.patch.md"
    # Append (don't overwrite) — accumulate learnings
    {
      echo ""
      echo "### Learned ($(date -u +%Y-%m-%d))"
      echo "$addition"
    } >> "$patch_file"
  done
}
|
|
745
|
+
|
|
746
|
+
# skill_apply_lifecycle_verdicts — Apply keep/prune verdicts for generated skills.
# Reads .generated_skill_verdict from the outcome JSON and deletes any
# generated skill file whose verdict is "prune"; "keep" and "keep_and_refine"
# leave the file alone (refinement content is handled by skill_apply_refinements).
# $1: path to skill-outcome.json
# Returns: 1 if the outcome file is missing, 0 otherwise.
skill_apply_lifecycle_verdicts() {
  local outcome_file="${1:-}"
  [[ ! -f "$outcome_file" ]] && return 1

  local verdicts
  verdicts=$(jq -r '.generated_skill_verdict // {} | to_entries[] | "\(.key) \(.value)"' "$outcome_file" 2>/dev/null)
  [[ -z "$verdicts" ]] && return 0

  local skill_name verdict gen_path
  while read -r skill_name verdict; do
    [[ -z "$skill_name" ]] && continue
    # Names come from LLM output; never let them address a file outside
    # GENERATED_SKILLS_DIR (this path is passed to rm below).
    [[ "$skill_name" == */* || "$skill_name" == *..* ]] && continue
    gen_path="$GENERATED_SKILLS_DIR/${skill_name}.md"

    case "$verdict" in
      prune)
        if [[ -f "$gen_path" ]]; then
          rm -f -- "$gen_path"
          info "Pruned generated skill: ${skill_name}"
        fi
        ;;
      keep)
        # No action needed — skill stays
        ;;
      keep_and_refine)
        # Refinement handled by skill_apply_refinements
        ;;
    esac
  done <<< "$verdicts"
}
|