shipwright-cli 3.1.0 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (283)
  1. package/.claude/agents/code-reviewer.md +2 -0
  2. package/.claude/agents/devops-engineer.md +2 -0
  3. package/.claude/agents/doc-fleet-agent.md +2 -0
  4. package/.claude/agents/pipeline-agent.md +2 -0
  5. package/.claude/agents/shell-script-specialist.md +2 -0
  6. package/.claude/agents/test-specialist.md +2 -0
  7. package/.claude/hooks/agent-crash-capture.sh +32 -0
  8. package/.claude/hooks/post-tool-use.sh +3 -2
  9. package/.claude/hooks/pre-tool-use.sh +35 -3
  10. package/README.md +22 -8
  11. package/claude-code/hooks/config-change.sh +18 -0
  12. package/claude-code/hooks/instructions-reloaded.sh +7 -0
  13. package/claude-code/hooks/worktree-create.sh +25 -0
  14. package/claude-code/hooks/worktree-remove.sh +20 -0
  15. package/config/code-constitution.json +130 -0
  16. package/config/defaults.json +25 -2
  17. package/config/policy.json +1 -1
  18. package/dashboard/middleware/auth.ts +134 -0
  19. package/dashboard/middleware/constants.ts +21 -0
  20. package/dashboard/public/index.html +8 -6
  21. package/dashboard/public/styles.css +176 -97
  22. package/dashboard/routes/auth.ts +38 -0
  23. package/dashboard/server.ts +117 -25
  24. package/dashboard/services/config.ts +26 -0
  25. package/dashboard/services/db.ts +118 -0
  26. package/dashboard/src/canvas/pixel-agent.ts +298 -0
  27. package/dashboard/src/canvas/pixel-sprites.ts +440 -0
  28. package/dashboard/src/canvas/shipyard-effects.ts +367 -0
  29. package/dashboard/src/canvas/shipyard-scene.ts +616 -0
  30. package/dashboard/src/canvas/submarine-layout.ts +267 -0
  31. package/dashboard/src/components/header.ts +8 -7
  32. package/dashboard/src/core/api.ts +5 -0
  33. package/dashboard/src/core/router.ts +1 -0
  34. package/dashboard/src/design/submarine-theme.ts +253 -0
  35. package/dashboard/src/main.ts +2 -0
  36. package/dashboard/src/types/api.ts +12 -1
  37. package/dashboard/src/views/activity.ts +2 -1
  38. package/dashboard/src/views/metrics.ts +69 -1
  39. package/dashboard/src/views/shipyard.ts +39 -0
  40. package/dashboard/types/index.ts +166 -0
  41. package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
  42. package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
  43. package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
  44. package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
  45. package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
  46. package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
  47. package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
  48. package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
  49. package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
  50. package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
  51. package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
  52. package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
  53. package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
  54. package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
  55. package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
  56. package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
  57. package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
  58. package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
  59. package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
  60. package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
  61. package/docs/research/RESEARCH_INDEX.md +439 -0
  62. package/docs/research/RESEARCH_SOURCES.md +440 -0
  63. package/docs/research/RESEARCH_SUMMARY.txt +275 -0
  64. package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
  65. package/package.json +2 -2
  66. package/scripts/lib/adaptive-model.sh +427 -0
  67. package/scripts/lib/adaptive-timeout.sh +316 -0
  68. package/scripts/lib/audit-trail.sh +309 -0
  69. package/scripts/lib/auto-recovery.sh +471 -0
  70. package/scripts/lib/bandit-selector.sh +431 -0
  71. package/scripts/lib/bootstrap.sh +104 -2
  72. package/scripts/lib/causal-graph.sh +455 -0
  73. package/scripts/lib/compat.sh +126 -0
  74. package/scripts/lib/compound-audit.sh +337 -0
  75. package/scripts/lib/constitutional.sh +454 -0
  76. package/scripts/lib/context-budget.sh +359 -0
  77. package/scripts/lib/convergence.sh +594 -0
  78. package/scripts/lib/cost-optimizer.sh +634 -0
  79. package/scripts/lib/daemon-adaptive.sh +14 -2
  80. package/scripts/lib/daemon-dispatch.sh +106 -17
  81. package/scripts/lib/daemon-failure.sh +34 -4
  82. package/scripts/lib/daemon-patrol.sh +25 -4
  83. package/scripts/lib/daemon-poll-github.sh +361 -0
  84. package/scripts/lib/daemon-poll-health.sh +299 -0
  85. package/scripts/lib/daemon-poll.sh +27 -611
  86. package/scripts/lib/daemon-state.sh +119 -66
  87. package/scripts/lib/daemon-triage.sh +10 -0
  88. package/scripts/lib/dod-scorecard.sh +442 -0
  89. package/scripts/lib/error-actionability.sh +300 -0
  90. package/scripts/lib/formal-spec.sh +461 -0
  91. package/scripts/lib/helpers.sh +180 -5
  92. package/scripts/lib/intent-analysis.sh +409 -0
  93. package/scripts/lib/loop-convergence.sh +350 -0
  94. package/scripts/lib/loop-iteration.sh +682 -0
  95. package/scripts/lib/loop-progress.sh +48 -0
  96. package/scripts/lib/loop-restart.sh +185 -0
  97. package/scripts/lib/memory-effectiveness.sh +506 -0
  98. package/scripts/lib/mutation-executor.sh +352 -0
  99. package/scripts/lib/outcome-feedback.sh +521 -0
  100. package/scripts/lib/pipeline-cli.sh +336 -0
  101. package/scripts/lib/pipeline-commands.sh +1216 -0
  102. package/scripts/lib/pipeline-detection.sh +101 -3
  103. package/scripts/lib/pipeline-execution.sh +897 -0
  104. package/scripts/lib/pipeline-github.sh +28 -3
  105. package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
  106. package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
  107. package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
  108. package/scripts/lib/pipeline-intelligence.sh +104 -1138
  109. package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
  110. package/scripts/lib/pipeline-quality-checks.sh +17 -711
  111. package/scripts/lib/pipeline-quality-gates.sh +563 -0
  112. package/scripts/lib/pipeline-stages-build.sh +730 -0
  113. package/scripts/lib/pipeline-stages-delivery.sh +965 -0
  114. package/scripts/lib/pipeline-stages-intake.sh +1133 -0
  115. package/scripts/lib/pipeline-stages-monitor.sh +407 -0
  116. package/scripts/lib/pipeline-stages-review.sh +1022 -0
  117. package/scripts/lib/pipeline-stages.sh +161 -2901
  118. package/scripts/lib/pipeline-state.sh +36 -5
  119. package/scripts/lib/pipeline-util.sh +487 -0
  120. package/scripts/lib/policy-learner.sh +438 -0
  121. package/scripts/lib/process-reward.sh +493 -0
  122. package/scripts/lib/project-detect.sh +649 -0
  123. package/scripts/lib/quality-profile.sh +334 -0
  124. package/scripts/lib/recruit-commands.sh +885 -0
  125. package/scripts/lib/recruit-learning.sh +739 -0
  126. package/scripts/lib/recruit-roles.sh +648 -0
  127. package/scripts/lib/reward-aggregator.sh +458 -0
  128. package/scripts/lib/rl-optimizer.sh +362 -0
  129. package/scripts/lib/root-cause.sh +427 -0
  130. package/scripts/lib/scope-enforcement.sh +445 -0
  131. package/scripts/lib/session-restart.sh +493 -0
  132. package/scripts/lib/skill-memory.sh +300 -0
  133. package/scripts/lib/skill-registry.sh +775 -0
  134. package/scripts/lib/spec-driven.sh +476 -0
  135. package/scripts/lib/test-helpers.sh +18 -7
  136. package/scripts/lib/test-holdout.sh +429 -0
  137. package/scripts/lib/test-optimizer.sh +511 -0
  138. package/scripts/shipwright-file-suggest.sh +45 -0
  139. package/scripts/skills/adversarial-quality.md +61 -0
  140. package/scripts/skills/api-design.md +44 -0
  141. package/scripts/skills/architecture-design.md +50 -0
  142. package/scripts/skills/brainstorming.md +43 -0
  143. package/scripts/skills/data-pipeline.md +44 -0
  144. package/scripts/skills/deploy-safety.md +64 -0
  145. package/scripts/skills/documentation.md +38 -0
  146. package/scripts/skills/frontend-design.md +45 -0
  147. package/scripts/skills/generated/.gitkeep +0 -0
  148. package/scripts/skills/generated/_refinements/.gitkeep +0 -0
  149. package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
  150. package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
  151. package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
  152. package/scripts/skills/generated/cli-version-management.md +29 -0
  153. package/scripts/skills/generated/collection-system-validation.md +99 -0
  154. package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
  155. package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
  156. package/scripts/skills/generated/test-parallelization-detection.md +65 -0
  157. package/scripts/skills/observability.md +79 -0
  158. package/scripts/skills/performance.md +48 -0
  159. package/scripts/skills/pr-quality.md +49 -0
  160. package/scripts/skills/product-thinking.md +43 -0
  161. package/scripts/skills/security-audit.md +49 -0
  162. package/scripts/skills/systematic-debugging.md +40 -0
  163. package/scripts/skills/testing-strategy.md +47 -0
  164. package/scripts/skills/two-stage-review.md +52 -0
  165. package/scripts/skills/validation-thoroughness.md +55 -0
  166. package/scripts/sw +9 -3
  167. package/scripts/sw-activity.sh +9 -8
  168. package/scripts/sw-adaptive.sh +8 -7
  169. package/scripts/sw-adversarial.sh +2 -1
  170. package/scripts/sw-architecture-enforcer.sh +3 -1
  171. package/scripts/sw-auth.sh +12 -2
  172. package/scripts/sw-autonomous.sh +5 -1
  173. package/scripts/sw-changelog.sh +4 -1
  174. package/scripts/sw-checkpoint.sh +2 -1
  175. package/scripts/sw-ci.sh +15 -6
  176. package/scripts/sw-cleanup.sh +4 -26
  177. package/scripts/sw-code-review.sh +45 -20
  178. package/scripts/sw-connect.sh +2 -1
  179. package/scripts/sw-context.sh +2 -1
  180. package/scripts/sw-cost.sh +107 -5
  181. package/scripts/sw-daemon.sh +71 -11
  182. package/scripts/sw-dashboard.sh +3 -1
  183. package/scripts/sw-db.sh +71 -20
  184. package/scripts/sw-decide.sh +8 -2
  185. package/scripts/sw-decompose.sh +360 -17
  186. package/scripts/sw-deps.sh +4 -1
  187. package/scripts/sw-developer-simulation.sh +4 -1
  188. package/scripts/sw-discovery.sh +378 -5
  189. package/scripts/sw-doc-fleet.sh +4 -1
  190. package/scripts/sw-docs-agent.sh +3 -1
  191. package/scripts/sw-docs.sh +2 -1
  192. package/scripts/sw-doctor.sh +453 -2
  193. package/scripts/sw-dora.sh +4 -1
  194. package/scripts/sw-durable.sh +12 -7
  195. package/scripts/sw-e2e-orchestrator.sh +17 -16
  196. package/scripts/sw-eventbus.sh +13 -4
  197. package/scripts/sw-evidence.sh +364 -12
  198. package/scripts/sw-feedback.sh +550 -9
  199. package/scripts/sw-fix.sh +20 -1
  200. package/scripts/sw-fleet-discover.sh +6 -2
  201. package/scripts/sw-fleet-viz.sh +9 -4
  202. package/scripts/sw-fleet.sh +5 -1
  203. package/scripts/sw-github-app.sh +18 -4
  204. package/scripts/sw-github-checks.sh +3 -2
  205. package/scripts/sw-github-deploy.sh +3 -2
  206. package/scripts/sw-github-graphql.sh +18 -7
  207. package/scripts/sw-guild.sh +5 -1
  208. package/scripts/sw-heartbeat.sh +5 -30
  209. package/scripts/sw-hello.sh +67 -0
  210. package/scripts/sw-hygiene.sh +10 -3
  211. package/scripts/sw-incident.sh +273 -5
  212. package/scripts/sw-init.sh +18 -2
  213. package/scripts/sw-instrument.sh +10 -2
  214. package/scripts/sw-intelligence.sh +44 -7
  215. package/scripts/sw-jira.sh +5 -1
  216. package/scripts/sw-launchd.sh +2 -1
  217. package/scripts/sw-linear.sh +4 -1
  218. package/scripts/sw-logs.sh +4 -1
  219. package/scripts/sw-loop.sh +436 -1076
  220. package/scripts/sw-memory.sh +357 -3
  221. package/scripts/sw-mission-control.sh +6 -1
  222. package/scripts/sw-model-router.sh +483 -27
  223. package/scripts/sw-otel.sh +15 -4
  224. package/scripts/sw-oversight.sh +14 -5
  225. package/scripts/sw-patrol-meta.sh +334 -0
  226. package/scripts/sw-pipeline-composer.sh +7 -1
  227. package/scripts/sw-pipeline-vitals.sh +12 -6
  228. package/scripts/sw-pipeline.sh +54 -2653
  229. package/scripts/sw-pm.sh +16 -8
  230. package/scripts/sw-pr-lifecycle.sh +2 -1
  231. package/scripts/sw-predictive.sh +17 -5
  232. package/scripts/sw-prep.sh +185 -2
  233. package/scripts/sw-ps.sh +5 -25
  234. package/scripts/sw-public-dashboard.sh +17 -4
  235. package/scripts/sw-quality.sh +14 -6
  236. package/scripts/sw-reaper.sh +8 -25
  237. package/scripts/sw-recruit.sh +156 -2303
  238. package/scripts/sw-regression.sh +19 -12
  239. package/scripts/sw-release-manager.sh +3 -1
  240. package/scripts/sw-release.sh +4 -1
  241. package/scripts/sw-remote.sh +3 -1
  242. package/scripts/sw-replay.sh +7 -1
  243. package/scripts/sw-retro.sh +158 -1
  244. package/scripts/sw-review-rerun.sh +3 -1
  245. package/scripts/sw-scale.sh +14 -5
  246. package/scripts/sw-security-audit.sh +6 -1
  247. package/scripts/sw-self-optimize.sh +173 -6
  248. package/scripts/sw-session.sh +9 -3
  249. package/scripts/sw-setup.sh +3 -1
  250. package/scripts/sw-stall-detector.sh +406 -0
  251. package/scripts/sw-standup.sh +15 -7
  252. package/scripts/sw-status.sh +3 -1
  253. package/scripts/sw-strategic.sh +14 -6
  254. package/scripts/sw-stream.sh +13 -4
  255. package/scripts/sw-swarm.sh +20 -7
  256. package/scripts/sw-team-stages.sh +13 -6
  257. package/scripts/sw-templates.sh +7 -31
  258. package/scripts/sw-testgen.sh +17 -6
  259. package/scripts/sw-tmux-pipeline.sh +4 -1
  260. package/scripts/sw-tmux-role-color.sh +2 -0
  261. package/scripts/sw-tmux-status.sh +1 -1
  262. package/scripts/sw-tmux.sh +37 -1
  263. package/scripts/sw-trace.sh +3 -1
  264. package/scripts/sw-tracker-github.sh +3 -0
  265. package/scripts/sw-tracker-jira.sh +3 -0
  266. package/scripts/sw-tracker-linear.sh +3 -0
  267. package/scripts/sw-tracker.sh +3 -1
  268. package/scripts/sw-triage.sh +3 -2
  269. package/scripts/sw-upgrade.sh +3 -1
  270. package/scripts/sw-ux.sh +5 -2
  271. package/scripts/sw-webhook.sh +5 -2
  272. package/scripts/sw-widgets.sh +9 -4
  273. package/scripts/sw-worktree.sh +15 -3
  274. package/scripts/test-skill-injection.sh +1233 -0
  275. package/templates/pipelines/autonomous.json +27 -3
  276. package/templates/pipelines/cost-aware.json +34 -8
  277. package/templates/pipelines/deployed.json +12 -0
  278. package/templates/pipelines/enterprise.json +12 -0
  279. package/templates/pipelines/fast.json +6 -0
  280. package/templates/pipelines/full.json +27 -3
  281. package/templates/pipelines/hotfix.json +6 -0
  282. package/templates/pipelines/standard.json +12 -0
  283. package/templates/pipelines/tdd.json +12 -0
@@ -6,8 +6,10 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="3.1.0"
9
+ # shellcheck disable=SC2034
10
+ VERSION="3.3.0"
10
11
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
12
+ # shellcheck disable=SC2034
11
13
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
14
 
13
15
  # ─── Cross-platform compatibility ──────────────────────────────────────────
@@ -29,18 +31,24 @@ fi
29
31
  if [[ "$(type -t emit_event 2>/dev/null)" != "function" ]]; then
30
32
  emit_event() {
31
33
  local event_type="$1"; shift; mkdir -p "${HOME}/.shipwright"
32
- local payload="{\"ts\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\",\"type\":\"$event_type\""
34
+ local payload
35
+ payload="{\"ts\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\",\"type\":\"$event_type\""
33
36
  while [[ $# -gt 0 ]]; do local key="${1%%=*}" val="${1#*=}"; payload="${payload},\"${key}\":\"${val}\""; shift; done
34
37
  echo "${payload}}" >> "${HOME}/.shipwright/events.jsonl"
35
38
  }
36
39
  fi
37
40
  # ─── Structured Event Log ──────────────────────────────────────────────────
41
+ # shellcheck disable=SC2034
38
42
  EVENTS_FILE="${HOME}/.shipwright/events.jsonl"
39
43
 
40
44
  # ─── Configuration ─────────────────────────────────────────────────────────
45
+ # shellcheck disable=SC2034
41
46
  COMPLEXITY_THRESHOLD=70 # Decompose if complexity > this
47
+ # shellcheck disable=SC2034
42
48
  HOURS_THRESHOLD=8 # Decompose if estimated hours > this
49
+ # shellcheck disable=SC2034
43
50
  MAX_SUBTASKS=5
51
+ # shellcheck disable=SC2034
44
52
  MIN_SUBTASKS=3
45
53
  DECOMPOSE_LABEL="subtask"
46
54
  DECOMPOSED_MARKER_LABEL="decomposed"
@@ -96,7 +104,7 @@ decompose_analyze() {
96
104
  local issue_num="$1"
97
105
 
98
106
  if [[ "$NO_GITHUB" == "true" ]]; then
99
- # Mock data for testing (JSON only, no messages)
107
+ # Mock data for testing with dependencies for DAG features
100
108
  echo '{
101
109
  "issue_number": '$issue_num',
102
110
  "complexity_score": 85,
@@ -106,15 +114,27 @@ decompose_analyze() {
106
114
  "subtasks": [
107
115
  {
108
116
  "title": "Subtask 1: Design phase",
109
- "description": "Plan and document the new architecture"
117
+ "description": "Plan and document the new architecture",
118
+ "acceptance_criteria": ["Design approved", "Architecture documented"],
119
+ "test_approach": "Code review",
120
+ "depends_on": [],
121
+ "estimated_hours": 3
110
122
  },
111
123
  {
112
124
  "title": "Subtask 2: Implementation phase",
113
- "description": "Implement core changes"
125
+ "description": "Implement core changes",
126
+ "acceptance_criteria": ["Core features working", "Tests pass"],
127
+ "test_approach": "Unit tests",
128
+ "depends_on": [0],
129
+ "estimated_hours": 6
114
130
  },
115
131
  {
116
132
  "title": "Subtask 3: Integration & testing",
117
- "description": "Integrate changes and add tests"
133
+ "description": "Integrate changes and add tests",
134
+ "acceptance_criteria": ["Integration complete", "E2E tests pass"],
135
+ "test_approach": "Integration tests",
136
+ "depends_on": [1],
137
+ "estimated_hours": 3
118
138
  }
119
139
  ]
120
140
  }'
@@ -146,13 +166,21 @@ You are an issue complexity analyzer. Analyze the GitHub issue below and determi
146
166
  1. Complexity score (1-100): How intricate/multi-faceted is the work?
147
167
  2. Estimated hours (1-100): How long would this realistically take?
148
168
  3. Should decompose: Is complexity > 70 OR hours > 8?
149
- 4. If should decompose: Generate 3-5 focused, independent subtasks
169
+ 4. If should decompose: Generate 3-5 focused subtasks with explicit dependencies
170
+
171
+ For dependencies (DAG scheduling):
172
+ - Index subtasks 0, 1, 2, ... in array order
173
+ - Each subtask lists indices of tasks it depends on in "depends_on" array
174
+ - Empty "depends_on" means no dependencies (can start immediately)
175
+ - Task N can only depend on tasks 0..N-1 (no circular dependencies)
176
+ - Examples: task 2 depends on [0, 1]; task 1 depends on [0]; task 0 depends on []
150
177
 
151
178
  Each subtask should be:
152
- - Self-contained (can be worked on independently)
179
+ - Self-contained (can be worked on after dependencies complete)
153
180
  - Completable in one pipeline run (~20 iterations max)
154
181
  - Have clear acceptance criteria
155
182
  - Include test strategy
183
+ - Have realistic estimated_hours for critical path analysis
156
184
 
157
185
  Return ONLY valid JSON (no markdown, no explanation):
158
186
  {
@@ -166,7 +194,9 @@ Return ONLY valid JSON (no markdown, no explanation):
166
194
  "title": "Subtask N: <clear title>",
167
195
  "description": "<1-2 sentences describing the work>",
168
196
  "acceptance_criteria": ["criterion 1", "criterion 2"],
169
- "test_approach": "<how to validate this subtask>"
197
+ "test_approach": "<how to validate this subtask>",
198
+ "depends_on": [<list of task indices, or empty>],
199
+ "estimated_hours": <1-100>
170
200
  }
171
201
  ]
172
202
  }
@@ -333,6 +363,203 @@ decompose_mark_decomposed() {
333
363
  fi
334
364
  }
335
365
 
366
+ # ─── DAG Validation: Check for cycles ──────────────────────────────────────
367
+ decompose_validate_dag() {
368
+ local analysis_json="$1"
369
+
370
+ # Validate DAG structure: all depends_on indices must be < current index (no cycles)
371
+ jq -c '
372
+ .subtasks as $tasks |
373
+ ($tasks | length) as $n |
374
+ (
375
+ reduce range(0; $n) as $i (
376
+ {"valid": true, "error": null};
377
+ if .valid then
378
+ (
379
+ $tasks[$i].depends_on // []
380
+ ) as $deps |
381
+ (
382
+ reduce $deps[] as $dep (
383
+ .;
384
+ if (.valid) then
385
+ if $dep >= $i then
386
+ .valid = false |
387
+ .error = "invalid_dependency: task \($i) depends on task \($dep) at same or later index"
388
+ elif $dep < 0 or $dep >= $n then
389
+ .valid = false |
390
+ .error = "out_of_range: task \($i) depends on nonexistent task \($dep)"
391
+ else . end
392
+ else . end
393
+ )
394
+ )
395
+ else . end
396
+ )
397
+ )
398
+ ' <<< "$analysis_json" 2>/dev/null || echo '{"valid": false, "error": "json_parse_error"}'
399
+ }
400
+
401
+ # ─── Topological Sort: Order subtasks into execution waves ──────────────────
402
+ decompose_topo_sort() {
403
+ local analysis_json="$1"
404
+
405
+ # Validate DAG first
406
+ local validation
407
+ validation=$(decompose_validate_dag "$analysis_json")
408
+ if [[ "$(echo "$validation" | jq -r '.valid' 2>/dev/null)" != "true" ]]; then
409
+ error "DAG validation failed: $(echo "$validation" | jq -r '.error' 2>/dev/null)"
410
+ echo '{"error": "invalid_dag"}'
411
+ return 1
412
+ fi
413
+
414
+ # Calculate depth (wave) for each task: max depth of dependencies + 1
415
+ jq -c '
416
+ .subtasks as $tasks |
417
+ ($tasks | length) as $n |
418
+ (
419
+ reduce range(0; $n) as $i (
420
+ {};
421
+ . as $depths |
422
+ (
423
+ if ($tasks[$i].depends_on // [] | length) == 0 then
424
+ 0
425
+ else
426
+ ([ $tasks[$i].depends_on[] | $depths[. | tostring] // 0 ] | max) + 1
427
+ end
428
+ ) as $depth |
429
+ $depths + {($i | tostring): $depth}
430
+ )
431
+ ) as $depths |
432
+ (
433
+ # Group tasks by depth (wave)
434
+ reduce range(0; $n) as $i (
435
+ {};
436
+ . as $wave_map |
437
+ ($depths[$i | tostring] | tostring) as $wave_key |
438
+ $wave_map + {($wave_key): (($wave_map[$wave_key] // []) + [$i])}
439
+ )
440
+ ) as $wave_map |
441
+ (
442
+ # Convert to array of waves, sorted
443
+ [
444
+ ($wave_map | keys[] | tonumber) as $wave |
445
+ {
446
+ "wave": ($wave + 1),
447
+ "tasks": ($wave_map[$wave | tostring] | sort)
448
+ }
449
+ ] | sort_by(.wave)
450
+ ) |
451
+ {
452
+ "waves": .,
453
+ "total_tasks": $n,
454
+ "max_wave": (map(.wave) | max)
455
+ }
456
+ ' <<< "$analysis_json" 2>/dev/null
457
+ }
458
+
459
+ # ─── Critical Path Analysis: Find bottleneck tasks ──────────────────────────
460
+ decompose_critical_path() {
461
+ local analysis_json="$1"
462
+
463
+ # Sum estimated_hours for all tasks as total critical path
464
+ jq -c '{
465
+ "critical_path_hours": ([.subtasks[].estimated_hours // 1] | add),
466
+ "total_tasks": (.subtasks | length),
467
+ "bottleneck_tasks": (
468
+ [
469
+ range(0; .subtasks | length) as $i |
470
+ select(.subtasks[$i].estimated_hours // 1 >= 4) |
471
+ {
472
+ "index": $i,
473
+ "title": .subtasks[$i].title,
474
+ "hours": (.subtasks[$i].estimated_hours // 1)
475
+ }
476
+ ]
477
+ )
478
+ }' <<< "$analysis_json" 2>/dev/null
479
+ }
480
+
481
+ # ─── DAG Visualization: Render as ASCII or Mermaid ──────────────────────────
482
+ decompose_visualize() {
483
+ local analysis_json="$1"
484
+ local format="${2:-text}"
485
+
486
+ case "$format" in
487
+ text)
488
+ jq -r '
489
+ .subtasks as $tasks |
490
+ "Dependencies DAG - Issue " + (.issue_number | tostring) + "\n" +
491
+ "==================================================\n" +
492
+ (
493
+ reduce range(0; $tasks | length) as $i (
494
+ "";
495
+ . + "[\($i)] \($tasks[$i].title)\n" +
496
+ (
497
+ if ($tasks[$i].depends_on // [] | length) > 0 then
498
+ " depends on: \($tasks[$i].depends_on | map("[" + (. | tostring) + "]") | join(", "))\n"
499
+ else
500
+ " (no dependencies)\n"
501
+ end
502
+ )
503
+ )
504
+ )
505
+ ' <<< "$analysis_json" 2>/dev/null
506
+ ;;
507
+ mermaid)
508
+ jq -r '
509
+ .subtasks as $tasks |
510
+ "graph TD\n" +
511
+ (
512
+ reduce range(0; $tasks | length) as $i (
513
+ "";
514
+ . + " task\($i)[\"[\($i)] \($tasks[$i].title)\"]\n" +
515
+ (
516
+ if ($tasks[$i].depends_on // [] | length) > 0 then
517
+ ($tasks[$i].depends_on | map(" task\(.) --> task\($i)\n") | join(""))
518
+ else "" end
519
+ )
520
+ )
521
+ )
522
+ ' <<< "$analysis_json" 2>/dev/null
523
+ ;;
524
+ *)
525
+ error "Unknown format: $format (use 'text' or 'mermaid')"
526
+ return 1
527
+ ;;
528
+ esac
529
+ }
530
+
531
+ # ─── Schedule Creation: Generate execution plan ──────────────────────────────
532
+ decompose_schedule() {
533
+ local analysis_json="$1"
534
+ local parent_issue="${2:-}"
535
+
536
+ # Get topological sort with waves
537
+ local waves_json
538
+ waves_json=$(decompose_topo_sort "$analysis_json") || return 1
539
+
540
+ # Create schedule file
541
+ local state_file
542
+ state_file="${REPO_DIR}/.claude/pipeline-artifacts/decompose-schedule-$(date +%s).json"
543
+ mkdir -p "$(dirname "$state_file")"
544
+
545
+ # Write schedule state
546
+ jq -c '{
547
+ "issue": ('$parent_issue'),
548
+ "created_at": now | todate,
549
+ "waves": .waves,
550
+ "task_status": (
551
+ reduce range(0; .total_tasks) as $i (
552
+ {};
553
+ . + {($i | tostring): "pending"}
554
+ )
555
+ )
556
+ }' <<< "$waves_json" > "$state_file"
557
+
558
+ info "Schedule created: $(jq '.total_tasks' <<< "$waves_json") tasks in $(jq '.max_wave' <<< "$waves_json") waves"
559
+ echo "$state_file"
560
+ emit_event "decompose.scheduled" "issue=$parent_issue" "total_tasks=$(jq '.total_tasks' <<< "$waves_json")" "waves=$(jq '.max_wave' <<< "$waves_json")"
561
+ }
562
+
336
563
  # ─── Main: Analyze Only ─────────────────────────────────────────────────────
337
564
  cmd_analyze() {
338
565
  local issue_num="${1:-}"
@@ -472,6 +699,108 @@ cmd_auto() {
472
699
  return 0
473
700
  }
474
701
 
702
+ # ─── Main: Visualize DAG ────────────────────────────────────────────────────
703
+ cmd_visualize() {
704
+ local json_file="${1:-}"
705
+ local format="${2:-text}"
706
+
707
+ if [[ -z "$json_file" ]]; then
708
+ error "Usage: sw-decompose.sh visualize <analysis-json-file> [text|mermaid]"
709
+ return 1
710
+ fi
711
+
712
+ if [[ ! -f "$json_file" ]]; then
713
+ error "File not found: $json_file"
714
+ return 1
715
+ fi
716
+
717
+ echo ""
718
+ decompose_visualize "$(cat "$json_file")" "$format" || return 1
719
+ echo ""
720
+ emit_event "decompose.visualized" "file=$json_file" "format=$format"
721
+ }
722
+
723
+ # ─── Main: Critical Path Analysis ───────────────────────────────────────────
724
+ cmd_critical_path() {
725
+ local json_file="${1:-}"
726
+
727
+ if [[ -z "$json_file" ]]; then
728
+ error "Usage: sw-decompose.sh critical-path <analysis-json-file>"
729
+ return 1
730
+ fi
731
+
732
+ if [[ ! -f "$json_file" ]]; then
733
+ error "File not found: $json_file"
734
+ return 1
735
+ fi
736
+
737
+ echo ""
738
+ info "Critical Path Analysis"
739
+ echo ""
740
+
741
+ decompose_critical_path "$(cat "$json_file")" | jq '.' 2>/dev/null || return 1
742
+
743
+ echo ""
744
+ emit_event "decompose.critical_path_analyzed" "file=$json_file"
745
+ }
746
+
747
+ # ─── Main: DAG Scheduling ──────────────────────────────────────────────────
748
+ cmd_schedule() {
749
+ local json_file="${1:-}"
750
+ local parent_issue="${2:-}"
751
+
752
+ if [[ -z "$json_file" ]]; then
753
+ error "Usage: sw-decompose.sh schedule <analysis-json-file> [issue-number]"
754
+ return 1
755
+ fi
756
+
757
+ if [[ ! -f "$json_file" ]]; then
758
+ error "File not found: $json_file"
759
+ return 1
760
+ fi
761
+
762
+ local json_content
763
+ json_content=$(cat "$json_file")
764
+
765
+ # Extract issue number if not provided
766
+ if [[ -z "$parent_issue" ]]; then
767
+ parent_issue=$(echo "$json_content" | jq -r '.issue_number' 2>/dev/null || echo "")
768
+ fi
769
+
770
+ echo ""
771
+ info "DAG Scheduling"
772
+ echo ""
773
+
774
+ # Validate DAG
775
+ local validation
776
+ validation=$(decompose_validate_dag "$json_content")
777
+ if [[ "$(echo "$validation" | jq -r '.valid' 2>/dev/null)" != "true" ]]; then
778
+ error "Invalid DAG: $(echo "$validation" | jq -r '.error' 2>/dev/null)"
779
+ return 1
780
+ fi
781
+ success "DAG is acyclic"
782
+ echo ""
783
+
784
+ # Show visualization
785
+ decompose_visualize "$json_content" "text"
786
+ echo ""
787
+
788
+ # Get topological sort
789
+ local waves
790
+ waves=$(decompose_topo_sort "$json_content") || return 1
791
+
792
+ info "Execution Waves:"
793
+ echo "$waves" | jq -r '.waves[] | " Wave \(.wave): Tasks \(.tasks | map("[" + (. | tostring) + "]") | join(", "))"'
794
+ echo ""
795
+
796
+ # Create schedule
797
+ local schedule_file
798
+ schedule_file=$(decompose_schedule "$json_content" "$parent_issue") || return 1
799
+
800
+ success "Schedule saved to: $schedule_file"
801
+ echo ""
802
+ }
803
+
475
804
  # ─── CLI Router ──────────────────────────────────────────────────────────────
476
805
  main() {
477
806
  local cmd="${1:-help}"
@@ -486,22 +815,36 @@ main() {
486
815
  auto)
487
816
  cmd_auto "${2:-}"
488
817
  ;;
818
+ schedule)
819
+ cmd_schedule "${2:-}" "${3:-}"
820
+ ;;
821
+ critical-path)
822
+ cmd_critical_path "${2:-}"
823
+ ;;
824
+ visualize)
825
+ cmd_visualize "${2:-}" "${3:-text}"
826
+ ;;
489
827
  help|--help|-h)
490
828
  echo ""
491
- echo -e "${CYAN}${BOLD}shipwright decompose${RESET} — Issue Complexity Analysis & Decomposition"
829
+ echo -e "${CYAN}${BOLD}shipwright decompose${RESET} — Issue Complexity & DAG Scheduling"
492
830
  echo ""
493
831
  echo -e "${BOLD}USAGE${RESET}"
494
- echo -e " ${CYAN}sw decompose${RESET} <command> <issue-number>"
832
+ echo -e " ${CYAN}sw decompose${RESET} <command> [options]"
495
833
  echo ""
496
834
  echo -e "${BOLD}COMMANDS${RESET}"
497
- echo -e " ${CYAN}analyze${RESET} <num> Analyze complexity without creating issues"
498
- echo -e " ${CYAN}decompose${RESET} <num> Analyze + create subtask issues if needed"
499
- echo -e " ${CYAN}auto${RESET} <num> Daemon mode: silent decomposition (returns 0)"
835
+ echo -e " ${CYAN}analyze${RESET} <num> Analyze complexity without creating issues"
836
+ echo -e " ${CYAN}decompose${RESET} <num> Analyze + create subtask issues if needed"
837
+ echo -e " ${CYAN}auto${RESET} <num> Daemon mode: silent decomposition"
838
+ echo -e " ${CYAN}schedule${RESET} <file> [issue] Create execution schedule from analysis JSON"
839
+ echo -e " ${CYAN}critical-path${RESET} <file> Analyze critical path (bottlenecks)"
840
+ echo -e " ${CYAN}visualize${RESET} <file> [fmt] Render DAG (text or mermaid format)"
500
841
  echo ""
501
842
  echo -e "${BOLD}EXAMPLES${RESET}"
502
- echo -e " ${DIM}sw decompose analyze 42${RESET} # See complexity score and reasoning"
503
- echo -e " ${DIM}sw decompose decompose 42${RESET} # Create subtasks for issue #42"
504
- echo -e " ${DIM}sw decompose auto 42${RESET} # Used by daemon (no output)"
843
+ echo -e " ${DIM}sw decompose analyze 42${RESET}"
844
+ echo -e " ${DIM}sw decompose decompose 42${RESET}"
845
+ echo -e " ${DIM}sw decompose schedule analysis.json 42${RESET}"
846
+ echo -e " ${DIM}sw decompose critical-path analysis.json${RESET}"
847
+ echo -e " ${DIM}sw decompose visualize analysis.json mermaid${RESET}"
505
848
  echo ""
506
849
  ;;
507
850
  --version|-v)
@@ -6,7 +6,8 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="3.1.0"
9
+ # shellcheck disable=SC2034
10
+ VERSION="3.3.0"
10
11
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
12
 
12
13
  # ─── Cross-platform compatibility ──────────────────────────────────────────
@@ -26,8 +27,10 @@ if [[ "$(type -t now_iso 2>/dev/null)" != "function" ]]; then
26
27
  now_epoch() { date +%s; }
27
28
  fi
28
29
  # ─── Defaults ───────────────────────────────────────────────────────────────
30
+ # shellcheck disable=SC2034
29
31
  DEPS_DIR="${HOME}/.shipwright/deps"
30
32
  TEST_CMD=""
33
+ # shellcheck disable=SC2034
31
34
  AUTO_MERGE=false
32
35
  DRY_RUN=false
33
36
 
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="3.1.0"
9
+ VERSION="3.3.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
12
 
@@ -29,6 +29,7 @@ fi
29
29
  if [[ "$(type -t emit_event 2>/dev/null)" != "function" ]]; then
30
30
  emit_event() {
31
31
  local event_type="$1"; shift; mkdir -p "${HOME}/.shipwright"
32
+ # shellcheck disable=SC2155
32
33
  local payload="{\"ts\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\",\"type\":\"$event_type\""
33
34
  while [[ $# -gt 0 ]]; do local key="${1%%=*}" val="${1#*=}"; payload="${payload},\"${key}\":\"${val}\""; shift; done
34
35
  echo "${payload}}" >> "${HOME}/.shipwright/events.jsonl"
@@ -40,6 +41,7 @@ if [[ -f "$SCRIPT_DIR/sw-intelligence.sh" ]]; then
40
41
  fi
41
42
 
42
43
  # ─── Configuration ───────────────────────────────────────────────────────
44
+ # shellcheck disable=SC2034
43
45
  MAX_SIMULATION_ROUNDS="${SIMULATION_MAX_ROUNDS:-3}"
44
46
 
45
47
  _simulation_enabled() {
@@ -110,6 +112,7 @@ simulation_review() {
110
112
  local prompt
111
113
  prompt=$(_build_persona_prompt "$persona" "$pr_diff" "$pr_description")
112
114
 
115
+ # shellcheck disable=SC2155
113
116
  local cache_key="simulation_${persona}_$(echo -n "$pr_diff" | head -c 200 | _intelligence_md5)"
114
117
  local result
115
118
  if ! result=$(_intelligence_call_claude "$prompt" "$cache_key" 300); then