shipwright-cli 2.3.1 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (162)
  1. package/README.md +95 -28
  2. package/completions/_shipwright +1 -1
  3. package/completions/shipwright.bash +3 -8
  4. package/completions/shipwright.fish +1 -1
  5. package/config/defaults.json +111 -0
  6. package/config/event-schema.json +81 -0
  7. package/config/policy.json +155 -2
  8. package/config/policy.schema.json +162 -1
  9. package/dashboard/coverage/coverage-summary.json +14 -0
  10. package/dashboard/public/index.html +1 -1
  11. package/dashboard/server.ts +306 -17
  12. package/dashboard/src/components/charts/bar.test.ts +79 -0
  13. package/dashboard/src/components/charts/donut.test.ts +68 -0
  14. package/dashboard/src/components/charts/pipeline-rail.test.ts +117 -0
  15. package/dashboard/src/components/charts/sparkline.test.ts +125 -0
  16. package/dashboard/src/core/api.test.ts +309 -0
  17. package/dashboard/src/core/helpers.test.ts +301 -0
  18. package/dashboard/src/core/router.test.ts +307 -0
  19. package/dashboard/src/core/router.ts +7 -0
  20. package/dashboard/src/core/sse.test.ts +144 -0
  21. package/dashboard/src/views/metrics.test.ts +186 -0
  22. package/dashboard/src/views/overview.test.ts +173 -0
  23. package/dashboard/src/views/pipelines.test.ts +183 -0
  24. package/dashboard/src/views/team.test.ts +253 -0
  25. package/dashboard/vitest.config.ts +14 -5
  26. package/docs/TIPS.md +1 -1
  27. package/docs/patterns/README.md +1 -1
  28. package/package.json +15 -5
  29. package/scripts/adapters/docker-deploy.sh +1 -1
  30. package/scripts/adapters/tmux-adapter.sh +11 -1
  31. package/scripts/adapters/wezterm-adapter.sh +1 -1
  32. package/scripts/check-version-consistency.sh +1 -1
  33. package/scripts/lib/architecture.sh +126 -0
  34. package/scripts/lib/bootstrap.sh +75 -0
  35. package/scripts/lib/compat.sh +89 -6
  36. package/scripts/lib/config.sh +91 -0
  37. package/scripts/lib/daemon-adaptive.sh +3 -3
  38. package/scripts/lib/daemon-dispatch.sh +39 -16
  39. package/scripts/lib/daemon-health.sh +1 -1
  40. package/scripts/lib/daemon-patrol.sh +24 -12
  41. package/scripts/lib/daemon-poll.sh +37 -25
  42. package/scripts/lib/daemon-state.sh +115 -23
  43. package/scripts/lib/daemon-triage.sh +30 -8
  44. package/scripts/lib/fleet-failover.sh +63 -0
  45. package/scripts/lib/helpers.sh +30 -6
  46. package/scripts/lib/pipeline-detection.sh +2 -2
  47. package/scripts/lib/pipeline-github.sh +9 -9
  48. package/scripts/lib/pipeline-intelligence.sh +85 -35
  49. package/scripts/lib/pipeline-quality-checks.sh +16 -16
  50. package/scripts/lib/pipeline-quality.sh +1 -1
  51. package/scripts/lib/pipeline-stages.sh +242 -28
  52. package/scripts/lib/pipeline-state.sh +40 -4
  53. package/scripts/lib/test-helpers.sh +247 -0
  54. package/scripts/postinstall.mjs +3 -11
  55. package/scripts/sw +10 -4
  56. package/scripts/sw-activity.sh +1 -11
  57. package/scripts/sw-adaptive.sh +109 -85
  58. package/scripts/sw-adversarial.sh +4 -14
  59. package/scripts/sw-architecture-enforcer.sh +1 -11
  60. package/scripts/sw-auth.sh +8 -17
  61. package/scripts/sw-autonomous.sh +111 -49
  62. package/scripts/sw-changelog.sh +1 -11
  63. package/scripts/sw-checkpoint.sh +144 -20
  64. package/scripts/sw-ci.sh +2 -12
  65. package/scripts/sw-cleanup.sh +13 -17
  66. package/scripts/sw-code-review.sh +16 -36
  67. package/scripts/sw-connect.sh +5 -12
  68. package/scripts/sw-context.sh +9 -26
  69. package/scripts/sw-cost.sh +6 -16
  70. package/scripts/sw-daemon.sh +75 -70
  71. package/scripts/sw-dashboard.sh +57 -17
  72. package/scripts/sw-db.sh +506 -15
  73. package/scripts/sw-decompose.sh +1 -11
  74. package/scripts/sw-deps.sh +15 -25
  75. package/scripts/sw-developer-simulation.sh +1 -11
  76. package/scripts/sw-discovery.sh +112 -30
  77. package/scripts/sw-doc-fleet.sh +7 -17
  78. package/scripts/sw-docs-agent.sh +6 -16
  79. package/scripts/sw-docs.sh +4 -12
  80. package/scripts/sw-doctor.sh +134 -43
  81. package/scripts/sw-dora.sh +11 -19
  82. package/scripts/sw-durable.sh +35 -52
  83. package/scripts/sw-e2e-orchestrator.sh +11 -27
  84. package/scripts/sw-eventbus.sh +115 -115
  85. package/scripts/sw-evidence.sh +748 -0
  86. package/scripts/sw-feedback.sh +3 -13
  87. package/scripts/sw-fix.sh +2 -20
  88. package/scripts/sw-fleet-discover.sh +1 -11
  89. package/scripts/sw-fleet-viz.sh +10 -18
  90. package/scripts/sw-fleet.sh +13 -17
  91. package/scripts/sw-github-app.sh +6 -16
  92. package/scripts/sw-github-checks.sh +1 -11
  93. package/scripts/sw-github-deploy.sh +1 -11
  94. package/scripts/sw-github-graphql.sh +2 -12
  95. package/scripts/sw-guild.sh +1 -11
  96. package/scripts/sw-heartbeat.sh +49 -12
  97. package/scripts/sw-hygiene.sh +45 -43
  98. package/scripts/sw-incident.sh +284 -67
  99. package/scripts/sw-init.sh +35 -37
  100. package/scripts/sw-instrument.sh +1 -11
  101. package/scripts/sw-intelligence.sh +362 -51
  102. package/scripts/sw-jira.sh +5 -14
  103. package/scripts/sw-launchd.sh +2 -12
  104. package/scripts/sw-linear.sh +8 -17
  105. package/scripts/sw-logs.sh +4 -12
  106. package/scripts/sw-loop.sh +641 -90
  107. package/scripts/sw-memory.sh +243 -17
  108. package/scripts/sw-mission-control.sh +2 -12
  109. package/scripts/sw-model-router.sh +73 -34
  110. package/scripts/sw-otel.sh +11 -21
  111. package/scripts/sw-oversight.sh +1 -11
  112. package/scripts/sw-patrol-meta.sh +5 -11
  113. package/scripts/sw-pipeline-composer.sh +7 -17
  114. package/scripts/sw-pipeline-vitals.sh +1 -11
  115. package/scripts/sw-pipeline.sh +478 -122
  116. package/scripts/sw-pm.sh +2 -12
  117. package/scripts/sw-pr-lifecycle.sh +203 -29
  118. package/scripts/sw-predictive.sh +16 -22
  119. package/scripts/sw-prep.sh +6 -16
  120. package/scripts/sw-ps.sh +1 -11
  121. package/scripts/sw-public-dashboard.sh +2 -12
  122. package/scripts/sw-quality.sh +77 -10
  123. package/scripts/sw-reaper.sh +1 -11
  124. package/scripts/sw-recruit.sh +15 -25
  125. package/scripts/sw-regression.sh +11 -21
  126. package/scripts/sw-release-manager.sh +19 -28
  127. package/scripts/sw-release.sh +8 -16
  128. package/scripts/sw-remote.sh +1 -11
  129. package/scripts/sw-replay.sh +48 -44
  130. package/scripts/sw-retro.sh +70 -92
  131. package/scripts/sw-review-rerun.sh +220 -0
  132. package/scripts/sw-scale.sh +109 -32
  133. package/scripts/sw-security-audit.sh +12 -22
  134. package/scripts/sw-self-optimize.sh +239 -23
  135. package/scripts/sw-session.sh +3 -13
  136. package/scripts/sw-setup.sh +8 -18
  137. package/scripts/sw-standup.sh +5 -15
  138. package/scripts/sw-status.sh +32 -23
  139. package/scripts/sw-strategic.sh +129 -13
  140. package/scripts/sw-stream.sh +1 -11
  141. package/scripts/sw-swarm.sh +76 -36
  142. package/scripts/sw-team-stages.sh +10 -20
  143. package/scripts/sw-templates.sh +4 -14
  144. package/scripts/sw-testgen.sh +3 -13
  145. package/scripts/sw-tmux-pipeline.sh +1 -19
  146. package/scripts/sw-tmux-role-color.sh +0 -10
  147. package/scripts/sw-tmux-status.sh +3 -11
  148. package/scripts/sw-tmux.sh +2 -20
  149. package/scripts/sw-trace.sh +1 -19
  150. package/scripts/sw-tracker-github.sh +0 -10
  151. package/scripts/sw-tracker-jira.sh +1 -11
  152. package/scripts/sw-tracker-linear.sh +1 -11
  153. package/scripts/sw-tracker.sh +7 -24
  154. package/scripts/sw-triage.sh +24 -34
  155. package/scripts/sw-upgrade.sh +5 -23
  156. package/scripts/sw-ux.sh +1 -19
  157. package/scripts/sw-webhook.sh +18 -32
  158. package/scripts/sw-widgets.sh +3 -21
  159. package/scripts/sw-worktree.sh +11 -27
  160. package/scripts/update-homebrew-sha.sh +67 -0
  161. package/templates/pipelines/tdd.json +72 -0
  162. package/scripts/sw-pipeline.sh.mock +0 -7
@@ -12,6 +12,7 @@ show_stage_preview() {
12
12
  plan) echo -e " Generate plan via Claude, post task checklist to issue" ;;
13
13
  design) echo -e " Generate Architecture Decision Record (ADR), evaluate alternatives" ;;
14
14
  build) echo -e " Delegate to ${CYAN}shipwright loop${RESET} for autonomous building" ;;
15
+ test_first) echo -e " Generate tests from requirements (TDD mode) before implementation" ;;
15
16
  test) echo -e " Run test suite and check coverage" ;;
16
17
  review) echo -e " AI code review on the diff, post findings" ;;
17
18
  pr) echo -e " Create GitHub PR with labels, reviewers, milestone" ;;
@@ -125,7 +126,7 @@ stage_plan() {
125
126
  CURRENT_STAGE_ID="plan"
126
127
  local plan_file="$ARTIFACTS_DIR/plan.md"
127
128
 
128
- if ! command -v claude &>/dev/null; then
129
+ if ! command -v claude >/dev/null 2>&1; then
129
130
  error "Claude CLI not found — cannot generate plan"
130
131
  return 1
131
132
  fi
@@ -138,6 +139,12 @@ stage_plan() {
138
139
  "$context_script" gather --goal "$GOAL" --stage plan 2>/dev/null || true
139
140
  fi
140
141
 
142
+ # Gather rich architecture context (call-graph, dependencies)
143
+ local arch_context=""
144
+ if type gather_architecture_context &>/dev/null; then
145
+ arch_context=$(gather_architecture_context "${PROJECT_ROOT:-.}" 2>/dev/null || true)
146
+ fi
147
+
141
148
  # Build rich prompt with all available context
142
149
  local plan_prompt="You are an autonomous development agent. Analyze this codebase and create a detailed implementation plan.
143
150
 
@@ -153,6 +160,14 @@ ${ISSUE_BODY}
153
160
  "
154
161
  fi
155
162
 
163
+ # Inject architecture context (import graph, modules, test map)
164
+ if [[ -n "$arch_context" ]]; then
165
+ plan_prompt="${plan_prompt}
166
+ ## Architecture Context
167
+ ${arch_context}
168
+ "
169
+ fi
170
+
156
171
  # Inject context bundle from context engine (if available)
157
172
  local _context_bundle="${ARTIFACTS_DIR}/context-bundle.md"
158
173
  if [[ -f "$_context_bundle" ]]; then
@@ -167,7 +182,7 @@ ${_cb_content}
167
182
  fi
168
183
 
169
184
  # Inject intelligence memory context for similar past plans
170
- if type intelligence_search_memory &>/dev/null 2>&1; then
185
+ if type intelligence_search_memory >/dev/null 2>&1; then
171
186
  local plan_memory
172
187
  plan_memory=$(intelligence_search_memory "plan stage for ${TASK_TYPE:-feature}: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
173
188
  if [[ -n "$plan_memory" && "$plan_memory" != *'"results":[]'* && "$plan_memory" != *'"error"'* ]]; then
@@ -392,7 +407,7 @@ CC_TASKS_EOF
392
407
 
393
408
  # ── Plan Validation Gate ──
394
409
  # Ask Claude to validate the plan before proceeding
395
- if command -v claude &>/dev/null && [[ -s "$plan_file" ]]; then
410
+ if command -v claude >/dev/null 2>&1 && [[ -s "$plan_file" ]]; then
396
411
  local validation_attempts=0
397
412
  local max_validation_attempts=2
398
413
  local plan_valid=false
@@ -405,7 +420,7 @@ CC_TASKS_EOF
405
420
  local validation_extra=""
406
421
 
407
422
  # Inject rejected plan history from memory
408
- if type intelligence_search_memory &>/dev/null 2>&1; then
423
+ if type intelligence_search_memory >/dev/null 2>&1; then
409
424
  local rejected_plans
410
425
  rejected_plans=$(intelligence_search_memory "rejected plan validation failures for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
411
426
  if [[ -n "$rejected_plans" ]]; then
@@ -560,16 +575,22 @@ stage_design() {
560
575
  return 0
561
576
  fi
562
577
 
563
- if ! command -v claude &>/dev/null; then
578
+ if ! command -v claude >/dev/null 2>&1; then
564
579
  error "Claude CLI not found — cannot generate design"
565
580
  return 1
566
581
  fi
567
582
 
568
583
  info "Generating Architecture Decision Record..."
569
584
 
585
+ # Gather rich architecture context (call-graph, dependencies)
586
+ local arch_struct_context=""
587
+ if type gather_architecture_context &>/dev/null; then
588
+ arch_struct_context=$(gather_architecture_context "${PROJECT_ROOT:-.}" 2>/dev/null || true)
589
+ fi
590
+
570
591
  # Memory integration — inject context if memory system available
571
592
  local memory_context=""
572
- if type intelligence_search_memory &>/dev/null 2>&1; then
593
+ if type intelligence_search_memory >/dev/null 2>&1; then
573
594
  local mem_dir="${HOME}/.shipwright/memory"
574
595
  memory_context=$(intelligence_search_memory "design stage architecture patterns for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
575
596
  fi
@@ -608,7 +629,7 @@ ${arch_layers}}"
608
629
 
609
630
  # Inject rejected design approaches and anti-patterns from memory
610
631
  local design_antipatterns=""
611
- if type intelligence_search_memory &>/dev/null 2>&1; then
632
+ if type intelligence_search_memory >/dev/null 2>&1; then
612
633
  local rejected_designs
613
634
  rejected_designs=$(intelligence_search_memory "rejected design approaches anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
614
635
  if [[ -n "$rejected_designs" ]]; then
@@ -636,7 +657,10 @@ $(cat "$plan_file")
636
657
  - Language: ${project_lang}
637
658
  - Test command: ${TEST_CMD:-not configured}
638
659
  - Task type: ${TASK_TYPE:-feature}
639
- ${memory_context:+
660
+ ${arch_struct_context:+
661
+ ## Architecture Context (import graph, modules, test map)
662
+ ${arch_struct_context}
663
+ }${memory_context:+
640
664
  ## Historical Context (from memory)
641
665
  ${memory_context}
642
666
  }${arch_context:+
@@ -741,6 +765,117 @@ _Generated by \`shipwright pipeline\` design stage at $(now_iso)_"
741
765
  log_stage "design" "Generated design.md (${line_count} lines)"
742
766
  }
743
767
 
768
+ # ─── TDD: Generate tests before implementation ─────────────────────────────────
769
+ stage_test_first() {
770
+ CURRENT_STAGE_ID="test_first"
771
+ info "Generating tests from requirements (TDD mode)"
772
+
773
+ local plan_file="${ARTIFACTS_DIR}/plan.md"
774
+ local goal_file="${PROJECT_ROOT}/.claude/goal.md"
775
+ local requirements=""
776
+ if [[ -f "$plan_file" ]]; then
777
+ requirements=$(cat "$plan_file" 2>/dev/null || true)
778
+ elif [[ -f "$goal_file" ]]; then
779
+ requirements=$(cat "$goal_file" 2>/dev/null || true)
780
+ else
781
+ requirements="${GOAL:-}: ${ISSUE_BODY:-}"
782
+ fi
783
+
784
+ local tdd_prompt="You are writing tests BEFORE implementation (TDD).
785
+
786
+ Based on the following plan/requirements, generate test files that define the expected behavior. These tests should FAIL initially (since the implementation doesn't exist yet) but define the correct interface and behavior.
787
+
788
+ Requirements:
789
+ ${requirements}
790
+
791
+ Instructions:
792
+ 1. Create test files for each component mentioned in the plan
793
+ 2. Tests should verify the PUBLIC interface and expected behavior
794
+ 3. Include edge cases and error handling tests
795
+ 4. Tests should be runnable with the project's test framework
796
+ 5. Mark tests that need implementation with clear TODO comments
797
+ 6. Do NOT write implementation code — only tests
798
+
799
+ Output format: For each test file, use a fenced code block with the file path as the language identifier (e.g. \`\`\`tests/auth.test.ts):
800
+ \`\`\`path/to/test.test.ts
801
+ // file content
802
+ \`\`\`
803
+
804
+ Create files in the appropriate project directories (e.g. tests/, __tests__/, src/**/*.test.ts) per project convention."
805
+
806
+ local model="${CLAUDE_MODEL:-${MODEL:-sonnet}}"
807
+ [[ -z "$model" || "$model" == "null" ]] && model="sonnet"
808
+
809
+ local output=""
810
+ output=$(echo "$tdd_prompt" | timeout 120 claude --print --model "$model" 2>/dev/null) || {
811
+ warn "TDD test generation failed, falling back to standard build"
812
+ return 1
813
+ }
814
+
815
+ # Parse output: extract fenced code blocks and write to files
816
+ local wrote_any=false
817
+ local block_path="" in_block=false block_content=""
818
+ while IFS= read -r line; do
819
+ if [[ "$line" =~ ^\`\`\`([a-zA-Z0-9_/\.\-]+)$ ]]; then
820
+ if [[ -n "$block_path" && -n "$block_content" ]]; then
821
+ local out_file="${PROJECT_ROOT}/${block_path}"
822
+ local out_dir
823
+ out_dir=$(dirname "$out_file")
824
+ mkdir -p "$out_dir" 2>/dev/null || true
825
+ if echo "$block_content" > "$out_file" 2>/dev/null; then
826
+ wrote_any=true
827
+ info " Wrote: $block_path"
828
+ fi
829
+ fi
830
+ block_path="${BASH_REMATCH[1]}"
831
+ block_content=""
832
+ in_block=true
833
+ elif [[ "$line" == "\`\`\`" && "$in_block" == "true" ]]; then
834
+ if [[ -n "$block_path" && -n "$block_content" ]]; then
835
+ local out_file="${PROJECT_ROOT}/${block_path}"
836
+ local out_dir
837
+ out_dir=$(dirname "$out_file")
838
+ mkdir -p "$out_dir" 2>/dev/null || true
839
+ if echo "$block_content" > "$out_file" 2>/dev/null; then
840
+ wrote_any=true
841
+ info " Wrote: $block_path"
842
+ fi
843
+ fi
844
+ block_path=""
845
+ block_content=""
846
+ in_block=false
847
+ elif [[ "$in_block" == "true" && -n "$block_path" ]]; then
848
+ [[ -n "$block_content" ]] && block_content="${block_content}"$'\n'
849
+ block_content="${block_content}${line}"
850
+ fi
851
+ done <<< "$output"
852
+
853
+ # Flush last block if unclosed
854
+ if [[ -n "$block_path" && -n "$block_content" ]]; then
855
+ local out_file="${PROJECT_ROOT}/${block_path}"
856
+ local out_dir
857
+ out_dir=$(dirname "$out_file")
858
+ mkdir -p "$out_dir" 2>/dev/null || true
859
+ if echo "$block_content" > "$out_file" 2>/dev/null; then
860
+ wrote_any=true
861
+ info " Wrote: $block_path"
862
+ fi
863
+ fi
864
+
865
+ if [[ "$wrote_any" == "true" ]]; then
866
+ if (cd "$PROJECT_ROOT" && git diff --name-only 2>/dev/null | grep -qE 'test|spec'); then
867
+ git add -A 2>/dev/null || true
868
+ git commit -m "test: TDD - define expected behavior before implementation" 2>/dev/null || true
869
+ emit_event "tdd.tests_generated" "{\"stage\":\"test_first\"}"
870
+ fi
871
+ success "TDD tests generated"
872
+ else
873
+ warn "No test files extracted from TDD output — check format"
874
+ fi
875
+
876
+ return 0
877
+ }
878
+
744
879
  stage_build() {
745
880
  local plan_file="$ARTIFACTS_DIR/plan.md"
746
881
  local design_file="$ARTIFACTS_DIR/design.md"
@@ -749,7 +884,7 @@ stage_build() {
749
884
 
750
885
  # Memory integration — inject context if memory system available
751
886
  local memory_context=""
752
- if type intelligence_search_memory &>/dev/null 2>&1; then
887
+ if type intelligence_search_memory >/dev/null 2>&1; then
753
888
  local mem_dir="${HOME}/.shipwright/memory"
754
889
  memory_context=$(intelligence_search_memory "build stage for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
755
890
  fi
@@ -761,6 +896,13 @@ stage_build() {
761
896
  local enriched_goal
762
897
  enriched_goal=$(_pipeline_compact_goal "$GOAL" "$plan_file" "$design_file")
763
898
 
899
+ # TDD: when test_first ran, tell build to make existing tests pass
900
+ if [[ "${TDD_ENABLED:-false}" == "true" || "${PIPELINE_TDD:-}" == "true" ]]; then
901
+ enriched_goal="${enriched_goal}
902
+
903
+ IMPORTANT (TDD mode): Test files already exist and define the expected behavior. Write implementation code to make ALL tests pass. Do not delete or modify the test files."
904
+ fi
905
+
764
906
  # Inject memory context
765
907
  if [[ -n "$memory_context" ]]; then
766
908
  enriched_goal="${enriched_goal}
@@ -790,7 +932,7 @@ $(cat "$TASKS_FILE")"
790
932
  fi
791
933
 
792
934
  # Inject file hotspots from GitHub intelligence
793
- if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_file_change_frequency &>/dev/null 2>&1; then
935
+ if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_file_change_frequency >/dev/null 2>&1; then
794
936
  local build_hotspots
795
937
  build_hotspots=$(gh_file_change_frequency 2>/dev/null | head -5 || true)
796
938
  if [[ -n "$build_hotspots" ]]; then
@@ -802,7 +944,7 @@ ${build_hotspots}"
802
944
  fi
803
945
 
804
946
  # Inject security alerts context
805
- if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_security_alerts &>/dev/null 2>&1; then
947
+ if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_security_alerts >/dev/null 2>&1; then
806
948
  local build_alerts
807
949
  build_alerts=$(gh_security_alerts 2>/dev/null | head -3 || true)
808
950
  if [[ -n "$build_alerts" ]]; then
@@ -930,6 +1072,11 @@ ${prevention_text}"
930
1072
  # Definition of Done: use plan-extracted DoD if available
931
1073
  [[ -s "$dod_file" ]] && loop_args+=(--definition-of-done "$dod_file")
932
1074
 
1075
+ # Checkpoint resume: when pipeline resumed from build-stage checkpoint, pass --resume to loop
1076
+ if [[ "${RESUME_FROM_CHECKPOINT:-false}" == "true" && "${checkpoint_stage:-}" == "build" ]]; then
1077
+ loop_args+=(--resume)
1078
+ fi
1079
+
933
1080
  # Skip permissions in CI (no interactive terminal)
934
1081
  [[ "${CI_MODE:-false}" == "true" ]] && loop_args+=(--skip-permissions)
935
1082
 
@@ -967,7 +1114,7 @@ ${prevention_text}"
967
1114
 
968
1115
  # Read accumulated token counts from build loop (written by sw-loop.sh)
969
1116
  local _loop_token_file="${PROJECT_ROOT}/.claude/loop-logs/loop-tokens.json"
970
- if [[ -f "$_loop_token_file" ]] && command -v jq &>/dev/null; then
1117
+ if [[ -f "$_loop_token_file" ]] && command -v jq >/dev/null 2>&1; then
971
1118
  local _loop_in _loop_out _loop_cost
972
1119
  _loop_in=$(jq -r '.input_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
973
1120
  _loop_out=$(jq -r '.output_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
@@ -988,7 +1135,7 @@ ${prevention_text}"
988
1135
  info "Build produced ${BOLD}$commit_count${RESET} commit(s)"
989
1136
 
990
1137
  # Commit quality evaluation when intelligence is enabled
991
- if type intelligence_search_memory &>/dev/null 2>&1 && command -v claude &>/dev/null && [[ "${commit_count:-0}" -gt 0 ]]; then
1138
+ if type intelligence_search_memory >/dev/null 2>&1 && command -v claude >/dev/null 2>&1 && [[ "${commit_count:-0}" -gt 0 ]]; then
992
1139
  local commit_msgs
993
1140
  commit_msgs=$(git log --format="%s" "${BASE_BRANCH}..HEAD" 2>/dev/null | head -20)
994
1141
  local quality_score
@@ -1086,6 +1233,14 @@ ${log_excerpt}
1086
1233
  fi
1087
1234
  fi
1088
1235
 
1236
+ # Emit test.completed with coverage for adaptive learning
1237
+ if [[ -n "$coverage" ]]; then
1238
+ emit_event "test.completed" \
1239
+ "issue=${ISSUE_NUMBER:-0}" \
1240
+ "stage=test" \
1241
+ "coverage=$coverage"
1242
+ fi
1243
+
1089
1244
  # Post test results to GitHub
1090
1245
  if [[ -n "$ISSUE_NUMBER" ]]; then
1091
1246
  local test_summary
@@ -1129,7 +1284,7 @@ stage_review() {
1129
1284
  return 0
1130
1285
  fi
1131
1286
 
1132
- if ! command -v claude &>/dev/null; then
1287
+ if ! command -v claude >/dev/null 2>&1; then
1133
1288
  warn "Claude CLI not found — skipping AI review"
1134
1289
  return 0
1135
1290
  fi
@@ -1139,7 +1294,7 @@ stage_review() {
1139
1294
  info "Running AI code review... ${DIM}($diff_stats)${RESET}"
1140
1295
 
1141
1296
  # Semantic risk scoring when intelligence is enabled
1142
- if type intelligence_search_memory &>/dev/null 2>&1 && command -v claude &>/dev/null; then
1297
+ if type intelligence_search_memory >/dev/null 2>&1 && command -v claude >/dev/null 2>&1; then
1143
1298
  local diff_files
1144
1299
  diff_files=$(git diff --name-only "${BASE_BRANCH}...${GIT_BRANCH}" 2>/dev/null || true)
1145
1300
  local risk_score="low"
@@ -1185,7 +1340,7 @@ If no issues are found, write: \"Review clean — no issues found.\"
1185
1340
  "
1186
1341
 
1187
1342
  # Inject previous review findings and anti-patterns from memory
1188
- if type intelligence_search_memory &>/dev/null 2>&1; then
1343
+ if type intelligence_search_memory >/dev/null 2>&1; then
1189
1344
  local review_memory
1190
1345
  review_memory=$(intelligence_search_memory "code review findings anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
1191
1346
  if [[ -n "$review_memory" ]]; then
@@ -1211,7 +1366,7 @@ ${conventions}
1211
1366
  fi
1212
1367
 
1213
1368
  # Inject CODEOWNERS focus areas for review
1214
- if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_codeowners &>/dev/null 2>&1; then
1369
+ if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_codeowners >/dev/null 2>&1; then
1215
1370
  local review_owners
1216
1371
  review_owners=$(gh_codeowners 2>/dev/null | head -10 || true)
1217
1372
  if [[ -n "$review_owners" ]]; then
@@ -1448,7 +1603,7 @@ stage_pr() {
1448
1603
 
1449
1604
  # ── Developer Simulation (pre-PR review) ──
1450
1605
  local simulation_summary=""
1451
- if type simulation_review &>/dev/null 2>&1; then
1606
+ if type simulation_review >/dev/null 2>&1; then
1452
1607
  local sim_enabled
1453
1608
  sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
1454
1609
  # Also check daemon-config
@@ -1479,7 +1634,7 @@ stage_pr() {
1479
1634
 
1480
1635
  # ── Architecture Validation (pre-PR check) ──
1481
1636
  local arch_summary=""
1482
- if type architecture_validate_changes &>/dev/null 2>&1; then
1637
+ if type architecture_validate_changes >/dev/null 2>&1; then
1483
1638
  local arch_enabled
1484
1639
  arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
1485
1640
  local daemon_cfg=".claude/daemon-config.json"
@@ -1614,6 +1769,65 @@ Generated by \`shipwright pipeline\`
1614
1769
  EOF
1615
1770
  )"
1616
1771
 
1772
+ # Verify required evidence before PR (merge policy enforcement)
1773
+ local risk_tier
1774
+ risk_tier="low"
1775
+ if [[ -f "$REPO_DIR/config/policy.json" ]]; then
1776
+ local changed_files
1777
+ changed_files=$(git diff --name-only "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
1778
+ if [[ -n "$changed_files" ]]; then
1779
+ local policy_file="$REPO_DIR/config/policy.json"
1780
+ check_tier_match() {
1781
+ local tier="$1"
1782
+ local patterns
1783
+ patterns=$(jq -r ".riskTierRules.${tier}[]? // empty" "$policy_file" 2>/dev/null)
1784
+ [[ -z "$patterns" ]] && return 1
1785
+ while IFS= read -r pattern; do
1786
+ [[ -z "$pattern" ]] && continue
1787
+ local regex
1788
+ regex=$(echo "$pattern" | sed 's/\./\\./g; s/\*\*/DOUBLESTAR/g; s/\*/[^\/]*/g; s/DOUBLESTAR/.*/g')
1789
+ while IFS= read -r file; do
1790
+ [[ -z "$file" ]] && continue
1791
+ if echo "$file" | grep -qE "^${regex}$"; then
1792
+ return 0
1793
+ fi
1794
+ done <<< "$changed_files"
1795
+ done <<< "$patterns"
1796
+ return 1
1797
+ }
1798
+ check_tier_match "critical" && risk_tier="critical"
1799
+ check_tier_match "high" && [[ "$risk_tier" != "critical" ]] && risk_tier="high"
1800
+ check_tier_match "medium" && [[ "$risk_tier" != "critical" && "$risk_tier" != "high" ]] && risk_tier="medium"
1801
+ fi
1802
+ fi
1803
+
1804
+ local required_evidence
1805
+ required_evidence=$(jq -r ".mergePolicy.\"$risk_tier\".requiredEvidence // [] | .[]" "$REPO_DIR/config/policy.json" 2>/dev/null)
1806
+
1807
+ if [[ -n "$required_evidence" ]]; then
1808
+ local evidence_dir="$REPO_DIR/.claude/evidence"
1809
+ local missing_evidence=()
1810
+ while IFS= read -r etype; do
1811
+ [[ -z "$etype" ]] && continue
1812
+ local has_evidence=false
1813
+ for f in "$evidence_dir"/*"$etype"*; do
1814
+ [[ -f "$f" ]] && has_evidence=true && break
1815
+ done
1816
+ [[ "$has_evidence" != "true" ]] && missing_evidence+=("$etype")
1817
+ done <<< "$required_evidence"
1818
+
1819
+ if [[ ${#missing_evidence[@]} -gt 0 ]]; then
1820
+ warn "Missing required evidence for $risk_tier tier: ${missing_evidence[*]}"
1821
+ emit_event "evidence.missing" "{\"tier\":\"$risk_tier\",\"missing\":\"${missing_evidence[*]}\"}"
1822
+ # Collect missing evidence
1823
+ if [[ -x "$SCRIPT_DIR/sw-evidence.sh" ]]; then
1824
+ for etype in "${missing_evidence[@]}"; do
1825
+ (cd "$REPO_DIR" && bash "$SCRIPT_DIR/sw-evidence.sh" capture "$etype" 2>/dev/null) || warn "Failed to collect $etype evidence"
1826
+ done
1827
+ fi
1828
+ fi
1829
+ fi
1830
+
1617
1831
  # Build gh pr create args
1618
1832
  local pr_args=(--title "$pr_title" --body "$pr_body" --base "$BASE_BRANCH")
1619
1833
 
@@ -1687,7 +1901,7 @@ EOF
1687
1901
  local reviewer_assigned=false
1688
1902
 
1689
1903
  # Try CODEOWNERS-based routing via GraphQL API
1690
- if type gh_codeowners &>/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
1904
+ if type gh_codeowners >/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
1691
1905
  local codeowners_json
1692
1906
  codeowners_json=$(gh_codeowners "$REPO_OWNER" "$REPO_NAME" 2>/dev/null || echo "[]")
1693
1907
  if [[ "$codeowners_json" != "[]" && -n "$codeowners_json" ]]; then
@@ -1710,7 +1924,7 @@ EOF
1710
1924
  fi
1711
1925
 
1712
1926
  # Fallback: contributor-based routing via GraphQL API
1713
- if [[ "$reviewer_assigned" != "true" ]] && type gh_contributors &>/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
1927
+ if [[ "$reviewer_assigned" != "true" ]] && type gh_contributors >/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
1714
1928
  local contributors_json
1715
1929
  contributors_json=$(gh_contributors "$REPO_OWNER" "$REPO_NAME" 2>/dev/null || echo "[]")
1716
1930
  local top_contributor
@@ -1816,7 +2030,7 @@ stage_merge() {
1816
2030
  fi
1817
2031
 
1818
2032
  # ── Branch Protection Check ──
1819
- if type gh_branch_protection &>/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
2033
+ if type gh_branch_protection >/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
1820
2034
  local protection_json
1821
2035
  protection_json=$(gh_branch_protection "$REPO_OWNER" "$REPO_NAME" "${BASE_BRANCH:-main}" 2>/dev/null || echo '{"protected": false}')
1822
2036
  local is_protected
@@ -1912,13 +2126,13 @@ stage_merge() {
1912
2126
  check_status=$(gh pr checks "$pr_number" --json 'bucket,name' --jq '[.[] | .bucket] | unique | sort' 2>/dev/null || echo '["pending"]')
1913
2127
 
1914
2128
  # If all checks passed (only "pass" in buckets)
1915
- if echo "$check_status" | jq -e '. == ["pass"]' &>/dev/null; then
2129
+ if echo "$check_status" | jq -e '. == ["pass"]' >/dev/null 2>&1; then
1916
2130
  success "All CI checks passed"
1917
2131
  break
1918
2132
  fi
1919
2133
 
1920
2134
  # If any check failed
1921
- if echo "$check_status" | jq -e 'any(. == "fail")' &>/dev/null; then
2135
+ if echo "$check_status" | jq -e 'any(. == "fail")' >/dev/null 2>&1; then
1922
2136
  error "CI checks failed — aborting merge"
1923
2137
  return 1
1924
2138
  fi
@@ -2018,7 +2232,7 @@ stage_deploy() {
2018
2232
  # Create GitHub deployment tracking
2019
2233
  local gh_deploy_env="production"
2020
2234
  [[ -n "$staging_cmd" && -z "$prod_cmd" ]] && gh_deploy_env="staging"
2021
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_start &>/dev/null 2>&1; then
2235
+ if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_start >/dev/null 2>&1; then
2022
2236
  if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
2023
2237
  gh_deploy_pipeline_start "$REPO_OWNER" "$REPO_NAME" "${GIT_BRANCH:-HEAD}" "$gh_deploy_env" 2>/dev/null || true
2024
2238
  info "GitHub Deployment: tracking as $gh_deploy_env"
@@ -2151,7 +2365,7 @@ stage_deploy() {
2151
2365
  error "Staging deploy failed"
2152
2366
  [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Staging deploy failed"
2153
2367
  # Mark GitHub deployment as failed
2154
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
2368
+ if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete >/dev/null 2>&1; then
2155
2369
  gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Staging deploy failed" 2>/dev/null || true
2156
2370
  fi
2157
2371
  return 1
@@ -2169,7 +2383,7 @@ stage_deploy() {
2169
2383
  fi
2170
2384
  [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Production deploy failed — rollback ${rollback_cmd:+attempted}"
2171
2385
  # Mark GitHub deployment as failed
2172
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
2386
+ if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete >/dev/null 2>&1; then
2173
2387
  gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Production deploy failed" 2>/dev/null || true
2174
2388
  fi
2175
2389
  return 1
@@ -2184,7 +2398,7 @@ stage_deploy() {
2184
2398
  fi
2185
2399
 
2186
2400
  # Mark GitHub deployment as successful
2187
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
2401
+ if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete >/dev/null 2>&1; then
2188
2402
  if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
2189
2403
  gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" true "" 2>/dev/null || true
2190
2404
  fi
@@ -63,6 +63,23 @@ get_stage_timing_seconds() {
63
63
  fi
64
64
  }
65
65
 
66
+ # Name of the slowest completed stage (for pipeline.completed event)
67
+ get_slowest_stage() {
68
+ local slowest="" max_sec=0
69
+ local stage_ids
70
+ stage_ids=$(echo "$STAGE_TIMINGS" | grep "_start:" | sed 's/_start:.*//' | sort -u)
71
+ for sid in $stage_ids; do
72
+ [[ -z "$sid" ]] && continue
73
+ local sec
74
+ sec=$(get_stage_timing_seconds "$sid")
75
+ if [[ -n "$sec" && "$sec" =~ ^[0-9]+$ && "$sec" -gt "$max_sec" ]]; then
76
+ max_sec="$sec"
77
+ slowest="$sid"
78
+ fi
79
+ done
80
+ echo "${slowest:-}"
81
+ }
82
+
66
83
  get_stage_description() {
67
84
  local stage_id="$1"
68
85
 
@@ -191,7 +208,7 @@ mark_stage_complete() {
191
208
  fi
192
209
 
193
210
  # Update GitHub Check Run for this stage
194
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_stage_update &>/dev/null 2>&1; then
211
+ if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_stage_update >/dev/null 2>&1; then
195
212
  gh_checks_stage_update "$stage_id" "completed" "success" "Stage $stage_id: ${timing}" 2>/dev/null || true
196
213
  fi
197
214
 
@@ -215,9 +232,18 @@ mark_stage_complete() {
215
232
  fi
216
233
 
217
234
  # Durable WAL: publish stage completion event
218
- if type publish_event &>/dev/null 2>&1; then
235
+ if type publish_event >/dev/null 2>&1; then
219
236
  publish_event "stage.complete" "{\"stage\":\"${stage_id}\",\"issue\":\"${ISSUE_NUMBER:-0}\",\"timing\":\"${timing}\"}" 2>/dev/null || true
220
237
  fi
238
+
239
+ # Durable checkpoint: save to DB for pipeline resume
240
+ if type db_save_checkpoint >/dev/null 2>&1; then
241
+ local checkpoint_data
242
+ checkpoint_data=$(jq -nc --arg stage "$stage_id" --arg status "${PIPELINE_STATUS:-running}" \
243
+ --arg issue "${ISSUE_NUMBER:-}" --arg goal "${GOAL:-}" --arg template "${PIPELINE_TEMPLATE:-}" \
244
+ '{stage: $stage, status: $status, issue: $issue, goal: $goal, template: $template, ts: "'"$(now_iso)"'"}')
245
+ db_save_checkpoint "pipeline-${SHIPWRIGHT_PIPELINE_ID:-$$}" "$checkpoint_data" 2>/dev/null || true
246
+ fi
221
247
  }
222
248
 
223
249
  persist_artifacts() {
@@ -350,7 +376,7 @@ $(tail -5 "$ARTIFACTS_DIR/${stage_id}"*.log 2>/dev/null || echo 'No log availabl
350
376
  fi
351
377
 
352
378
  # Update GitHub Check Run for this stage
353
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_stage_update &>/dev/null 2>&1; then
379
+ if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_stage_update >/dev/null 2>&1; then
354
380
  local fail_summary
355
381
  fail_summary=$(tail -3 "$ARTIFACTS_DIR/${stage_id}"*.log 2>/dev/null | head -c 500 || echo "Stage $stage_id failed")
356
382
  gh_checks_stage_update "$stage_id" "completed" "failure" "$fail_summary" 2>/dev/null || true
@@ -368,7 +394,7 @@ $(tail -5 "$ARTIFACTS_DIR/${stage_id}"*.log 2>/dev/null || echo 'No log availabl
368
394
  fi
369
395
 
370
396
  # Durable WAL: publish stage failure event
371
- if type publish_event &>/dev/null 2>&1; then
397
+ if type publish_event >/dev/null 2>&1; then
372
398
  publish_event "stage.failed" "{\"stage\":\"${stage_id}\",\"issue\":\"${ISSUE_NUMBER:-0}\",\"timing\":\"${timing}\"}" 2>/dev/null || true
373
399
  fi
374
400
  }
@@ -446,6 +472,16 @@ _SW_STATE_END_
446
472
  printf '## Log\n'
447
473
  printf '%s\n' "$LOG_ENTRIES"
448
474
  } >> "$STATE_FILE"
475
+
476
+ # Update pipeline_runs in DB
477
+ if type update_pipeline_status >/dev/null 2>&1 && db_available 2>/dev/null; then
478
+ local _job_id="${SHIPWRIGHT_PIPELINE_ID:-pipeline-$$-${ISSUE_NUMBER:-0}}"
479
+ local _dur_secs=0
480
+ if [[ -n "$PIPELINE_START_EPOCH" ]]; then
481
+ _dur_secs=$(( $(now_epoch) - PIPELINE_START_EPOCH ))
482
+ fi
483
+ update_pipeline_status "$_job_id" "$PIPELINE_STATUS" "$CURRENT_STAGE" "" "$_dur_secs" 2>/dev/null || true
484
+ fi
449
485
  }
450
486
 
451
487
  resume_state() {