shipwright-cli 3.0.0 → 3.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +21 -7
- package/completions/_shipwright +247 -93
- package/completions/shipwright.bash +69 -15
- package/completions/shipwright.fish +309 -41
- package/config/decision-tiers.json +55 -0
- package/config/defaults.json +25 -2
- package/config/event-schema.json +142 -5
- package/config/policy.json +8 -0
- package/dashboard/public/index.html +6 -0
- package/dashboard/public/styles.css +76 -0
- package/dashboard/server.ts +51 -0
- package/dashboard/src/core/api.ts +5 -0
- package/dashboard/src/types/api.ts +10 -0
- package/dashboard/src/views/metrics.ts +69 -1
- package/package.json +3 -3
- package/scripts/lib/architecture.sh +2 -1
- package/scripts/lib/bootstrap.sh +0 -0
- package/scripts/lib/config.sh +0 -0
- package/scripts/lib/daemon-adaptive.sh +4 -2
- package/scripts/lib/daemon-dispatch.sh +24 -1
- package/scripts/lib/daemon-failure.sh +0 -0
- package/scripts/lib/daemon-health.sh +0 -0
- package/scripts/lib/daemon-patrol.sh +42 -7
- package/scripts/lib/daemon-poll.sh +17 -0
- package/scripts/lib/daemon-state.sh +17 -0
- package/scripts/lib/daemon-triage.sh +1 -1
- package/scripts/lib/decide-autonomy.sh +295 -0
- package/scripts/lib/decide-scoring.sh +228 -0
- package/scripts/lib/decide-signals.sh +462 -0
- package/scripts/lib/fleet-failover.sh +0 -0
- package/scripts/lib/helpers.sh +19 -18
- package/scripts/lib/pipeline-detection.sh +1 -1
- package/scripts/lib/pipeline-github.sh +0 -0
- package/scripts/lib/pipeline-intelligence.sh +23 -4
- package/scripts/lib/pipeline-quality-checks.sh +11 -6
- package/scripts/lib/pipeline-quality.sh +0 -0
- package/scripts/lib/pipeline-stages.sh +330 -33
- package/scripts/lib/pipeline-state.sh +14 -0
- package/scripts/lib/policy.sh +0 -0
- package/scripts/lib/test-helpers.sh +0 -0
- package/scripts/postinstall.mjs +75 -1
- package/scripts/signals/example-collector.sh +36 -0
- package/scripts/sw +8 -4
- package/scripts/sw-activity.sh +1 -7
- package/scripts/sw-adaptive.sh +7 -7
- package/scripts/sw-adversarial.sh +1 -1
- package/scripts/sw-architecture-enforcer.sh +1 -1
- package/scripts/sw-auth.sh +1 -1
- package/scripts/sw-autonomous.sh +1 -1
- package/scripts/sw-changelog.sh +1 -1
- package/scripts/sw-checkpoint.sh +1 -1
- package/scripts/sw-ci.sh +11 -6
- package/scripts/sw-cleanup.sh +1 -1
- package/scripts/sw-code-review.sh +36 -17
- package/scripts/sw-connect.sh +1 -1
- package/scripts/sw-context.sh +1 -1
- package/scripts/sw-cost.sh +71 -5
- package/scripts/sw-daemon.sh +6 -3
- package/scripts/sw-dashboard.sh +1 -1
- package/scripts/sw-db.sh +53 -38
- package/scripts/sw-decide.sh +685 -0
- package/scripts/sw-decompose.sh +1 -1
- package/scripts/sw-deps.sh +1 -1
- package/scripts/sw-developer-simulation.sh +1 -1
- package/scripts/sw-discovery.sh +80 -4
- package/scripts/sw-doc-fleet.sh +1 -1
- package/scripts/sw-docs-agent.sh +1 -1
- package/scripts/sw-docs.sh +1 -1
- package/scripts/sw-doctor.sh +1 -1
- package/scripts/sw-dora.sh +1 -1
- package/scripts/sw-durable.sh +9 -5
- package/scripts/sw-e2e-orchestrator.sh +1 -1
- package/scripts/sw-eventbus.sh +7 -4
- package/scripts/sw-evidence.sh +1 -1
- package/scripts/sw-feedback.sh +1 -1
- package/scripts/sw-fix.sh +1 -1
- package/scripts/sw-fleet-discover.sh +1 -1
- package/scripts/sw-fleet-viz.sh +6 -4
- package/scripts/sw-fleet.sh +1 -1
- package/scripts/sw-github-app.sh +3 -2
- package/scripts/sw-github-checks.sh +1 -1
- package/scripts/sw-github-deploy.sh +1 -1
- package/scripts/sw-github-graphql.sh +1 -1
- package/scripts/sw-guild.sh +1 -1
- package/scripts/sw-heartbeat.sh +1 -1
- package/scripts/sw-hygiene.sh +5 -3
- package/scripts/sw-incident.sh +9 -5
- package/scripts/sw-init.sh +1 -1
- package/scripts/sw-instrument.sh +1 -1
- package/scripts/sw-intelligence.sh +11 -6
- package/scripts/sw-jira.sh +1 -1
- package/scripts/sw-launchd.sh +1 -1
- package/scripts/sw-linear.sh +1 -1
- package/scripts/sw-logs.sh +1 -1
- package/scripts/sw-loop.sh +338 -32
- package/scripts/sw-memory.sh +23 -6
- package/scripts/sw-mission-control.sh +1 -1
- package/scripts/sw-model-router.sh +3 -2
- package/scripts/sw-otel.sh +8 -4
- package/scripts/sw-oversight.sh +1 -1
- package/scripts/sw-pipeline-composer.sh +3 -1
- package/scripts/sw-pipeline-vitals.sh +11 -6
- package/scripts/sw-pipeline.sh +92 -8
- package/scripts/sw-pm.sh +5 -4
- package/scripts/sw-pr-lifecycle.sh +7 -4
- package/scripts/sw-predictive.sh +11 -5
- package/scripts/sw-prep.sh +1 -1
- package/scripts/sw-ps.sh +1 -1
- package/scripts/sw-public-dashboard.sh +3 -2
- package/scripts/sw-quality.sh +21 -10
- package/scripts/sw-reaper.sh +1 -1
- package/scripts/sw-recruit.sh +1 -1
- package/scripts/sw-regression.sh +1 -1
- package/scripts/sw-release-manager.sh +1 -1
- package/scripts/sw-release.sh +1 -1
- package/scripts/sw-remote.sh +1 -1
- package/scripts/sw-replay.sh +1 -1
- package/scripts/sw-retro.sh +1 -1
- package/scripts/sw-review-rerun.sh +1 -1
- package/scripts/sw-scale.sh +69 -11
- package/scripts/sw-security-audit.sh +1 -1
- package/scripts/sw-self-optimize.sh +168 -4
- package/scripts/sw-session.sh +3 -3
- package/scripts/sw-setup.sh +1 -1
- package/scripts/sw-standup.sh +1 -1
- package/scripts/sw-status.sh +1 -1
- package/scripts/sw-strategic.sh +11 -6
- package/scripts/sw-stream.sh +7 -4
- package/scripts/sw-swarm.sh +3 -2
- package/scripts/sw-team-stages.sh +1 -1
- package/scripts/sw-templates.sh +3 -3
- package/scripts/sw-testgen.sh +11 -6
- package/scripts/sw-tmux-pipeline.sh +1 -1
- package/scripts/sw-tmux.sh +35 -1
- package/scripts/sw-trace.sh +1 -1
- package/scripts/sw-tracker.sh +1 -1
- package/scripts/sw-triage.sh +7 -7
- package/scripts/sw-upgrade.sh +1 -1
- package/scripts/sw-ux.sh +1 -1
- package/scripts/sw-webhook.sh +3 -2
- package/scripts/sw-widgets.sh +7 -4
- package/scripts/sw-worktree.sh +1 -1
- package/scripts/update-homebrew-sha.sh +21 -15
package/scripts/sw-memory.sh
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.2.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
REPO_DIR="${REPO_DIR:-$(cd "$SCRIPT_DIR/.." && pwd)}"
|
|
12
12
|
|
|
@@ -88,7 +88,13 @@ memory_ranked_search() {
|
|
|
88
88
|
memory_dir="$(repo_memory_dir)"
|
|
89
89
|
fi
|
|
90
90
|
memory_dir="${memory_dir:-$HOME/.shipwright/memory}"
|
|
91
|
-
[[ ! -d "$memory_dir" ]]
|
|
91
|
+
if [[ ! -d "$memory_dir" ]]; then
|
|
92
|
+
info "Memory dir not found at ${memory_dir} — auto-creating"
|
|
93
|
+
mkdir -p "$memory_dir"
|
|
94
|
+
emit_event "memory.not_available" "path=$memory_dir" "action=auto_created"
|
|
95
|
+
echo "[]"
|
|
96
|
+
return 0
|
|
97
|
+
fi
|
|
92
98
|
|
|
93
99
|
# Extract and expand query keywords
|
|
94
100
|
local keywords
|
|
@@ -372,7 +378,10 @@ memory_capture_failure() {
|
|
|
372
378
|
pattern=$(echo "$error_output" | head -1 | cut -c1-200)
|
|
373
379
|
fi
|
|
374
380
|
|
|
375
|
-
[[ -z "$pattern" ]]
|
|
381
|
+
if [[ -z "$pattern" ]]; then
|
|
382
|
+
warn "Memory capture: empty error pattern — skipping"
|
|
383
|
+
return 0
|
|
384
|
+
fi
|
|
376
385
|
|
|
377
386
|
# Check for duplicate — increment seen_count if pattern already exists
|
|
378
387
|
local existing_idx
|
|
@@ -716,7 +725,7 @@ memory_analyze_failure() {
|
|
|
716
725
|
"$failures_file" 2>/dev/null || true)
|
|
717
726
|
fi
|
|
718
727
|
|
|
719
|
-
# Build valid categories list (from compat.sh if available, else
|
|
728
|
+
# Build valid categories list (from compat.sh if available, else built-in defaults)
|
|
720
729
|
local valid_cats="test_failure, build_error, lint_error, timeout, dependency, flaky, config"
|
|
721
730
|
if [[ -n "${SW_ERROR_CATEGORIES:-}" ]]; then
|
|
722
731
|
valid_cats=$(echo "$SW_ERROR_CATEGORIES" | tr ' ' ', ')
|
|
@@ -987,6 +996,7 @@ memory_inject_context() {
|
|
|
987
996
|
done
|
|
988
997
|
|
|
989
998
|
if [[ "$has_memory" == "false" ]]; then
|
|
999
|
+
info "No memory available for repo (${mem_dir}) — first pipeline run will seed it"
|
|
990
1000
|
echo "# No memory available for this repository yet."
|
|
991
1001
|
return 0
|
|
992
1002
|
fi
|
|
@@ -1642,6 +1652,11 @@ memory_export() {
|
|
|
1642
1652
|
local mem_dir
|
|
1643
1653
|
mem_dir="$(repo_memory_dir)"
|
|
1644
1654
|
|
|
1655
|
+
# Ensure all memory files exist (jq --slurpfile fails on missing files)
|
|
1656
|
+
for f in patterns.json failures.json decisions.json metrics.json; do
|
|
1657
|
+
[[ -f "$mem_dir/$f" ]] || echo '{}' > "$mem_dir/$f"
|
|
1658
|
+
done
|
|
1659
|
+
|
|
1645
1660
|
# Merge all memory files into a single JSON export
|
|
1646
1661
|
local export_json
|
|
1647
1662
|
export_json=$(jq -n \
|
|
@@ -1757,8 +1772,10 @@ memory_stats() {
|
|
|
1757
1772
|
# Event-based hit rate
|
|
1758
1773
|
local inject_count capture_count
|
|
1759
1774
|
if [[ -f "$EVENTS_FILE" ]]; then
|
|
1760
|
-
inject_count=$(grep -c '"memory.inject"' "$EVENTS_FILE" 2>/dev/null ||
|
|
1761
|
-
|
|
1775
|
+
inject_count=$(grep -c '"memory.inject"' "$EVENTS_FILE" 2>/dev/null || true)
|
|
1776
|
+
inject_count="${inject_count:-0}"
|
|
1777
|
+
capture_count=$(grep -c '"memory.capture"' "$EVENTS_FILE" 2>/dev/null || true)
|
|
1778
|
+
capture_count="${capture_count:-0}"
|
|
1762
1779
|
echo ""
|
|
1763
1780
|
echo -e " ${BOLD}Usage${RESET}"
|
|
1764
1781
|
printf " %-18s %s\n" "Context injections:" "$inject_count"
|
|
@@ -7,7 +7,7 @@
|
|
|
7
7
|
set -euo pipefail
|
|
8
8
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
9
9
|
|
|
10
|
-
VERSION="3.0.0"
|
|
10
|
+
VERSION="3.2.0"
|
|
11
11
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
12
12
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
13
13
|
|
|
@@ -416,7 +416,8 @@ show_report() {
|
|
|
416
416
|
|
|
417
417
|
# Summary stats
|
|
418
418
|
local total_runs
|
|
419
|
-
total_runs=$(wc -l < "$MODEL_USAGE_LOG" ||
|
|
419
|
+
total_runs=$(wc -l < "$MODEL_USAGE_LOG" || true)
|
|
420
|
+
total_runs="${total_runs:-0}"
|
|
420
421
|
|
|
421
422
|
local haiku_runs
|
|
422
423
|
haiku_runs=$(grep -c '"model":"haiku"' "$MODEL_USAGE_LOG" || true)
|
package/scripts/sw-otel.sh
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.2.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
12
12
|
|
|
@@ -77,6 +77,7 @@ cmd_metrics() {
|
|
|
77
77
|
|
|
78
78
|
# Parse events.jsonl
|
|
79
79
|
if [[ -f "$EVENTS_FILE" ]]; then
|
|
80
|
+
{
|
|
80
81
|
while IFS= read -r line; do
|
|
81
82
|
[[ -z "$line" ]] && continue
|
|
82
83
|
|
|
@@ -462,9 +463,12 @@ cmd_report() {
|
|
|
462
463
|
local last_event_ts=""
|
|
463
464
|
|
|
464
465
|
if [[ -f "$EVENTS_FILE" ]]; then
|
|
465
|
-
event_count=$(wc -l < "$EVENTS_FILE" ||
|
|
466
|
-
|
|
467
|
-
|
|
466
|
+
event_count=$(wc -l < "$EVENTS_FILE" || true)
|
|
467
|
+
event_count="${event_count:-0}"
|
|
468
|
+
export_count=$(grep -c '"type":"otel_export"' "$EVENTS_FILE" 2>/dev/null || true)
|
|
469
|
+
export_count="${export_count:-0}"
|
|
470
|
+
webhook_count=$(grep -c '"type":"webhook_sent"' "$EVENTS_FILE" 2>/dev/null || true)
|
|
471
|
+
webhook_count="${webhook_count:-0}"
|
|
468
472
|
last_event_ts=$(tail -n1 "$EVENTS_FILE" | jq -r '.ts // "unknown"' 2>/dev/null || echo "unknown")
|
|
469
473
|
fi
|
|
470
474
|
|
package/scripts/sw-oversight.sh
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.2.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
12
12
|
|
|
@@ -17,6 +17,8 @@ REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
|
17
17
|
# Canonical helpers (colors, output, events)
|
|
18
18
|
# shellcheck source=lib/helpers.sh
|
|
19
19
|
[[ -f "$SCRIPT_DIR/lib/helpers.sh" ]] && source "$SCRIPT_DIR/lib/helpers.sh"
|
|
20
|
+
# shellcheck source=lib/config.sh
|
|
21
|
+
[[ -f "$SCRIPT_DIR/lib/config.sh" ]] && source "$SCRIPT_DIR/lib/config.sh"
|
|
20
22
|
# Fallbacks when helpers not loaded (e.g. test env with overridden SCRIPT_DIR)
|
|
21
23
|
[[ "$(type -t info 2>/dev/null)" == "function" ]] || info() { echo -e "\033[38;2;0;212;255m\033[1m▸\033[0m $*"; }
|
|
22
24
|
[[ "$(type -t success 2>/dev/null)" == "function" ]] || success() { echo -e "\033[38;2;74;222;128m\033[1m✓\033[0m $*"; }
|
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.2.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
12
12
|
|
|
@@ -150,7 +150,8 @@ _compute_convergence() {
|
|
|
150
150
|
fi
|
|
151
151
|
|
|
152
152
|
local total_errors
|
|
153
|
-
total_errors=$(wc -l < "$error_log" 2>/dev/null | tr -d ' ' ||
|
|
153
|
+
total_errors=$(wc -l < "$error_log" 2>/dev/null | tr -d ' ' || true)
|
|
154
|
+
total_errors="${total_errors:-0}"
|
|
154
155
|
total_errors=$(_safe_num "$total_errors")
|
|
155
156
|
|
|
156
157
|
if [[ "$total_errors" -eq 0 ]]; then
|
|
@@ -270,7 +271,8 @@ _compute_error_maturity() {
|
|
|
270
271
|
fi
|
|
271
272
|
|
|
272
273
|
local total_errors
|
|
273
|
-
total_errors=$(wc -l < "$error_log" 2>/dev/null | tr -d ' ' ||
|
|
274
|
+
total_errors=$(wc -l < "$error_log" 2>/dev/null | tr -d ' ' || true)
|
|
275
|
+
total_errors="${total_errors:-0}"
|
|
274
276
|
total_errors=$(_safe_num "$total_errors")
|
|
275
277
|
|
|
276
278
|
if [[ "$total_errors" -eq 0 ]]; then
|
|
@@ -280,7 +282,8 @@ _compute_error_maturity() {
|
|
|
280
282
|
|
|
281
283
|
# Count unique error signatures
|
|
282
284
|
local unique_errors
|
|
283
|
-
unique_errors=$(jq -r '.signature // "unknown"' "$error_log" 2>/dev/null | sort -u | wc -l | tr -d ' ' ||
|
|
285
|
+
unique_errors=$(jq -r '.signature // "unknown"' "$error_log" 2>/dev/null | sort -u | wc -l | tr -d ' ' || true)
|
|
286
|
+
unique_errors="${unique_errors:-0}"
|
|
284
287
|
unique_errors=$(_safe_num "$unique_errors")
|
|
285
288
|
|
|
286
289
|
if [[ "$unique_errors" -eq 0 ]]; then
|
|
@@ -507,8 +510,10 @@ pipeline_compute_vitals() {
|
|
|
507
510
|
# ── Error counts ──
|
|
508
511
|
local total_errors=0 unique_errors=0
|
|
509
512
|
if [[ -f "$error_log" ]]; then
|
|
510
|
-
total_errors=$(wc -l < "$error_log" 2>/dev/null | tr -d ' ' ||
|
|
511
|
-
|
|
513
|
+
total_errors=$(wc -l < "$error_log" 2>/dev/null | tr -d ' ' || true)
|
|
514
|
+
total_errors="${total_errors:-0}"
|
|
515
|
+
unique_errors=$(jq -r '.signature // "unknown"' "$error_log" 2>/dev/null | sort -u | wc -l | tr -d ' ' || true)
|
|
516
|
+
unique_errors="${unique_errors:-0}"
|
|
512
517
|
fi
|
|
513
518
|
|
|
514
519
|
# ── Output JSON ──
|
package/scripts/sw-pipeline.sh
CHANGED
|
@@ -10,8 +10,9 @@ trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
|
10
10
|
unset CLAUDECODE 2>/dev/null || true
|
|
11
11
|
# Ignore SIGHUP so tmux attach/detach doesn't kill long-running plan/design/review stages
|
|
12
12
|
trap '' HUP
|
|
13
|
+
trap '' SIGPIPE
|
|
13
14
|
|
|
14
|
-
VERSION="3.0.0"
|
|
15
|
+
VERSION="3.2.0"
|
|
15
16
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
16
17
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
17
18
|
|
|
@@ -93,6 +94,12 @@ if [[ -f "$SCRIPT_DIR/sw-durable.sh" ]]; then
|
|
|
93
94
|
fi
|
|
94
95
|
# shellcheck source=sw-db.sh — for db_save_checkpoint/db_load_checkpoint (durable workflows)
|
|
95
96
|
[[ -f "$SCRIPT_DIR/sw-db.sh" ]] && source "$SCRIPT_DIR/sw-db.sh"
|
|
97
|
+
# Ensure DB schema exists so emit_event → db_add_event can write rows (CREATE IF NOT EXISTS is idempotent)
|
|
98
|
+
if type init_schema >/dev/null 2>&1 && type check_sqlite3 >/dev/null 2>&1 && check_sqlite3 2>/dev/null; then
|
|
99
|
+
init_schema 2>/dev/null || true
|
|
100
|
+
fi
|
|
101
|
+
# shellcheck source=sw-cost.sh — for cost_record persistence to costs.json + DB
|
|
102
|
+
[[ -f "$SCRIPT_DIR/sw-cost.sh" ]] && source "$SCRIPT_DIR/sw-cost.sh"
|
|
96
103
|
|
|
97
104
|
# ─── GitHub API Modules (optional) ─────────────────────────────────────────
|
|
98
105
|
# shellcheck source=sw-github-graphql.sh
|
|
@@ -143,7 +150,8 @@ rotate_event_log_if_needed() {
|
|
|
143
150
|
local max_lines=10000
|
|
144
151
|
[[ ! -f "$events_file" ]] && return
|
|
145
152
|
local lines
|
|
146
|
-
lines=$(wc -l < "$events_file" 2>/dev/null ||
|
|
153
|
+
lines=$(wc -l < "$events_file" 2>/dev/null || true)
|
|
154
|
+
lines="${lines:-0}"
|
|
147
155
|
if [[ "$lines" -gt "$max_lines" ]]; then
|
|
148
156
|
local tmp="${events_file}.rotating"
|
|
149
157
|
if tail -5000 "$events_file" > "$tmp" 2>/dev/null && mv "$tmp" "$events_file" 2>/dev/null; then
|
|
@@ -500,14 +508,16 @@ load_pipeline_config() {
|
|
|
500
508
|
# Check for intelligence-composed pipeline first
|
|
501
509
|
local composed_pipeline="${ARTIFACTS_DIR}/composed-pipeline.json"
|
|
502
510
|
if [[ -f "$composed_pipeline" ]] && type composer_validate_pipeline >/dev/null 2>&1; then
|
|
503
|
-
# Use composed pipeline if fresh (
|
|
511
|
+
# Use composed pipeline if fresh (within cache TTL)
|
|
512
|
+
local composed_cache_ttl
|
|
513
|
+
composed_cache_ttl=$(_config_get_int "pipeline.composed_cache_ttl" 3600 2>/dev/null || echo 3600)
|
|
504
514
|
local composed_age=99999
|
|
505
515
|
local composed_mtime
|
|
506
516
|
composed_mtime=$(file_mtime "$composed_pipeline")
|
|
507
517
|
if [[ "$composed_mtime" -gt 0 ]]; then
|
|
508
518
|
composed_age=$(( $(now_epoch) - composed_mtime ))
|
|
509
519
|
fi
|
|
510
|
-
if [[ "$composed_age" -lt
|
|
520
|
+
if [[ "$composed_age" -lt "$composed_cache_ttl" ]]; then
|
|
511
521
|
local validate_json
|
|
512
522
|
validate_json=$(cat "$composed_pipeline" 2>/dev/null || echo "")
|
|
513
523
|
if [[ -n "$validate_json" ]] && composer_validate_pipeline "$validate_json" 2>/dev/null; then
|
|
@@ -633,6 +643,11 @@ cleanup_on_exit() {
|
|
|
633
643
|
git stash pop --quiet 2>/dev/null || true
|
|
634
644
|
fi
|
|
635
645
|
|
|
646
|
+
# Release durable pipeline lock
|
|
647
|
+
if [[ -n "${_PIPELINE_LOCK_ID:-}" ]] && type release_lock >/dev/null 2>&1; then
|
|
648
|
+
release_lock "$_PIPELINE_LOCK_ID" 2>/dev/null || true
|
|
649
|
+
fi
|
|
650
|
+
|
|
636
651
|
# Cancel lingering in_progress GitHub Check Runs
|
|
637
652
|
pipeline_cancel_check_runs 2>/dev/null || true
|
|
638
653
|
|
|
@@ -1550,6 +1565,10 @@ run_pipeline() {
|
|
|
1550
1565
|
stage_dur_s=$(( $(now_epoch) - stage_start_epoch ))
|
|
1551
1566
|
success "Stage ${BOLD}$id${RESET} complete ${DIM}(${timing})${RESET}"
|
|
1552
1567
|
emit_event "stage.completed" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "duration_s=$stage_dur_s" "result=success"
|
|
1568
|
+
# Emit vitals snapshot on every stage transition (not just build/test)
|
|
1569
|
+
if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
|
|
1570
|
+
pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "$id" "0" "0" "0" "" 2>/dev/null || true
|
|
1571
|
+
fi
|
|
1553
1572
|
# Record model outcome for UCB1 learning
|
|
1554
1573
|
type record_model_outcome >/dev/null 2>&1 && record_model_outcome "$stage_model_used" "$id" 1 "$stage_dur_s" 0 2>/dev/null || true
|
|
1555
1574
|
# Broadcast discovery for cross-pipeline learning
|
|
@@ -1580,6 +1599,10 @@ run_pipeline() {
|
|
|
1580
1599
|
"duration_s=$stage_dur_s" \
|
|
1581
1600
|
"error=${LAST_STAGE_ERROR:-unknown}" \
|
|
1582
1601
|
"error_class=${LAST_STAGE_ERROR_CLASS:-unknown}"
|
|
1602
|
+
# Emit vitals snapshot on failure too
|
|
1603
|
+
if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
|
|
1604
|
+
pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "$id" "0" "0" "0" "${LAST_STAGE_ERROR:-unknown}" 2>/dev/null || true
|
|
1605
|
+
fi
|
|
1583
1606
|
# Log model used for prediction feedback
|
|
1584
1607
|
echo "${id}|${stage_model_used}|false" >> "${ARTIFACTS_DIR}/model-routing.log"
|
|
1585
1608
|
# Record model outcome for UCB1 learning
|
|
@@ -1772,10 +1795,14 @@ pipeline_cleanup_worktree() {
|
|
|
1772
1795
|
# Extract branch name before removing worktree
|
|
1773
1796
|
local _wt_branch=""
|
|
1774
1797
|
_wt_branch=$(git worktree list --porcelain 2>/dev/null | grep -A1 "worktree ${worktree_path}$" | grep "^branch " | sed 's|^branch refs/heads/||' || true)
|
|
1775
|
-
git worktree remove --force "$worktree_path" 2>/dev/null
|
|
1798
|
+
if ! git worktree remove --force "$worktree_path" 2>/dev/null; then
|
|
1799
|
+
warn "Failed to remove worktree at ${worktree_path} — may need manual cleanup"
|
|
1800
|
+
fi
|
|
1776
1801
|
# Clean up the local branch
|
|
1777
1802
|
if [[ -n "$_wt_branch" ]]; then
|
|
1778
|
-
git branch -D "$_wt_branch" 2>/dev/null
|
|
1803
|
+
if ! git branch -D "$_wt_branch" 2>/dev/null; then
|
|
1804
|
+
warn "Failed to delete local branch ${_wt_branch}"
|
|
1805
|
+
fi
|
|
1779
1806
|
fi
|
|
1780
1807
|
# Clean up the remote branch (if it was pushed)
|
|
1781
1808
|
if [[ -n "$_wt_branch" && "${NO_GITHUB:-}" != "true" ]]; then
|
|
@@ -2103,6 +2130,19 @@ pipeline_start() {
|
|
|
2103
2130
|
|
|
2104
2131
|
setup_dirs
|
|
2105
2132
|
|
|
2133
|
+
# Acquire durable lock to prevent concurrent pipelines on the same issue/goal
|
|
2134
|
+
_PIPELINE_LOCK_ID=""
|
|
2135
|
+
if type acquire_lock >/dev/null 2>&1; then
|
|
2136
|
+
_PIPELINE_LOCK_ID="pipeline-${ISSUE_NUMBER:-goal-$$}"
|
|
2137
|
+
if ! acquire_lock "$_PIPELINE_LOCK_ID" 5 2>/dev/null; then
|
|
2138
|
+
error "Another pipeline is already running for this issue/goal"
|
|
2139
|
+
echo -e " Wait for it to finish, or remove stale lock:"
|
|
2140
|
+
echo -e " ${DIM}rm -rf ~/.shipwright/durable/locks/${_PIPELINE_LOCK_ID}.lock${RESET}"
|
|
2141
|
+
_PIPELINE_LOCK_ID=""
|
|
2142
|
+
exit 1
|
|
2143
|
+
fi
|
|
2144
|
+
fi
|
|
2145
|
+
|
|
2106
2146
|
# Generate reasoning trace (complexity analysis, template selection, failure predictions)
|
|
2107
2147
|
local user_specified_pipeline="$PIPELINE_NAME"
|
|
2108
2148
|
generate_reasoning_trace 2>/dev/null || true
|
|
@@ -2226,11 +2266,15 @@ pipeline_start() {
|
|
|
2226
2266
|
if [[ -z "$GIT_BRANCH" ]]; then
|
|
2227
2267
|
local ci_branch="ci/issue-${ISSUE_NUMBER}"
|
|
2228
2268
|
info "CI resume: creating branch ${ci_branch} from current HEAD"
|
|
2229
|
-
git checkout -b "$ci_branch" 2>/dev/null
|
|
2269
|
+
if ! git checkout -b "$ci_branch" 2>/dev/null && ! git checkout "$ci_branch" 2>/dev/null; then
|
|
2270
|
+
warn "CI resume: failed to create or checkout branch ${ci_branch}"
|
|
2271
|
+
fi
|
|
2230
2272
|
GIT_BRANCH="$ci_branch"
|
|
2231
2273
|
elif [[ "$(git branch --show-current 2>/dev/null)" != "$GIT_BRANCH" ]]; then
|
|
2232
2274
|
info "CI resume: checking out branch ${GIT_BRANCH}"
|
|
2233
|
-
git checkout -b "$GIT_BRANCH" 2>/dev/null
|
|
2275
|
+
if ! git checkout -b "$GIT_BRANCH" 2>/dev/null && ! git checkout "$GIT_BRANCH" 2>/dev/null; then
|
|
2276
|
+
warn "CI resume: failed to create or checkout branch ${GIT_BRANCH}"
|
|
2277
|
+
fi
|
|
2234
2278
|
fi
|
|
2235
2279
|
write_state 2>/dev/null || true
|
|
2236
2280
|
fi
|
|
@@ -2339,6 +2383,11 @@ pipeline_start() {
|
|
|
2339
2383
|
"model=${MODEL:-opus}" \
|
|
2340
2384
|
"goal=${GOAL}"
|
|
2341
2385
|
|
|
2386
|
+
# Record pipeline run in SQLite for dashboard visibility
|
|
2387
|
+
if type add_pipeline_run >/dev/null 2>&1; then
|
|
2388
|
+
add_pipeline_run "${SHIPWRIGHT_PIPELINE_ID}" "${ISSUE_NUMBER:-0}" "${GOAL}" "${BRANCH:-}" "${PIPELINE_NAME}" 2>/dev/null || true
|
|
2389
|
+
fi
|
|
2390
|
+
|
|
2342
2391
|
# Durable WAL: publish pipeline start event
|
|
2343
2392
|
if type publish_event >/dev/null 2>&1; then
|
|
2344
2393
|
publish_event "pipeline.started" "{\"issue\":\"${ISSUE_NUMBER:-0}\",\"pipeline\":\"${PIPELINE_NAME}\",\"goal\":\"${GOAL:0:200}\"}" 2>/dev/null || true
|
|
@@ -2385,10 +2434,35 @@ pipeline_start() {
|
|
|
2385
2434
|
"total_cost=$total_cost" \
|
|
2386
2435
|
"self_heal_count=$SELF_HEAL_COUNT"
|
|
2387
2436
|
|
|
2437
|
+
# Update pipeline run status in SQLite
|
|
2438
|
+
if type update_pipeline_status >/dev/null 2>&1; then
|
|
2439
|
+
update_pipeline_status "${SHIPWRIGHT_PIPELINE_ID}" "completed" "${PIPELINE_SLOWEST_STAGE:-}" "complete" "${total_dur_s:-0}" 2>/dev/null || true
|
|
2440
|
+
fi
|
|
2441
|
+
|
|
2388
2442
|
# Auto-ingest pipeline outcome into recruit profiles
|
|
2389
2443
|
if [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
|
|
2390
2444
|
bash "$SCRIPT_DIR/sw-recruit.sh" ingest-pipeline 1 2>/dev/null || true
|
|
2391
2445
|
fi
|
|
2446
|
+
|
|
2447
|
+
# Capture success patterns to memory (learn what works — parallel the failure path)
|
|
2448
|
+
if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
|
|
2449
|
+
bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
2450
|
+
fi
|
|
2451
|
+
# Update memory baselines with successful run metrics
|
|
2452
|
+
if type memory_update_metrics >/dev/null 2>&1; then
|
|
2453
|
+
memory_update_metrics "build_duration_s" "${total_dur_s:-0}" 2>/dev/null || true
|
|
2454
|
+
memory_update_metrics "total_cost_usd" "${total_cost:-0}" 2>/dev/null || true
|
|
2455
|
+
memory_update_metrics "iterations" "$((SELF_HEAL_COUNT + 1))" 2>/dev/null || true
|
|
2456
|
+
fi
|
|
2457
|
+
|
|
2458
|
+
# Record positive fix outcome if self-healing succeeded
|
|
2459
|
+
if [[ "$SELF_HEAL_COUNT" -gt 0 && -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
|
|
2460
|
+
local _success_sig
|
|
2461
|
+
_success_sig=$(tail -30 "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//' || true)
|
|
2462
|
+
if [[ -n "$_success_sig" ]]; then
|
|
2463
|
+
bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_success_sig" "true" "true" 2>/dev/null || true
|
|
2464
|
+
fi
|
|
2465
|
+
fi
|
|
2392
2466
|
else
|
|
2393
2467
|
notify "Pipeline Failed" "Goal: ${GOAL}\nFailed at: ${CURRENT_STAGE_ID:-unknown}" "error"
|
|
2394
2468
|
emit_event "pipeline.completed" \
|
|
@@ -2406,6 +2480,11 @@ pipeline_start() {
|
|
|
2406
2480
|
"total_cost=$total_cost" \
|
|
2407
2481
|
"self_heal_count=$SELF_HEAL_COUNT"
|
|
2408
2482
|
|
|
2483
|
+
# Update pipeline run status in SQLite
|
|
2484
|
+
if type update_pipeline_status >/dev/null 2>&1; then
|
|
2485
|
+
update_pipeline_status "${SHIPWRIGHT_PIPELINE_ID}" "failed" "${CURRENT_STAGE_ID:-unknown}" "failed" "${total_dur_s:-0}" 2>/dev/null || true
|
|
2486
|
+
fi
|
|
2487
|
+
|
|
2409
2488
|
# Auto-ingest pipeline outcome into recruit profiles
|
|
2410
2489
|
if [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
|
|
2411
2490
|
bash "$SCRIPT_DIR/sw-recruit.sh" ingest-pipeline 1 2>/dev/null || true
|
|
@@ -2542,6 +2621,11 @@ pipeline_start() {
|
|
|
2542
2621
|
"model=$model_key" \
|
|
2543
2622
|
"cost_usd=$total_cost"
|
|
2544
2623
|
|
|
2624
|
+
# Persist cost entry to costs.json + SQLite (was missing — tokens accumulated but never written)
|
|
2625
|
+
if type cost_record >/dev/null 2>&1; then
|
|
2626
|
+
cost_record "$TOTAL_INPUT_TOKENS" "$TOTAL_OUTPUT_TOKENS" "$model_key" "pipeline" "${ISSUE_NUMBER:-}" 2>/dev/null || true
|
|
2627
|
+
fi
|
|
2628
|
+
|
|
2545
2629
|
# Record pipeline outcome for Thompson sampling / outcome-based learning
|
|
2546
2630
|
if type db_record_outcome >/dev/null 2>&1; then
|
|
2547
2631
|
local _outcome_success=0
|
package/scripts/sw-pm.sh
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.2.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
|
|
12
12
|
# ─── Cross-platform compatibility ──────────────────────────────────────────
|
|
@@ -121,7 +121,8 @@ analyze_issue() {
|
|
|
121
121
|
# Count estimated files affected by analyzing body content
|
|
122
122
|
local file_scope complexity risk estimated_hours
|
|
123
123
|
local files_mentioned
|
|
124
|
-
files_mentioned=$(echo "$body" | grep -o '\b[a-zA-Z0-9_.-]*\.[a-z]*' | sort -u | wc -l ||
|
|
124
|
+
files_mentioned=$(echo "$body" | grep -o '\b[a-zA-Z0-9_.-]*\.[a-z]*' | sort -u | wc -l || true)
|
|
125
|
+
files_mentioned="${files_mentioned:-0}"
|
|
125
126
|
files_mentioned=$((files_mentioned + 1)) # At least 1 file
|
|
126
127
|
|
|
127
128
|
# Determine file scope
|
|
@@ -203,7 +204,7 @@ analyze_issue() {
|
|
|
203
204
|
|
|
204
205
|
# ─── recommend_team <analysis_json> ──────────────────────────────────────────
|
|
205
206
|
# Based on analysis, recommend team composition
|
|
206
|
-
# Tries recruit's AI/heuristic team composition first, falls back to
|
|
207
|
+
# Tries recruit's AI/heuristic team composition first, falls back to built-in rules.
|
|
207
208
|
recommend_team() {
|
|
208
209
|
local analysis="$1"
|
|
209
210
|
|
|
@@ -253,7 +254,7 @@ recommend_team() {
|
|
|
253
254
|
fi
|
|
254
255
|
fi
|
|
255
256
|
|
|
256
|
-
# ── Fallback:
|
|
257
|
+
# ── Fallback: heuristic team composition ──
|
|
257
258
|
local complexity risk is_security is_perf file_scope
|
|
258
259
|
complexity=$(echo "$analysis" | jq -r '.complexity')
|
|
259
260
|
risk=$(echo "$analysis" | jq -r '.risk')
|
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.2.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
12
12
|
|
|
@@ -274,13 +274,16 @@ pr_review() {
|
|
|
274
274
|
# Evaluate quality criteria
|
|
275
275
|
local issues_found=0
|
|
276
276
|
local file_count
|
|
277
|
-
file_count=$(echo "$diff_output" | grep -c '^diff --git' ||
|
|
277
|
+
file_count=$(echo "$diff_output" | grep -c '^diff --git' || true)
|
|
278
|
+
file_count="${file_count:-0}"
|
|
278
279
|
|
|
279
280
|
local line_additions
|
|
280
|
-
line_additions=$(echo "$diff_output" | grep -c '^+' ||
|
|
281
|
+
line_additions=$(echo "$diff_output" | grep -c '^+' || true)
|
|
282
|
+
line_additions="${line_additions:-0}"
|
|
281
283
|
|
|
282
284
|
local line_deletions
|
|
283
|
-
line_deletions=$(echo "$diff_output" | grep -c '^-' ||
|
|
285
|
+
line_deletions=$(echo "$diff_output" | grep -c '^-' || true)
|
|
286
|
+
line_deletions="${line_deletions:-0}"
|
|
284
287
|
|
|
285
288
|
info "Diff analysis: ${file_count} files, +${line_additions}/-${line_deletions} lines"
|
|
286
289
|
|
package/scripts/sw-predictive.sh
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.2.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
12
12
|
|
|
@@ -17,6 +17,8 @@ REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
|
17
17
|
# Canonical helpers (colors, output, events)
|
|
18
18
|
# shellcheck source=lib/helpers.sh
|
|
19
19
|
[[ -f "$SCRIPT_DIR/lib/helpers.sh" ]] && source "$SCRIPT_DIR/lib/helpers.sh"
|
|
20
|
+
# shellcheck source=lib/config.sh
|
|
21
|
+
[[ -f "$SCRIPT_DIR/lib/config.sh" ]] && source "$SCRIPT_DIR/lib/config.sh"
|
|
20
22
|
# Fallbacks when helpers not loaded (e.g. test env with overridden SCRIPT_DIR)
|
|
21
23
|
[[ "$(type -t info 2>/dev/null)" == "function" ]] || info() { echo -e "\033[38;2;0;212;255m\033[1m▸\033[0m $*"; }
|
|
22
24
|
[[ "$(type -t success 2>/dev/null)" == "function" ]] || success() { echo -e "\033[38;2;74;222;128m\033[1m✓\033[0m $*"; }
|
|
@@ -384,10 +386,12 @@ Return JSON format:
|
|
|
384
386
|
fi
|
|
385
387
|
|
|
386
388
|
# Fallback: heuristic risk assessment
|
|
387
|
-
local
|
|
389
|
+
local default_risk
|
|
390
|
+
default_risk=$(_config_get_int "predictive.default_risk_score" 50 2>/dev/null || echo 50)
|
|
391
|
+
local risk=$default_risk
|
|
388
392
|
local reason="Default medium risk — no AI analysis available"
|
|
389
393
|
|
|
390
|
-
# Check for learned keyword weights first, fall back to
|
|
394
|
+
# Check for learned keyword weights first, fall back to config defaults
|
|
391
395
|
local keywords_json
|
|
392
396
|
keywords_json=$(_predictive_get_risk_keywords)
|
|
393
397
|
|
|
@@ -416,9 +420,11 @@ Return JSON format:
|
|
|
416
420
|
reason="Learned keyword weights: ${matched_keywords%%, }"
|
|
417
421
|
fi
|
|
418
422
|
else
|
|
419
|
-
#
|
|
423
|
+
# Config-driven keyword risk elevation
|
|
424
|
+
local keyword_risk
|
|
425
|
+
keyword_risk=$(_config_get_int "predictive.keyword_risk_score" 70 2>/dev/null || echo 70)
|
|
420
426
|
if echo "$issue_json" | grep -qiE "refactor|migration|breaking|security|deploy"; then
|
|
421
|
-
risk
|
|
427
|
+
risk=$keyword_risk
|
|
422
428
|
reason="Keywords suggest elevated complexity"
|
|
423
429
|
fi
|
|
424
430
|
fi
|
package/scripts/sw-prep.sh
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.2.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
|
|
12
12
|
# ─── Handle subcommands ───────────────────────────────────────────────────────
|
package/scripts/sw-ps.sh
CHANGED
|
@@ -5,7 +5,7 @@
|
|
|
5
5
|
# ║ Displays a table of agents running in claude-* tmux windows with ║
|
|
6
6
|
# ║ PID, status, idle time, and pane references. ║
|
|
7
7
|
# ╚═══════════════════════════════════════════════════════════════════════════╝
|
|
8
|
-
VERSION="3.0.0"
|
|
8
|
+
VERSION="3.2.0"
|
|
9
9
|
set -euo pipefail
|
|
10
10
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
11
11
|
|
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.2.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
12
12
|
|
|
@@ -112,7 +112,8 @@ gather_pipeline_state() {
|
|
|
112
112
|
# Read pipeline artifacts if available
|
|
113
113
|
if [[ -d "$pipeline_artifacts" ]]; then
|
|
114
114
|
local stage_count
|
|
115
|
-
stage_count=$(find "$pipeline_artifacts" -name "*.md" -o -name "*.json" | wc -l ||
|
|
115
|
+
stage_count=$(find "$pipeline_artifacts" -name "*.md" -o -name "*.json" | wc -l || true)
|
|
116
|
+
stage_count="${stage_count:-0}"
|
|
116
117
|
pipeline_data=$(jq --arg count "$stage_count" '.artifact_count = $count' <<<"$pipeline_data")
|
|
117
118
|
fi
|
|
118
119
|
|