shipwright-cli 3.1.0 → 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/code-reviewer.md +2 -0
- package/.claude/agents/devops-engineer.md +2 -0
- package/.claude/agents/doc-fleet-agent.md +2 -0
- package/.claude/agents/pipeline-agent.md +2 -0
- package/.claude/agents/shell-script-specialist.md +2 -0
- package/.claude/agents/test-specialist.md +2 -0
- package/.claude/hooks/agent-crash-capture.sh +32 -0
- package/.claude/hooks/post-tool-use.sh +3 -2
- package/.claude/hooks/pre-tool-use.sh +35 -3
- package/README.md +22 -8
- package/claude-code/hooks/config-change.sh +18 -0
- package/claude-code/hooks/instructions-reloaded.sh +7 -0
- package/claude-code/hooks/worktree-create.sh +25 -0
- package/claude-code/hooks/worktree-remove.sh +20 -0
- package/config/code-constitution.json +130 -0
- package/config/defaults.json +25 -2
- package/config/policy.json +1 -1
- package/dashboard/middleware/auth.ts +134 -0
- package/dashboard/middleware/constants.ts +21 -0
- package/dashboard/public/index.html +8 -6
- package/dashboard/public/styles.css +176 -97
- package/dashboard/routes/auth.ts +38 -0
- package/dashboard/server.ts +117 -25
- package/dashboard/services/config.ts +26 -0
- package/dashboard/services/db.ts +118 -0
- package/dashboard/src/canvas/pixel-agent.ts +298 -0
- package/dashboard/src/canvas/pixel-sprites.ts +440 -0
- package/dashboard/src/canvas/shipyard-effects.ts +367 -0
- package/dashboard/src/canvas/shipyard-scene.ts +616 -0
- package/dashboard/src/canvas/submarine-layout.ts +267 -0
- package/dashboard/src/components/header.ts +8 -7
- package/dashboard/src/core/api.ts +5 -0
- package/dashboard/src/core/router.ts +1 -0
- package/dashboard/src/design/submarine-theme.ts +253 -0
- package/dashboard/src/main.ts +2 -0
- package/dashboard/src/types/api.ts +12 -1
- package/dashboard/src/views/activity.ts +2 -1
- package/dashboard/src/views/metrics.ts +69 -1
- package/dashboard/src/views/shipyard.ts +39 -0
- package/dashboard/types/index.ts +166 -0
- package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
- package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
- package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
- package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
- package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
- package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
- package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
- package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
- package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
- package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
- package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
- package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
- package/docs/research/RESEARCH_INDEX.md +439 -0
- package/docs/research/RESEARCH_SOURCES.md +440 -0
- package/docs/research/RESEARCH_SUMMARY.txt +275 -0
- package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
- package/package.json +2 -2
- package/scripts/lib/adaptive-model.sh +427 -0
- package/scripts/lib/adaptive-timeout.sh +316 -0
- package/scripts/lib/audit-trail.sh +309 -0
- package/scripts/lib/auto-recovery.sh +471 -0
- package/scripts/lib/bandit-selector.sh +431 -0
- package/scripts/lib/bootstrap.sh +104 -2
- package/scripts/lib/causal-graph.sh +455 -0
- package/scripts/lib/compat.sh +126 -0
- package/scripts/lib/compound-audit.sh +337 -0
- package/scripts/lib/constitutional.sh +454 -0
- package/scripts/lib/context-budget.sh +359 -0
- package/scripts/lib/convergence.sh +594 -0
- package/scripts/lib/cost-optimizer.sh +634 -0
- package/scripts/lib/daemon-adaptive.sh +14 -2
- package/scripts/lib/daemon-dispatch.sh +106 -17
- package/scripts/lib/daemon-failure.sh +34 -4
- package/scripts/lib/daemon-patrol.sh +25 -4
- package/scripts/lib/daemon-poll-github.sh +361 -0
- package/scripts/lib/daemon-poll-health.sh +299 -0
- package/scripts/lib/daemon-poll.sh +27 -611
- package/scripts/lib/daemon-state.sh +119 -66
- package/scripts/lib/daemon-triage.sh +10 -0
- package/scripts/lib/dod-scorecard.sh +442 -0
- package/scripts/lib/error-actionability.sh +300 -0
- package/scripts/lib/formal-spec.sh +461 -0
- package/scripts/lib/helpers.sh +180 -5
- package/scripts/lib/intent-analysis.sh +409 -0
- package/scripts/lib/loop-convergence.sh +350 -0
- package/scripts/lib/loop-iteration.sh +682 -0
- package/scripts/lib/loop-progress.sh +48 -0
- package/scripts/lib/loop-restart.sh +185 -0
- package/scripts/lib/memory-effectiveness.sh +506 -0
- package/scripts/lib/mutation-executor.sh +352 -0
- package/scripts/lib/outcome-feedback.sh +521 -0
- package/scripts/lib/pipeline-cli.sh +336 -0
- package/scripts/lib/pipeline-commands.sh +1216 -0
- package/scripts/lib/pipeline-detection.sh +101 -3
- package/scripts/lib/pipeline-execution.sh +897 -0
- package/scripts/lib/pipeline-github.sh +28 -3
- package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
- package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
- package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
- package/scripts/lib/pipeline-intelligence.sh +104 -1138
- package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
- package/scripts/lib/pipeline-quality-checks.sh +17 -711
- package/scripts/lib/pipeline-quality-gates.sh +563 -0
- package/scripts/lib/pipeline-stages-build.sh +730 -0
- package/scripts/lib/pipeline-stages-delivery.sh +965 -0
- package/scripts/lib/pipeline-stages-intake.sh +1133 -0
- package/scripts/lib/pipeline-stages-monitor.sh +407 -0
- package/scripts/lib/pipeline-stages-review.sh +1022 -0
- package/scripts/lib/pipeline-stages.sh +161 -2901
- package/scripts/lib/pipeline-state.sh +36 -5
- package/scripts/lib/pipeline-util.sh +487 -0
- package/scripts/lib/policy-learner.sh +438 -0
- package/scripts/lib/process-reward.sh +493 -0
- package/scripts/lib/project-detect.sh +649 -0
- package/scripts/lib/quality-profile.sh +334 -0
- package/scripts/lib/recruit-commands.sh +885 -0
- package/scripts/lib/recruit-learning.sh +739 -0
- package/scripts/lib/recruit-roles.sh +648 -0
- package/scripts/lib/reward-aggregator.sh +458 -0
- package/scripts/lib/rl-optimizer.sh +362 -0
- package/scripts/lib/root-cause.sh +427 -0
- package/scripts/lib/scope-enforcement.sh +445 -0
- package/scripts/lib/session-restart.sh +493 -0
- package/scripts/lib/skill-memory.sh +300 -0
- package/scripts/lib/skill-registry.sh +775 -0
- package/scripts/lib/spec-driven.sh +476 -0
- package/scripts/lib/test-helpers.sh +18 -7
- package/scripts/lib/test-holdout.sh +429 -0
- package/scripts/lib/test-optimizer.sh +511 -0
- package/scripts/shipwright-file-suggest.sh +45 -0
- package/scripts/skills/adversarial-quality.md +61 -0
- package/scripts/skills/api-design.md +44 -0
- package/scripts/skills/architecture-design.md +50 -0
- package/scripts/skills/brainstorming.md +43 -0
- package/scripts/skills/data-pipeline.md +44 -0
- package/scripts/skills/deploy-safety.md +64 -0
- package/scripts/skills/documentation.md +38 -0
- package/scripts/skills/frontend-design.md +45 -0
- package/scripts/skills/generated/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
- package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
- package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
- package/scripts/skills/generated/cli-version-management.md +29 -0
- package/scripts/skills/generated/collection-system-validation.md +99 -0
- package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
- package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
- package/scripts/skills/generated/test-parallelization-detection.md +65 -0
- package/scripts/skills/observability.md +79 -0
- package/scripts/skills/performance.md +48 -0
- package/scripts/skills/pr-quality.md +49 -0
- package/scripts/skills/product-thinking.md +43 -0
- package/scripts/skills/security-audit.md +49 -0
- package/scripts/skills/systematic-debugging.md +40 -0
- package/scripts/skills/testing-strategy.md +47 -0
- package/scripts/skills/two-stage-review.md +52 -0
- package/scripts/skills/validation-thoroughness.md +55 -0
- package/scripts/sw +9 -3
- package/scripts/sw-activity.sh +9 -8
- package/scripts/sw-adaptive.sh +8 -7
- package/scripts/sw-adversarial.sh +2 -1
- package/scripts/sw-architecture-enforcer.sh +3 -1
- package/scripts/sw-auth.sh +12 -2
- package/scripts/sw-autonomous.sh +5 -1
- package/scripts/sw-changelog.sh +4 -1
- package/scripts/sw-checkpoint.sh +2 -1
- package/scripts/sw-ci.sh +15 -6
- package/scripts/sw-cleanup.sh +4 -26
- package/scripts/sw-code-review.sh +45 -20
- package/scripts/sw-connect.sh +2 -1
- package/scripts/sw-context.sh +2 -1
- package/scripts/sw-cost.sh +107 -5
- package/scripts/sw-daemon.sh +71 -11
- package/scripts/sw-dashboard.sh +3 -1
- package/scripts/sw-db.sh +71 -20
- package/scripts/sw-decide.sh +8 -2
- package/scripts/sw-decompose.sh +360 -17
- package/scripts/sw-deps.sh +4 -1
- package/scripts/sw-developer-simulation.sh +4 -1
- package/scripts/sw-discovery.sh +378 -5
- package/scripts/sw-doc-fleet.sh +4 -1
- package/scripts/sw-docs-agent.sh +3 -1
- package/scripts/sw-docs.sh +2 -1
- package/scripts/sw-doctor.sh +453 -2
- package/scripts/sw-dora.sh +4 -1
- package/scripts/sw-durable.sh +12 -7
- package/scripts/sw-e2e-orchestrator.sh +17 -16
- package/scripts/sw-eventbus.sh +13 -4
- package/scripts/sw-evidence.sh +364 -12
- package/scripts/sw-feedback.sh +550 -9
- package/scripts/sw-fix.sh +20 -1
- package/scripts/sw-fleet-discover.sh +6 -2
- package/scripts/sw-fleet-viz.sh +9 -4
- package/scripts/sw-fleet.sh +5 -1
- package/scripts/sw-github-app.sh +18 -4
- package/scripts/sw-github-checks.sh +3 -2
- package/scripts/sw-github-deploy.sh +3 -2
- package/scripts/sw-github-graphql.sh +18 -7
- package/scripts/sw-guild.sh +5 -1
- package/scripts/sw-heartbeat.sh +5 -30
- package/scripts/sw-hello.sh +67 -0
- package/scripts/sw-hygiene.sh +10 -3
- package/scripts/sw-incident.sh +273 -5
- package/scripts/sw-init.sh +18 -2
- package/scripts/sw-instrument.sh +10 -2
- package/scripts/sw-intelligence.sh +44 -7
- package/scripts/sw-jira.sh +5 -1
- package/scripts/sw-launchd.sh +2 -1
- package/scripts/sw-linear.sh +4 -1
- package/scripts/sw-logs.sh +4 -1
- package/scripts/sw-loop.sh +436 -1076
- package/scripts/sw-memory.sh +357 -3
- package/scripts/sw-mission-control.sh +6 -1
- package/scripts/sw-model-router.sh +483 -27
- package/scripts/sw-otel.sh +15 -4
- package/scripts/sw-oversight.sh +14 -5
- package/scripts/sw-patrol-meta.sh +334 -0
- package/scripts/sw-pipeline-composer.sh +7 -1
- package/scripts/sw-pipeline-vitals.sh +12 -6
- package/scripts/sw-pipeline.sh +54 -2653
- package/scripts/sw-pm.sh +16 -8
- package/scripts/sw-pr-lifecycle.sh +2 -1
- package/scripts/sw-predictive.sh +17 -5
- package/scripts/sw-prep.sh +185 -2
- package/scripts/sw-ps.sh +5 -25
- package/scripts/sw-public-dashboard.sh +17 -4
- package/scripts/sw-quality.sh +14 -6
- package/scripts/sw-reaper.sh +8 -25
- package/scripts/sw-recruit.sh +156 -2303
- package/scripts/sw-regression.sh +19 -12
- package/scripts/sw-release-manager.sh +3 -1
- package/scripts/sw-release.sh +4 -1
- package/scripts/sw-remote.sh +3 -1
- package/scripts/sw-replay.sh +7 -1
- package/scripts/sw-retro.sh +158 -1
- package/scripts/sw-review-rerun.sh +3 -1
- package/scripts/sw-scale.sh +14 -5
- package/scripts/sw-security-audit.sh +6 -1
- package/scripts/sw-self-optimize.sh +173 -6
- package/scripts/sw-session.sh +9 -3
- package/scripts/sw-setup.sh +3 -1
- package/scripts/sw-stall-detector.sh +406 -0
- package/scripts/sw-standup.sh +15 -7
- package/scripts/sw-status.sh +3 -1
- package/scripts/sw-strategic.sh +14 -6
- package/scripts/sw-stream.sh +13 -4
- package/scripts/sw-swarm.sh +20 -7
- package/scripts/sw-team-stages.sh +13 -6
- package/scripts/sw-templates.sh +7 -31
- package/scripts/sw-testgen.sh +17 -6
- package/scripts/sw-tmux-pipeline.sh +4 -1
- package/scripts/sw-tmux-role-color.sh +2 -0
- package/scripts/sw-tmux-status.sh +1 -1
- package/scripts/sw-tmux.sh +37 -1
- package/scripts/sw-trace.sh +3 -1
- package/scripts/sw-tracker-github.sh +3 -0
- package/scripts/sw-tracker-jira.sh +3 -0
- package/scripts/sw-tracker-linear.sh +3 -0
- package/scripts/sw-tracker.sh +3 -1
- package/scripts/sw-triage.sh +3 -2
- package/scripts/sw-upgrade.sh +3 -1
- package/scripts/sw-ux.sh +5 -2
- package/scripts/sw-webhook.sh +5 -2
- package/scripts/sw-widgets.sh +9 -4
- package/scripts/sw-worktree.sh +15 -3
- package/scripts/test-skill-injection.sh +1233 -0
- package/templates/pipelines/autonomous.json +27 -3
- package/templates/pipelines/cost-aware.json +34 -8
- package/templates/pipelines/deployed.json +12 -0
- package/templates/pipelines/enterprise.json +12 -0
- package/templates/pipelines/fast.json +6 -0
- package/templates/pipelines/full.json +27 -3
- package/templates/pipelines/hotfix.json +6 -0
- package/templates/pipelines/standard.json +12 -0
- package/templates/pipelines/tdd.json +12 -0
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
# adaptive-timeout.sh — Adaptive Stage Timeout Engine with P95 Duration-Based Auto-Tuning
|
|
3
|
+
# Source from sw-pipeline.sh. Requires SCRIPT_DIR and helpers (info, warn, error, emit_event).
|
|
4
|
+
# Tracks historical stage durations and auto-adjusts timeouts based on P95 percentile.
|
|
5
|
+
|
|
6
|
+
[[ -n "${_ADAPTIVE_TIMEOUT_LOADED:-}" ]] && return 0
|
|
7
|
+
_ADAPTIVE_TIMEOUT_LOADED=1
|
|
8
|
+
|
|
9
|
+
# Module version for debugging
|
|
10
|
+
VERSION="3.3.0"
|
|
11
|
+
|
|
12
|
+
# ─── Configuration ──────────────────────────────────────────────────────────
|
|
13
|
+
|
|
14
|
+
# Default timeouts per stage (in seconds)
|
|
15
|
+
# Using case statement instead of associative arrays for bash 3.2 compatibility
|
|
16
|
+
# _timeout_default(stage) — Built-in per-stage timeout, in seconds.
# Implemented as a case table rather than an associative array so the
# module stays compatible with bash 3.2 (macOS system bash).
# $1: stage name
# Outputs: default timeout in seconds (300 for unrecognized stages)
_timeout_default() {
  local stage="${1:-unknown}"
  case "$stage" in
    intake)                   echo 60   ;;
    plan|design)              echo 300  ;;
    build)                    echo 1800 ;;
    test|review)              echo 600  ;;
    compound_quality)         echo 900  ;;
    pr|merge)                 echo 120  ;;
    deploy|validate|monitor)  echo 300  ;;
    *)                        echo 300  ;;  # Unknown stage — conservative default
  esac
}
|
|
34
|
+
|
|
35
|
+
# Adaptive timeout constraints (in seconds)
# Adaptive values are always clamped into [TIMEOUT_MIN, TIMEOUT_MAX].
TIMEOUT_MIN=30
TIMEOUT_MAX=7200
TIMEOUT_BUFFER_PCT=20 # Add 20% buffer to P95

# Historical data thresholds
TIMEOUT_MIN_SAMPLES=10 # Require N samples before using adaptive timeout
TIMEOUT_HISTORY_LOOKBACK=100 # Use last N samples for P95 calculation
TIMEOUT_ROTATION_ENTRIES=10000 # Rotate history file at N entries

# Paths
# Per-user history shared across repos; JSONL, one duration record per line.
TIMEOUT_HISTORY_FILE="${HOME}/.shipwright/optimization/stage-durations.jsonl"
|
|
47
|
+
|
|
48
|
+
# ─── Initialize ────────────────────────────────────────────────────────────
|
|
49
|
+
|
|
50
|
+
# timeout_init() — Initialize adaptive timeout system.
|
|
51
|
+
# Creates history directory if needed.
|
|
52
|
+
# Returns: 0 on success
|
|
53
|
+
# timeout_init() — Ensure the duration-history directory and file exist.
# Best-effort: creation failures are swallowed so a read-only HOME never
# breaks the pipeline (callers fall back to default timeouts anyway).
# Returns: 0 always
timeout_init() {
  local history_dir
  history_dir=$(dirname "$TIMEOUT_HISTORY_FILE")

  # Create directory, then the JSONL file itself, only when missing.
  [[ -d "$history_dir" ]] || mkdir -p "$history_dir" 2>/dev/null || true
  [[ -f "$TIMEOUT_HISTORY_FILE" ]] || touch "$TIMEOUT_HISTORY_FILE" 2>/dev/null || true

  return 0
}
|
|
68
|
+
|
|
69
|
+
# ─── Timeout Retrieval ──────────────────────────────────────────────────────
|
|
70
|
+
|
|
71
|
+
# timeout_get(stage) — Get adaptive timeout for a stage.
|
|
72
|
+
# Returns the appropriate timeout in seconds (default or adaptive based on history).
|
|
73
|
+
# $1: stage name (e.g., "build", "test")
|
|
74
|
+
# Returns: timeout in seconds
|
|
75
|
+
# timeout_get(stage) — Resolve the effective timeout for a pipeline stage.
# Emits the stage default until TIMEOUT_MIN_SAMPLES recordings exist;
# after that, emits the historical P95 padded by TIMEOUT_BUFFER_PCT and
# clamped to [TIMEOUT_MIN, TIMEOUT_MAX].
# $1: stage name (e.g., "build", "test")
# Outputs: timeout in seconds on stdout
# Returns: 0
timeout_get() {
  local stage_name="${1:-unknown}"

  # Make sure history storage exists before reading it.
  timeout_init

  local fallback
  fallback=$(_timeout_default "$stage_name")

  # With too few recorded runs, adaptive tuning is unreliable — use default.
  local samples
  samples=$(timeout_sample_count "$stage_name")
  if [[ "$samples" -lt "$TIMEOUT_MIN_SAMPLES" ]]; then
    echo "$fallback"
    return 0
  fi

  local p95
  p95=$(timeout_calculate_p95 "$stage_name") || p95=""
  if [[ -z "$p95" || "$p95" -le 0 ]]; then
    # P95 computation failed — fall back to the static default.
    echo "$fallback"
    return 0
  fi

  # Pad P95 with the configured safety buffer, then clamp to bounds.
  local tuned
  tuned=$(( p95 + (p95 * TIMEOUT_BUFFER_PCT / 100) ))
  if (( tuned < TIMEOUT_MIN )); then
    tuned=$TIMEOUT_MIN
  fi
  if (( tuned > TIMEOUT_MAX )); then
    tuned=$TIMEOUT_MAX
  fi

  echo "$tuned"
  return 0
}
|
|
119
|
+
|
|
120
|
+
# ─── Duration Recording ────────────────────────────────────────────────────
|
|
121
|
+
|
|
122
|
+
# timeout_record(stage, duration_seconds) — Record a stage duration.
|
|
123
|
+
# Appends JSONL entry to history. Rotates file when it reaches TIMEOUT_ROTATION_ENTRIES.
|
|
124
|
+
# $1: stage name
|
|
125
|
+
# $2: duration in seconds
|
|
126
|
+
# $3: (optional) pipeline_template (fast/standard/full/hotfix/autonomous/enterprise/cost-aware/deployed)
|
|
127
|
+
# $4: (optional) complexity (simple/medium/complex/critical)
|
|
128
|
+
# Returns: 0 on success
|
|
129
|
+
# timeout_record(stage, duration_seconds, [pipeline_template], [complexity])
# Record one observed stage duration as a JSONL entry.
#
# Entries are appended in chronological order (newest LAST). The original
# prepended the new entry, which made `tail`-based consumers — the rotation
# below and the lookback window in timeout_calculate_p95 — keep the OLDEST
# samples and silently discard all new data once the file filled up.
# Rotation trims the file to the newest TIMEOUT_ROTATION_ENTRIES entries.
#
# $1: stage name (must not be "unknown")
# $2: duration in seconds (non-negative integer)
# $3: (optional) pipeline template (fast/standard/full/hotfix/...; default: standard)
# $4: (optional) complexity (simple/medium/complex/critical; default: medium)
# Returns: 0 on success, 1 on invalid input or write failure
timeout_record() {
  local stage="${1:-unknown}"
  local duration_seconds="${2:-0}"
  local pipeline_template="${3:-standard}"
  local complexity="${4:-medium}"

  # Reject non-numeric durations and unnamed stages.
  if ! [[ "$duration_seconds" =~ ^[0-9]+$ ]]; then
    return 1
  fi
  if [[ "$stage" == "unknown" ]]; then
    return 1
  fi

  # Ensure the history directory exists (self-contained; the append below
  # creates the file itself).
  mkdir -p "$(dirname "$TIMEOUT_HISTORY_FILE")" 2>/dev/null || true

  local timestamp
  timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

  local entry
  entry=$(printf '{"stage":"%s","duration_s":%d,"timestamp":"%s","pipeline_template":"%s","complexity":"%s"}' \
    "$stage" "$duration_seconds" "$timestamp" "$pipeline_template" "$complexity")

  # Atomic rewrite (mktemp + mv): existing history first, new entry last.
  local tmpfile
  tmpfile=$(mktemp) || return 1
  trap "rm -f '$tmpfile'" RETURN

  {
    cat "$TIMEOUT_HISTORY_FILE" 2>/dev/null || true
    printf '%s\n' "$entry"
  } > "$tmpfile"

  # Rotate: keep only the newest N entries.
  local line_count
  line_count=$(wc -l < "$tmpfile" 2>/dev/null | xargs)
  if [[ "$line_count" -gt "$TIMEOUT_ROTATION_ENTRIES" ]]; then
    tail -n "$TIMEOUT_ROTATION_ENTRIES" "$tmpfile" > "${tmpfile}.rotated"
    mv "${tmpfile}.rotated" "$tmpfile"
  fi

  # Move into place (the RETURN trap's rm -f on the moved path is a no-op).
  mv "$tmpfile" "$TIMEOUT_HISTORY_FILE"

  return 0
}
|
|
180
|
+
|
|
181
|
+
# ─── P95 Calculation ────────────────────────────────────────────────────────
|
|
182
|
+
|
|
183
|
+
# timeout_sample_count(stage) — Count historical samples for a stage.
|
|
184
|
+
# $1: stage name
|
|
185
|
+
# Returns: number of samples
|
|
186
|
+
# timeout_sample_count(stage) — Number of recorded duration samples for a stage.
# $1: stage name
# Outputs: sample count on stdout ("0" when no history file or no matches)
# Returns: 0
timeout_sample_count() {
  local stage="${1:-unknown}"

  if [[ ! -f "$TIMEOUT_HISTORY_FILE" ]]; then
    echo "0"
    return 0
  fi

  # grep -c prints "0" itself on no match; xargs trims whitespace.
  grep -c "\"stage\":\"$stage\"" "$TIMEOUT_HISTORY_FILE" 2>/dev/null | xargs || echo "0"
}
|
|
194
|
+
|
|
195
|
+
# timeout_calculate_p95(stage) — Calculate P95 duration from historical data.
|
|
196
|
+
# Uses jq to extract durations, sort, and compute the 95th percentile.
|
|
197
|
+
# $1: stage name
|
|
198
|
+
# Returns: P95 duration in seconds (or empty string on error)
|
|
199
|
+
# timeout_calculate_p95(stage) — Calculate P95 duration from historical data.
# Uses jq to extract durations, sort, and compute the 95th percentile.
# NOTE(review): the primary jq path reads ALL matching history lines, while
# the awk fallback only considers the last TIMEOUT_HISTORY_LOOKBACK lines —
# the two methods can disagree on long histories; confirm which window is
# intended.
# $1: stage name
# Outputs: P95 duration in seconds on stdout
# Returns: 0 on success, 1 when no history exists or both methods fail
timeout_calculate_p95() {
  local stage="${1:-unknown}"

  [[ ! -f "$TIMEOUT_HISTORY_FILE" ]] && return 1

  # Extract durations for this stage, sort, and get P95
  # P95 is the value at index (count * 0.95) rounded down
  local p95

  # Method 1: Pure jq with proper slurping of each JSON object's duration_s field
  # (-s slurps the filtered JSONL lines into one array).
  p95=$(grep "\"stage\":\"$stage\"" "$TIMEOUT_HISTORY_FILE" 2>/dev/null | \
    jq -s 'map(.duration_s) | sort |
           (length * 0.95 | floor) as $idx |
           if .[$idx] then .[$idx] else empty end' 2>/dev/null) || p95=""

  # Fallback: if jq pipeline fails, try awk approach
  # (in-awk selection sort, then nearest-rank index; clamped to >= 1).
  # NOTE(review): awk length(arr) on an array is a GNU/one-true-awk
  # extension — confirm against the oldest awk this must support.
  if [[ -z "$p95" ]]; then
    p95=$(grep "\"stage\":\"$stage\"" "$TIMEOUT_HISTORY_FILE" 2>/dev/null | \
      tail -n "$TIMEOUT_HISTORY_LOOKBACK" | \
      jq -r '.duration_s' 2>/dev/null | \
      awk '{arr[NR]=$1} END {
        count=length(arr);
        if (count==0) exit 1;
        for (i=1; i<=count; i++) for (j=i; j<=count; j++)
          if (arr[i]>arr[j]) {t=arr[i]; arr[i]=arr[j]; arr[j]=t}
        idx=int(count*0.95); if (idx==0) idx=1;
        print arr[idx]
      }' 2>/dev/null) || p95=""
  fi

  # Output result (empty string on complete failure)
  [[ -n "$p95" ]] && echo "$p95" || return 1
}
|
|
232
|
+
|
|
233
|
+
# ─── Reporting ──────────────────────────────────────────────────────────────
|
|
234
|
+
|
|
235
|
+
# timeout_report() — Show timeout tuning statistics.
|
|
236
|
+
# Displays per-stage defaults, current adaptive values, P50/P95, and sample counts.
|
|
237
|
+
# Returns: 0
|
|
238
|
+
# timeout_report() — Show timeout tuning statistics.
# Displays per-stage defaults, current adaptive values, P50/P95, and sample counts.
# Stages with fewer than TIMEOUT_MIN_SAMPLES samples render "—" placeholders.
# Depends on jq for the P50 column and on sibling helpers
# (_timeout_default, timeout_sample_count, timeout_calculate_p95, timeout_get).
# Returns: 0
timeout_report() {
  local stage

  echo ""
  printf "├─ %s\n" "Adaptive Stage Timeout Report"
  printf "│\n"
  printf "│ %-20s %8s %8s %8s %8s %6s\n" "Stage" "Default" "Adaptive" "P50" "P95" "Samples"
  printf "│ %s\n" "$(printf '%.0s─' {1..80})"

  # List of all known stages
  local stages=(intake plan design build test review compound_quality pr merge deploy validate monitor)

  for stage in "${stages[@]}"; do
    local default_timeout
    default_timeout=$(_timeout_default "$stage")
    local sample_count
    sample_count=$(timeout_sample_count "$stage")

    if [[ "$sample_count" -lt "$TIMEOUT_MIN_SAMPLES" ]]; then
      # Not enough data
      printf "│ %-20s %8ds %8s %8s %8s %6d\n" \
        "$stage" "$default_timeout" "—" "—" "—" "$sample_count"
    else
      # Calculate P50 and P95
      local p50 p95 adaptive_timeout

      # P50 (median)
      # NOTE(review): first jq extracts raw duration_s values, second jq
      # re-parses them as numbers to index the median — works, but a single
      # jq -s pass (as in timeout_calculate_p95) would be simpler.
      p50=$(grep "\"stage\":\"$stage\"" "$TIMEOUT_HISTORY_FILE" 2>/dev/null | \
        jq -r '.duration_s' 2>/dev/null | \
        jq -nR -s '[inputs | tonumber] | sort |
                   (length * 0.5 | floor) as $idx |
                   if .[$idx] then .[$idx] else empty end' 2>/dev/null)
      p50="${p50:-—}"

      # P95
      p95=$(timeout_calculate_p95 "$stage") || p95="—"

      # Current adaptive timeout
      adaptive_timeout=$(timeout_get "$stage")

      # Format output ("—" passes through; numbers get an "s" suffix)
      local p50_str p95_str adaptive_str
      p50_str=$([[ "$p50" == "—" ]] && echo "—" || printf "%ds" "$p50")
      p95_str=$([[ "$p95" == "—" ]] && echo "—" || printf "%ds" "$p95")
      adaptive_str=$(printf "%ds" "$adaptive_timeout")

      printf "│ %-20s %8ds %8s %8s %8s %6d\n" \
        "$stage" "$default_timeout" "$adaptive_str" "$p50_str" "$p95_str" "$sample_count"
    fi
  done

  printf "│\n"
  printf "├─ Summary\n"
  printf "│ Min timeout: %ds | Max timeout: %ds | Buffer: %d%%\n" \
    "$TIMEOUT_MIN" "$TIMEOUT_MAX" "$TIMEOUT_BUFFER_PCT"
  printf "│ Min samples for adaptive: %d | History lookback: %d | Rotation: %d entries\n" \
    "$TIMEOUT_MIN_SAMPLES" "$TIMEOUT_HISTORY_LOOKBACK" "$TIMEOUT_ROTATION_ENTRIES"
  printf "│ History file: %s (%d lines)\n" \
    "$TIMEOUT_HISTORY_FILE" "$(wc -l < "$TIMEOUT_HISTORY_FILE" 2>/dev/null | xargs || echo 0)"
  printf "└─\n"

  return 0
}
|
|
301
|
+
|
|
302
|
+
# ─── Utility: Reset history (for testing/cleanup) ────────────────────────────
|
|
303
|
+
|
|
304
|
+
# timeout_reset() — Clear all historical data (for testing).
|
|
305
|
+
# Usage: timeout_reset
|
|
306
|
+
# Returns: 0
|
|
307
|
+
# timeout_reset() — Delete all recorded duration history (testing/cleanup).
# Usage: timeout_reset
# Returns: 0 on success or when no history exists, 1 if removal fails
timeout_reset() {
  [[ -f "$TIMEOUT_HISTORY_FILE" ]] || return 0
  rm -f "$TIMEOUT_HISTORY_FILE" || return 1
  return 0
}
|
|
313
|
+
|
|
314
|
+
# ─── Initialization on load ──────────────────────────────────────────────────
|
|
315
|
+
|
|
316
|
+
timeout_init
|
|
@@ -0,0 +1,309 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
# ╔═══════════════════════════════════════════════════════════════════════════╗
|
|
3
|
+
# ║ audit-trail — Structured pipeline audit logging ║
|
|
4
|
+
# ║ ║
|
|
5
|
+
# ║ Provides JSONL event emission, prompt archiving, and report generation ║
|
|
6
|
+
# ║ for full pipeline lifecycle tracking and post-mortem analysis. ║
|
|
7
|
+
# ║ ║
|
|
8
|
+
# ║ All functions are fail-open: risky operations wrapped with || return 0 ║
|
|
9
|
+
# ║ so audit never blocks the pipeline. ║
|
|
10
|
+
# ╚═══════════════════════════════════════════════════════════════════════════╝
|
|
11
|
+
|
|
12
|
+
[[ -n "${_AUDIT_TRAIL_LOADED:-}" ]] && return 0
|
|
13
|
+
_AUDIT_TRAIL_LOADED=1
|
|
14
|
+
|
|
15
|
+
# ─── Internal State ──────────────────────────────────────────────────────────
|
|
16
|
+
_AUDIT_JSONL="" # Updated by audit_init from current ARTIFACTS_DIR
|
|
17
|
+
|
|
18
|
+
# ─── Helper: Build JSON with escaped values ──────────────────────────────────
|
|
19
|
+
# _audit_escape_json_value(value) — Escape a string for embedding inside a
# JSON double-quoted value.
# The original escaped only backslash and double quote, so any value
# containing a newline/tab/CR (multi-line goals, prompts) produced invalid
# JSON lines; those control characters are now escaped too.
# $1: raw value
# Outputs: escaped value on stdout
_audit_escape_json_value() {
  local value="$1"
  # Backslash MUST be escaped first so later escapes aren't double-escaped.
  value="${value//\\/\\\\}"
  value="${value//\"/\\\"}"
  value="${value//$'\n'/\\n}"
  value="${value//$'\r'/\\r}"
  value="${value//$'\t'/\\t}"
  # printf instead of echo: safe for values like "-n" or "-e".
  printf '%s\n' "$value"
}
|
|
26
|
+
|
|
27
|
+
# ─── audit_init — Initialize audit trail ────────────────────────────────────
|
|
28
|
+
# Creates JSONL file and writes pipeline.start event with metadata.
|
|
29
|
+
# Updates _AUDIT_JSONL from current ARTIFACTS_DIR.
|
|
30
|
+
#
|
|
31
|
+
# Usage: audit_init --issue 42 --goal "..." --template standard --model gpt-4 --git-sha abc123
|
|
32
|
+
# audit_init — Start a new audit trail for this pipeline run.
# Resolves _AUDIT_JSONL from the current ARTIFACTS_DIR and appends a
# "pipeline.start" event carrying whichever metadata flags were supplied.
# Fail-open: any filesystem/date failure returns 0 so audit never blocks.
#
# Usage: audit_init --issue 42 --goal "..." --template standard --model gpt-4 --git-sha abc123
audit_init() {
  local issue="" goal="" template="" model="" git_sha=""

  # Flag parsing; unrecognized arguments are skipped silently.
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --issue)    issue="$2";    shift 2 ;;
      --goal)     goal="$2";     shift 2 ;;
      --template) template="$2"; shift 2 ;;
      --model)    model="$2";    shift 2 ;;
      --git-sha)  git_sha="$2";  shift 2 ;;
      *)          shift ;;
    esac
  done

  # Re-resolve the JSONL path from the current ARTIFACTS_DIR.
  _AUDIT_JSONL="${ARTIFACTS_DIR:-/tmp}/pipeline-audit.jsonl"
  mkdir -p "$(dirname "$_AUDIT_JSONL")" || return 0

  local ts
  ts=$(date -u +%Y-%m-%dT%H:%M:%SZ) || return 0

  # Assemble the event, including only the fields that were provided.
  # Only the free-text goal is escaped (matches original behavior).
  local json="{\"ts\":\"$ts\",\"type\":\"pipeline.start\""
  if [[ -n "$issue" ]]; then
    json="${json},\"issue\":\"$issue\""
  fi
  if [[ -n "$goal" ]]; then
    goal=$(_audit_escape_json_value "$goal")
    json="${json},\"goal\":\"$goal\""
  fi
  if [[ -n "$template" ]]; then
    json="${json},\"template\":\"$template\""
  fi
  if [[ -n "$model" ]]; then
    json="${json},\"model\":\"$model\""
  fi
  if [[ -n "$git_sha" ]]; then
    json="${json},\"git_sha\":\"$git_sha\""
  fi
  json="${json}}"

  echo "$json" >> "$_AUDIT_JSONL" || return 0
}
|
|
71
|
+
|
|
72
|
+
# ─── audit_emit — Emit structured event to JSONL ────────────────────────────
|
|
73
|
+
# Appends one JSON line with timestamp and key=value pairs.
|
|
74
|
+
#
|
|
75
|
+
# Usage: audit_emit "stage.complete" "stage=plan" "duration_s=5"
|
|
76
|
+
# audit_emit — Append one structured event line to the audit JSONL.
# $1 is the event type; remaining args are key=value pairs merged into the
# JSON object. Values are escaped; keys are trusted caller-supplied strings.
# Fail-open: returns 0 on any filesystem/date failure.
#
# Usage: audit_emit "stage.complete" "stage=plan" "duration_s=5"
audit_emit() {
  local event_type="$1"
  shift

  # Late-bind the JSONL path in case audit_init was never called.
  _AUDIT_JSONL="${_AUDIT_JSONL:-${ARTIFACTS_DIR:-/tmp}/pipeline-audit.jsonl}"
  mkdir -p "$(dirname "$_AUDIT_JSONL")" || return 0

  local ts
  ts=$(date -u +%Y-%m-%dT%H:%M:%SZ) || return 0

  local json="{\"ts\":\"$ts\",\"type\":\"$event_type\""

  # Fold each key=value pair into the object (split on the FIRST '=',
  # so values may themselves contain '=').
  local pair key val
  for pair in "$@"; do
    key="${pair%%=*}"
    val="${pair#*=}"
    val=$(_audit_escape_json_value "$val")
    json="${json},\"${key}\":\"${val}\""
  done

  json="${json}}"

  echo "$json" >> "$_AUDIT_JSONL" || return 0
}
|
|
107
|
+
|
|
108
|
+
# ─── audit_save_prompt — Archive prompt for iteration ────────────────────────
|
|
109
|
+
# Saves prompt text to $LOG_DIR/iteration-N.prompt.txt for analysis.
|
|
110
|
+
#
|
|
111
|
+
# Usage: audit_save_prompt "Full prompt text" 1
|
|
112
|
+
# audit_save_prompt — Archive the prompt used for an iteration.
# Writes the prompt verbatim (plus trailing newline) to
# $LOG_DIR/iteration-N.prompt.txt for post-mortem analysis.
# Fail-open: returns 0 even when the directory/file cannot be written.
# NOTE: intentionally defaults the global LOG_DIR (matches original behavior).
#
# Usage: audit_save_prompt "Full prompt text" 1
audit_save_prompt() {
  local prompt_text="$1"
  local iteration="$2"

  LOG_DIR="${LOG_DIR:-/tmp}"
  mkdir -p "$LOG_DIR" || return 0

  local prompt_file="$LOG_DIR/iteration-${iteration}.prompt.txt"
  # printf instead of echo: prompts may start with "-n"/"-e" or contain
  # backslash sequences, which echo can swallow or reinterpret.
  printf '%s\n' "$prompt_text" > "$prompt_file" || return 0
}
|
|
122
|
+
|
|
123
|
+
# ─── Helper: Build JSON report from JSONL ───────────────────────────────────
# Reads the audit JSONL file and emits a structured JSON report on stdout.
# Uses jq when available (arrays of stages/iterations, real duration math);
# otherwise falls back to scraping fields with grep/cut.
#
# Arguments: $1 - path to the JSONL event file
#            $2 - pipeline outcome string ("success"/"failure"/...)
_audit_build_json() {
  local jsonl_file="$1"
  local outcome="$2"

  if command -v jq &>/dev/null; then
    jq -s \
      --arg outcome "$outcome" \
      '{
        version: "1.0",
        pipeline_id: (.[0].git_sha // "unknown"),
        issue: (.[0].issue // "unknown"),
        goal: (.[0].goal // "unknown"),
        template: (.[0].template // "unknown"),
        model: (.[0].model // "unknown"),
        outcome: $outcome,
        duration_s: (if .[0].ts and .[-1].ts then
          ((.[-1].ts | fromdate) - (.[0].ts | fromdate))
        else 0 end),
        stages: [
          .[] | select(.type == "stage.complete") |
          {stage: .stage, duration_s: .duration_s}
        ],
        iterations: [
          .[] | select(.type | test("^loop\\.")) |
          {type: .type, iteration: .iteration}
        ]
      }' "$jsonl_file"
  else
    # Fallback without jq: scrape metadata from the first JSONL line
    # (the pipeline-start event carries issue/goal/template/model/git_sha).
    local first_line
    first_line=$(head -1 "$jsonl_file" 2>/dev/null)

    local issue goal template model git_sha
    issue=$(echo "$first_line" | grep -o '"issue":"[^"]*' | cut -d'"' -f4)
    goal=$(echo "$first_line" | grep -o '"goal":"[^"]*' | cut -d'"' -f4)
    template=$(echo "$first_line" | grep -o '"template":"[^"]*' | cut -d'"' -f4)
    model=$(echo "$first_line" | grep -o '"model":"[^"]*' | cut -d'"' -f4)
    git_sha=$(echo "$first_line" | grep -o '"git_sha":"[^"]*' | cut -d'"' -f4)
    # BUG FIX: the old `grep | cut || echo "unknown"` fallback never fired —
    # cut exits 0 even when grep matches nothing — so missing fields came out
    # empty instead of "unknown". Default them explicitly instead.
    issue=${issue:-unknown}
    goal=${goal:-unknown}
    template=${template:-unknown}
    model=${model:-unknown}
    git_sha=${git_sha:-unknown}

    # BUG FIX: `$(grep -c ... || echo "0")` could capture "0\n0" because
    # grep -c prints 0 AND exits 1 on no match. Capture first, default after.
    # Also: use these counts in the report below instead of recomputing them
    # with `grep | wc -l` (the originals were computed and never used).
    local stage_count iteration_count
    stage_count=$(grep -c '"type":"stage.complete"' "$jsonl_file" 2>/dev/null) || stage_count=0
    iteration_count=$(grep -c '"type":"loop.iteration_complete"' "$jsonl_file" 2>/dev/null) || iteration_count=0

    cat <<EOF
{
  "version": "1.0",
  "pipeline_id": "$git_sha",
  "issue": "$issue",
  "goal": "$goal",
  "template": "$template",
  "model": "$model",
  "outcome": "$outcome",
  "duration_s": 0,
  "stages": $stage_count,
  "iterations": $iteration_count
}
EOF
  fi
}
|
|
186
|
+
|
|
187
|
+
# ─── Helper: Build markdown report from JSONL ────────────────────────────────
# Emits a human-readable markdown report to stdout: summary table, per-stage
# timings, build-loop iterations, and (when present) compound audit findings
# with a convergence summary.
#
# Arguments: $1 - path to the JSONL event file
#            $2 - pipeline outcome string ("success"/"failure"/...)
_audit_build_markdown() {
  local jsonl_file="$1"
  local outcome="$2"

  # Pipeline metadata lives on the first JSONL line (pipeline-start event).
  local first_line
  first_line=$(head -1 "$jsonl_file" 2>/dev/null || echo "")

  local issue goal template model git_sha
  issue=$(echo "$first_line" | grep -o '"issue":"[^"]*' | cut -d'"' -f4)
  goal=$(echo "$first_line" | grep -o '"goal":"[^"]*' | cut -d'"' -f4)
  template=$(echo "$first_line" | grep -o '"template":"[^"]*' | cut -d'"' -f4)
  model=$(echo "$first_line" | grep -o '"model":"[^"]*' | cut -d'"' -f4)
  git_sha=$(echo "$first_line" | grep -o '"git_sha":"[^"]*' | cut -d'"' -f4)
  # BUG FIX: the old `grep | cut || echo "unknown"` fallback never fired —
  # cut exits 0 even when grep matches nothing — so missing fields rendered
  # as empty table cells. Default them explicitly instead.
  issue=${issue:-unknown}
  goal=${goal:-unknown}
  template=${template:-unknown}
  model=${model:-unknown}
  git_sha=${git_sha:-unknown}

  cat <<'EOF'
# Pipeline Audit Report

## Summary

EOF

  cat <<EOF
| Field | Value |
|-------|-------|
| Outcome | $outcome |
| Issue | $issue |
| Goal | $goal |
| Template | $template |
| Model | $model |
| Git SHA | $git_sha |

## Stages

EOF

  # One bullet per completed stage. All event values are quoted strings in
  # the JSONL (the logger quotes every value), so "duration_s":"N" matches.
  grep '"type":"stage.complete"' "$jsonl_file" | while IFS= read -r line; do
    local stage duration
    stage=$(echo "$line" | grep -o '"stage":"[^"]*' | cut -d'"' -f4)
    duration=$(echo "$line" | grep -o '"duration_s":"[^"]*' | cut -d'"' -f4)
    echo "- **$stage**: ${duration}s"
  done

  cat <<'EOF'

## Build Loop

EOF

  grep '"type":"loop.iteration_complete"' "$jsonl_file" | while IFS= read -r line; do
    local iteration
    iteration=$(echo "$line" | grep -o '"iteration":"[^"]*' | cut -d'"' -f4)
    echo "- Iteration $iteration completed"
  done

  # Compound audit findings section — only emitted when findings were logged.
  local compound_events
  compound_events=$(grep '"type":"compound.finding"' "$jsonl_file" 2>/dev/null || true)
  if [[ -n "$compound_events" ]]; then
    cat <<'EOF'

## Compound Audit Findings

EOF
    echo "$compound_events" | while IFS= read -r line; do
      local sev file desc
      sev=$(echo "$line" | grep -o '"severity":"[^"]*' | cut -d'"' -f4)
      file=$(echo "$line" | grep -o '"file":"[^"]*' | cut -d'"' -f4)
      desc=$(echo "$line" | grep -o '"description":"[^"]*' | cut -d'"' -f4)
      echo "- **[$sev]** \`$file\`: $desc"
    done

    # Convergence summary: only the most recent converged event matters.
    local converge_line
    converge_line=$(grep '"type":"compound.converged"' "$jsonl_file" 2>/dev/null | tail -1 || true)
    if [[ -n "$converge_line" ]]; then
      local reason cycles
      reason=$(echo "$converge_line" | grep -o '"reason":"[^"]*' | cut -d'"' -f4)
      cycles=$(echo "$converge_line" | grep -o '"total_cycles":"[^"]*' | cut -d'"' -f4)
      echo ""
      echo "**Converged** after ${cycles} cycle(s): ${reason}"
    fi
  fi

  cat <<'EOF'

---

*Report generated by audit-trail*
EOF
}
|
|
279
|
+
|
|
280
|
+
# ─── audit_finalize — Generate JSON and markdown reports ──────────────────────
# Reads JSONL file and generates structured reports for analysis.
#
# Outputs:
#   - $ARTIFACTS_DIR/pipeline-audit.json: Structured report
#   - $ARTIFACTS_DIR/pipeline-audit.md: Human-readable markdown
#
# Usage: audit_finalize "success" [or "failure"]
audit_finalize() {
  local outcome="${1:-unknown}"

  _AUDIT_JSONL="${_AUDIT_JSONL:-${ARTIFACTS_DIR:-/tmp}/pipeline-audit.jsonl}"

  # Nothing was ever logged — nothing to report; exit gracefully.
  [[ -f "$_AUDIT_JSONL" ]] || return 0

  local report_dir
  report_dir=$(dirname "$_AUDIT_JSONL")
  mkdir -p "$report_dir" || return 0

  # Best-effort: report generation must never fail the pipeline itself.
  _audit_build_json "$_AUDIT_JSONL" "$outcome" \
    > "$report_dir/pipeline-audit.json" 2>/dev/null || return 0
  _audit_build_markdown "$_AUDIT_JSONL" "$outcome" \
    > "$report_dir/pipeline-audit.md" 2>/dev/null || return 0

  return 0
}
|