shipwright-cli 1.10.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +114 -36
- package/completions/_shipwright +212 -32
- package/completions/shipwright.bash +97 -25
- package/docs/strategy/01-market-research.md +619 -0
- package/docs/strategy/02-mission-and-brand.md +587 -0
- package/docs/strategy/03-gtm-and-roadmap.md +759 -0
- package/docs/strategy/QUICK-START.txt +289 -0
- package/docs/strategy/README.md +172 -0
- package/package.json +4 -2
- package/scripts/sw +208 -1
- package/scripts/sw-activity.sh +500 -0
- package/scripts/sw-adaptive.sh +925 -0
- package/scripts/sw-adversarial.sh +1 -1
- package/scripts/sw-architecture-enforcer.sh +1 -1
- package/scripts/sw-auth.sh +613 -0
- package/scripts/sw-autonomous.sh +664 -0
- package/scripts/sw-changelog.sh +704 -0
- package/scripts/sw-checkpoint.sh +1 -1
- package/scripts/sw-ci.sh +602 -0
- package/scripts/sw-cleanup.sh +1 -1
- package/scripts/sw-code-review.sh +637 -0
- package/scripts/sw-connect.sh +1 -1
- package/scripts/sw-context.sh +605 -0
- package/scripts/sw-cost.sh +1 -1
- package/scripts/sw-daemon.sh +432 -130
- package/scripts/sw-dashboard.sh +1 -1
- package/scripts/sw-db.sh +540 -0
- package/scripts/sw-decompose.sh +539 -0
- package/scripts/sw-deps.sh +551 -0
- package/scripts/sw-developer-simulation.sh +1 -1
- package/scripts/sw-discovery.sh +412 -0
- package/scripts/sw-docs-agent.sh +539 -0
- package/scripts/sw-docs.sh +1 -1
- package/scripts/sw-doctor.sh +59 -1
- package/scripts/sw-dora.sh +615 -0
- package/scripts/sw-durable.sh +710 -0
- package/scripts/sw-e2e-orchestrator.sh +535 -0
- package/scripts/sw-eventbus.sh +393 -0
- package/scripts/sw-feedback.sh +471 -0
- package/scripts/sw-fix.sh +1 -1
- package/scripts/sw-fleet-discover.sh +567 -0
- package/scripts/sw-fleet-viz.sh +404 -0
- package/scripts/sw-fleet.sh +8 -1
- package/scripts/sw-github-app.sh +596 -0
- package/scripts/sw-github-checks.sh +1 -1
- package/scripts/sw-github-deploy.sh +1 -1
- package/scripts/sw-github-graphql.sh +1 -1
- package/scripts/sw-guild.sh +569 -0
- package/scripts/sw-heartbeat.sh +1 -1
- package/scripts/sw-hygiene.sh +559 -0
- package/scripts/sw-incident.sh +617 -0
- package/scripts/sw-init.sh +88 -1
- package/scripts/sw-instrument.sh +699 -0
- package/scripts/sw-intelligence.sh +1 -1
- package/scripts/sw-jira.sh +1 -1
- package/scripts/sw-launchd.sh +363 -28
- package/scripts/sw-linear.sh +1 -1
- package/scripts/sw-logs.sh +1 -1
- package/scripts/sw-loop.sh +64 -3
- package/scripts/sw-memory.sh +1 -1
- package/scripts/sw-mission-control.sh +487 -0
- package/scripts/sw-model-router.sh +545 -0
- package/scripts/sw-otel.sh +596 -0
- package/scripts/sw-oversight.sh +689 -0
- package/scripts/sw-pipeline-composer.sh +1 -1
- package/scripts/sw-pipeline-vitals.sh +1 -1
- package/scripts/sw-pipeline.sh +687 -24
- package/scripts/sw-pm.sh +693 -0
- package/scripts/sw-pr-lifecycle.sh +522 -0
- package/scripts/sw-predictive.sh +1 -1
- package/scripts/sw-prep.sh +1 -1
- package/scripts/sw-ps.sh +1 -1
- package/scripts/sw-public-dashboard.sh +798 -0
- package/scripts/sw-quality.sh +595 -0
- package/scripts/sw-reaper.sh +1 -1
- package/scripts/sw-recruit.sh +573 -0
- package/scripts/sw-regression.sh +642 -0
- package/scripts/sw-release-manager.sh +736 -0
- package/scripts/sw-release.sh +706 -0
- package/scripts/sw-remote.sh +1 -1
- package/scripts/sw-replay.sh +520 -0
- package/scripts/sw-retro.sh +691 -0
- package/scripts/sw-scale.sh +444 -0
- package/scripts/sw-security-audit.sh +505 -0
- package/scripts/sw-self-optimize.sh +1 -1
- package/scripts/sw-session.sh +1 -1
- package/scripts/sw-setup.sh +1 -1
- package/scripts/sw-standup.sh +712 -0
- package/scripts/sw-status.sh +1 -1
- package/scripts/sw-strategic.sh +658 -0
- package/scripts/sw-stream.sh +450 -0
- package/scripts/sw-swarm.sh +583 -0
- package/scripts/sw-team-stages.sh +511 -0
- package/scripts/sw-templates.sh +1 -1
- package/scripts/sw-testgen.sh +515 -0
- package/scripts/sw-tmux-pipeline.sh +554 -0
- package/scripts/sw-tmux.sh +1 -1
- package/scripts/sw-trace.sh +485 -0
- package/scripts/sw-tracker-github.sh +188 -0
- package/scripts/sw-tracker-jira.sh +172 -0
- package/scripts/sw-tracker-linear.sh +251 -0
- package/scripts/sw-tracker.sh +117 -2
- package/scripts/sw-triage.sh +603 -0
- package/scripts/sw-upgrade.sh +1 -1
- package/scripts/sw-ux.sh +677 -0
- package/scripts/sw-webhook.sh +627 -0
- package/scripts/sw-widgets.sh +530 -0
- package/scripts/sw-worktree.sh +1 -1
|
@@ -0,0 +1,925 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# ╔═══════════════════════════════════════════════════════════════════════════╗
# ║ shipwright adaptive — data-driven pipeline tuning                         ║
# ║ Replace 83+ hardcoded values with learned defaults from historical runs   ║
# ╚═══════════════════════════════════════════════════════════════════════════╝
# Strict mode: abort on errors/unset vars, fail pipelines on any stage.
set -euo pipefail
# Report the file:line of any command that trips `set -e`.
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR

VERSION="2.0.0"
# Absolute directory containing this script (resolves symlinked CWDs).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# ─── Colors (matches Seth's tmux theme) ─────────────────────────────────────
# 24-bit ANSI escape sequences; consumed by `echo -e` in the helpers below.
CYAN='\033[38;2;0;212;255m' # #00d4ff — primary accent
PURPLE='\033[38;2;124;58;237m' # #7c3aed — secondary
BLUE='\033[38;2;0;102;255m' # #0066ff — tertiary
GREEN='\033[38;2;74;222;128m' # success
YELLOW='\033[38;2;250;204;21m' # warning
RED='\033[38;2;248;113;113m' # error
DIM='\033[2m'
BOLD='\033[1m'
RESET='\033[0m'

# ─── Cross-platform compatibility ──────────────────────────────────────────
# Optional: sourced only when present. Safe under `set -e` because the
# failing `[[` is not the final command of the && list.
# shellcheck source=lib/compat.sh
[[ -f "$SCRIPT_DIR/lib/compat.sh" ]] && source "$SCRIPT_DIR/lib/compat.sh"

# ─── Output Helpers ─────────────────────────────────────────────────────────
# All diagnostics go to stderr so stdout stays machine-readable.
info() { echo -e "${CYAN}${BOLD}▸${RESET} $*" >&2; }
success() { echo -e "${GREEN}${BOLD}✓${RESET} $*" >&2; }
warn() { echo -e "${YELLOW}${BOLD}⚠${RESET} $*" >&2; }
error() { echo -e "${RED}${BOLD}✗${RESET} $*" >&2; }

# UTC ISO-8601 timestamp and Unix epoch seconds, used for event records.
now_iso() { date -u +"%Y-%m-%dT%H:%M:%SZ"; }
now_epoch() { date +%s; }

# ─── Paths ─────────────────────────────────────────────────────────────────
# Event log (JSON Lines) and trained-model cache, shared across repos.
EVENTS_FILE="${HOME}/.shipwright/events.jsonl"
MODELS_FILE="${HOME}/.shipwright/adaptive-models.json"
REPO_DIR="${PWD}"

# ─── Default Thresholds ─────────────────────────────────────────────────────
# Sample counts for confidence buckets, and safety clamps for each learned
# metric (seconds for timeouts/poll intervals, counts for iterations,
# percent for coverage).
MIN_CONFIDENCE_SAMPLES=10
MED_CONFIDENCE_SAMPLES=50
MIN_TIMEOUT=60
MAX_TIMEOUT=7200
MIN_ITERATIONS=2
MAX_ITERATIONS=50
MIN_POLL_INTERVAL=10
MAX_POLL_INTERVAL=300
MIN_COVERAGE=0
MAX_COVERAGE=100
|
53
|
+
# ─── Emit Event ─────────────────────────────────────────────────────────────
# Append one JSON event record to $EVENTS_FILE (JSON Lines format).
# Arguments:
#   $1 - event type (stored in the "type" field)
#   $@ - additional key=value pairs; values that look numeric are emitted as
#        JSON numbers, everything else as JSON strings
# Outputs: nothing on stdout; appends a single line to $EVENTS_FILE.
emit_event() {
  local event_type="$1"
  shift
  local json_fields=""
  for kv in "$@"; do
    local key="${kv%%=*}"
    local val="${kv#*=}"
    if [[ "$val" =~ ^-?[0-9]+\.?[0-9]*$ ]]; then
      json_fields="${json_fields},\"${key}\":${val}"
    else
      # Escape backslashes FIRST, then quotes: otherwise the backslashes
      # introduced by quote-escaping would not themselves be escaped and the
      # record would be malformed JSON for any value containing '\'.
      val="${val//\\/\\\\}"
      val="${val//\"/\\\"}"
      json_fields="${json_fields},\"${key}\":\"${val}\""
    fi
  done
  mkdir -p "${HOME}/.shipwright"
  echo "{\"ts\":\"$(now_iso)\",\"ts_epoch\":$(now_epoch),\"type\":\"${event_type}\"${json_fields}}" >> "$EVENTS_FILE"
}
|
|
71
|
+
|
|
72
|
+
# ─── JSON Helper: Percentile ────────────────────────────────────────────────
# Compute the P-th percentile of a JSON numeric array using linear
# interpolation between the two nearest ranks.
# Usage: percentile "[1, 5, 10, 15, 20]" 95
# Outputs: the percentile value, or "null" for an empty array.
percentile() {
  local arr="$1"
  local p="$2"
  # NB: --arg always binds a *string*; $p must go through `tonumber` before
  # arithmetic (the previous version divided a string and always errored).
  # Interpolation also replaces the old unconditional adjacent-average,
  # which returned the wrong median for odd-length arrays.
  jq -n --arg arr "$arr" --arg p "$p" '
    ($arr | fromjson | sort) as $sorted |
    ($sorted | length) as $len |
    if $len == 0 then null
    else
      ((($p | tonumber) / 100) * ($len - 1)) as $x |
      ($x | floor) as $idx |
      if $idx >= $len - 1 then $sorted[-1]
      else $sorted[$idx] + (($x - $idx) * ($sorted[$idx + 1] - $sorted[$idx]))
      end
    end
  '
}
|
|
89
|
+
|
|
90
|
+
# ─── JSON Helper: Mean ──────────────────────────────────────────────────────
# Arithmetic mean of a JSON numeric array passed as $1.
# Errors on an empty array (add of [] is null); callers guard sample counts.
mean() {
  local values="$1"
  jq -n --arg arr "$values" '($arr | fromjson | add / length)'
}
|
|
97
|
+
|
|
98
|
+
# ─── JSON Helper: Median ───────────────────────────────────────────────────
# The median is simply the 50th percentile; delegate to percentile().
median() {
  percentile "$1" 50
}
|
|
103
|
+
|
|
104
|
+
# ─── JSON Helper: Stddev ───────────────────────────────────────────────────
# Population standard deviation (divides by N, not N-1) of a JSON numeric
# array passed as $1.
# NOTE(review): errors on an empty array (add of [] is null) — callers are
# expected to guard sample counts before calling; confirm all call sites do.
stddev() {
  local arr="$1"
  jq -n --arg arr "$arr" '
    ($arr | fromjson) as $data |
    ($data | add / length) as $mean |
    (($data | map(. - $mean | . * .) | add) / ($data | length)) | sqrt
  '
}
|
|
113
|
+
|
|
114
|
+
# ─── Determine Confidence Level ─────────────────────────────────────────────
# Map an integer sample count onto a confidence bucket:
#   < MIN_CONFIDENCE_SAMPLES → "low"
#   < MED_CONFIDENCE_SAMPLES → "medium"
#   otherwise                → "high"
confidence_level() {
  local n="$1"
  if (( n >= MED_CONFIDENCE_SAMPLES )); then
    echo "high"
  elif (( n >= MIN_CONFIDENCE_SAMPLES )); then
    echo "medium"
  else
    echo "low"
  fi
}
|
|
125
|
+
|
|
126
|
+
# ─── Load Models from Cache ─────────────────────────────────────────────────
# Print the cached models JSON to stdout; an empty object when no cache
# file exists yet.
load_models() {
  if [[ -f "$MODELS_FILE" ]]; then
    cat "$MODELS_FILE"
    return
  fi
  echo '{}'
}
|
|
134
|
+
|
|
135
|
+
# ─── Save Models to Cache ───────────────────────────────────────────────────
# Atomically persist the models JSON ($1) to $MODELS_FILE.
# The temp file is created in the destination directory so the final mv is
# an atomic rename; a temp under $TMPDIR could live on another filesystem,
# turning mv into a non-atomic copy and risking a torn read by concurrent
# consumers. printf replaces echo so payloads starting with "-n"/"-e" are
# written verbatim.
save_models() {
  local models="$1"
  local dest_dir
  dest_dir="$(dirname "$MODELS_FILE")"
  mkdir -p "$dest_dir"
  local tmp_file
  tmp_file=$(mktemp "${dest_dir}/.sw-adaptive-models.XXXXXX")
  printf '%s\n' "$models" > "$tmp_file"
  mv "$tmp_file" "$MODELS_FILE"
}
|
|
144
|
+
|
|
145
|
+
# ─── Query events by field and value ────────────────────────────────────────
# Select events whose $field equals $value (string compare) and flatten
# their metric fields into one JSON array on stdout.
# Arguments: $1 field name, $2 value to match.
# Outputs: JSON array; "[]" when the events file is missing or jq fails.
query_events() {
  local field="$1"
  local value="$2"
  if [[ ! -f "$EVENTS_FILE" ]]; then
    echo "[]"
    return
  fi
  # Pass field/value via --arg and .[$field] instead of interpolating them
  # into the jq program: a value containing quotes (or a hostile string)
  # previously broke or injected into the filter.
  jq -s --arg field "$field" --arg value "$value" '
    map(select(.[$field] == $value))
    | map(.duration, .iterations, .model, .team_size, .template, .quality_score, .coverage // empty)
    | flatten
  ' "$EVENTS_FILE" 2>/dev/null || echo "[]"
}
|
|
157
|
+
|
|
158
|
+
# ─── Get Timeout Recommendation ─────────────────────────────────────────────
# Recommend a stage timeout in seconds: P95 of historical stage durations
# plus a 20% buffer, clamped to [MIN_TIMEOUT, MAX_TIMEOUT].
# Arguments:
#   $1 - stage name (default "build")
#   $2 - repo path (accepted but not used in the query — TODO confirm intent)
#   $3 - fallback timeout in seconds (default 1800)
# Falls back to $3 when there are too few samples or computation fails.
get_timeout() {
  local stage="${1:-build}"
  local repo="${2:-.}"
  local default="${3:-1800}"

  # Durations are recorded in milliseconds; convert to seconds. Stage is
  # passed via --arg so odd stage names cannot break the jq program.
  local durations
  durations=$(jq -s --arg stage "$stage" '
    map(select(.type == "stage_complete" and .stage == $stage) | .duration // empty)
    | map(select(. > 0)) | map(. / 1000) | sort
  ' "$EVENTS_FILE" 2>/dev/null || echo "[]")

  local samples
  samples=$(echo "$durations" | jq 'length')

  if [[ "$samples" -lt "$MIN_CONFIDENCE_SAMPLES" ]]; then
    echo "$default"
    return
  fi

  # P95 + 20% buffer. jq (already a hard dependency) replaces bc, which may
  # be absent; previously an empty bc result silently collapsed to
  # MIN_TIMEOUT instead of honoring the caller's default.
  local p95
  p95=$(percentile "$durations" 95)
  local timeout
  timeout=$(jq -n --arg p "$p95" '(($p | tonumber) * 1.2) | floor' 2>/dev/null || true)
  if [[ -z "$timeout" || ! "$timeout" =~ ^[0-9]+$ ]]; then
    echo "$default"
    return
  fi

  # Clamp to safety bounds.
  if [[ "$timeout" -lt "$MIN_TIMEOUT" ]]; then timeout="$MIN_TIMEOUT"; fi
  if [[ "$timeout" -gt "$MAX_TIMEOUT" ]]; then timeout="$MAX_TIMEOUT"; fi

  echo "$timeout"
}
|
|
191
|
+
|
|
192
|
+
# ─── Get Iterations Recommendation ─────────────────────────────────────────
# Recommend an iteration budget: mean of historical build_complete iteration
# counts for the stage, clamped to [MIN_ITERATIONS, MAX_ITERATIONS].
# Arguments:
#   $1 - complexity score (default 5)
#        NOTE(review): accepted but not referenced in the query below —
#        the "complexity band" comment does not match the code; confirm.
#   $2 - stage name (default "build")
#   $3 - fallback iteration count (default 10)
get_iterations() {
  local complexity="${1:-5}"
  local stage="${2:-build}"
  local default="${3:-10}"

  # Query events for this complexity band
  local iterations_data
  iterations_data=$(jq -s "
    map(select(.type == \"build_complete\" and .stage == \"${stage}\") | .iterations // empty) |
    map(select(. > 0))
  " "$EVENTS_FILE" 2>/dev/null || echo "[]")

  local samples
  samples=$(echo "$iterations_data" | jq 'length')

  # Not enough history → keep the caller-supplied default.
  if [[ "$samples" -lt "$MIN_CONFIDENCE_SAMPLES" ]]; then
    echo "$default"
    return
  fi

  # Compute mean, truncated to an integer (cut drops the fraction).
  local mean_iters
  mean_iters=$(mean "$iterations_data")
  local result
  result=$(echo "$mean_iters" | cut -d. -f1)

  # Apply safety bounds
  if [[ "$result" -lt "$MIN_ITERATIONS" ]]; then result="$MIN_ITERATIONS"; fi
  if [[ "$result" -gt "$MAX_ITERATIONS" ]]; then result="$MAX_ITERATIONS"; fi

  echo "$result"
}
|
|
225
|
+
|
|
226
|
+
# ─── Get Model Recommendation ───────────────────────────────────────────────
# Recommend the cheapest model whose success rate exceeds 90% over at least
# 5 recorded runs; otherwise fall back to $2.
# Arguments: $1 stage name (default "build"), $2 fallback model (default "opus").
# NOTE(review): despite the comment, the query does not filter by .stage or
# by event type — it groups ALL events by .model; confirm this is intended.
# NOTE(review): $default is interpolated directly into the jq program —
# assumes it contains no quotes/backslashes.
get_model() {
  local stage="${1:-build}"
  local default="${2:-opus}"

  # Query events for successful runs by model on this stage
  local model_success
  model_success=$(jq -s "
    group_by(.model) |
    map({
      model: .[0].model,
      total: length,
      success: map(select(.exit_code == 0)) | length,
      cost: (map(.token_cost // 0) | add)
    }) |
    map(select(.total >= 5)) |
    map(select((.success / .total) > 0.9)) |
    sort_by(.cost) |
    .[0].model // \"$default\"
  " "$EVENTS_FILE" 2>/dev/null || echo "\"$default\"")

  # Strip the JSON string quoting for shell consumption.
  echo "$model_success" | tr -d '"'
}
|
|
249
|
+
|
|
250
|
+
# ─── Get Team Size Recommendation ───────────────────────────────────────────
# Recommend an agent team size: mean of historical team_size values,
# truncated to an integer and clamped to 1..8.
# Arguments:
#   $1 - complexity score (default 5)
#        NOTE(review): accepted but not used in the query — the "similar
#        complexity" comment does not match the code; confirm intent.
#   $2 - fallback team size (default 2)
get_team_size() {
  local complexity="${1:-5}"
  local default="${2:-2}"

  # Query team sizes for similar complexity
  local team_data
  team_data=$(jq -s "
    map(select(.team_size != null) | .team_size) |
    map(select(. > 0))
  " "$EVENTS_FILE" 2>/dev/null || echo "[]")

  local samples
  samples=$(echo "$team_data" | jq 'length')

  # Not enough history → keep the caller-supplied default.
  if [[ "$samples" -lt "$MIN_CONFIDENCE_SAMPLES" ]]; then
    echo "$default"
    return
  fi

  # Mean, truncated to an integer (cut drops the fraction).
  local mean_team
  mean_team=$(mean "$team_data")
  local result
  result=$(echo "$mean_team" | cut -d. -f1)

  # Bounds: 1-8 agents
  if [[ "$result" -lt 1 ]]; then result=1; fi
  if [[ "$result" -gt 8 ]]; then result=8; fi

  echo "$result"
}
|
|
281
|
+
|
|
282
|
+
# ─── Get Template Recommendation ────────────────────────────────────────────
# Recommend the template with the best historical success rate among events
# carrying both .template and .complexity_score; fall back to $2 otherwise.
# Arguments:
#   $1 - complexity score (default 5; currently unused — TODO confirm intent)
#   $2 - fallback template name (default "standard")
get_template() {
  local complexity="${1:-5}"
  local default="${2:-standard}"

  # Bug fix: the old expression `map(select(...)) | length / length` piped
  # the filtered array into BOTH lengths, so success_rate was always 1 —
  # and 0/0 errored out for any template with zero successes, failing the
  # whole query. Parenthesize so the denominator is the group size.
  # $default is passed via --arg to keep it out of the program text.
  local template
  template=$(jq -s --arg default "$default" '
    map(select(.template != null and .complexity_score != null))
    | group_by(.template)
    | map({
        template: .[0].template,
        success_rate: ((map(select(.exit_code == 0)) | length) / length)
      })
    | sort_by(-.success_rate)
    | .[0].template // $default
  ' "$EVENTS_FILE" 2>/dev/null || echo "\"$default\"")

  # Strip the JSON string quoting for shell consumption.
  echo "$template" | tr -d '"'
}
|
|
302
|
+
|
|
303
|
+
# ─── Get Poll Interval Recommendation ───────────────────────────────────────
# Recommend a daemon poll interval in seconds from observed queue depth:
# deeper queues → shorter intervals, clamped to
# [MIN_POLL_INTERVAL, MAX_POLL_INTERVAL]. Falls back to $1 with < 5 samples.
get_poll_interval() {
  local default="${1:-60}"

  # Query queue depths to estimate optimal poll interval.
  local queue_events
  queue_events=$(jq -s '
    map(select(.type == "queue_update") | .queue_depth // 0)
    | map(select(. > 0))
  ' "$EVENTS_FILE" 2>/dev/null || echo "[]")

  local samples
  samples=$(echo "$queue_events" | jq 'length')

  if [[ "$samples" -lt 5 ]]; then
    echo "$default"
    return
  fi

  local mean_queue
  mean_queue=$(mean "$queue_events")

  # Heuristic: deeper queue → shorter interval. awk's %d truncates to an
  # integer; the previous bc pipeline could emit "55.0" for a fractional
  # mean, which broke the integer bound checks below and leaked a float to
  # callers. awk also avoids a hard dependency on bc.
  local interval
  interval=$(awk -v q="$mean_queue" 'BEGIN { printf "%d", 60 - (q * 2) }' 2>/dev/null || echo "$default")

  # Apply bounds.
  if [[ "$interval" -lt "$MIN_POLL_INTERVAL" ]]; then interval="$MIN_POLL_INTERVAL"; fi
  if [[ "$interval" -gt "$MAX_POLL_INTERVAL" ]]; then interval="$MAX_POLL_INTERVAL"; fi

  echo "$interval"
}
|
|
335
|
+
|
|
336
|
+
# ─── Get Retry Limit Recommendation ────────────────────────────────────────
# Recommend a retry cap (1..5) for a given error class: scale the observed
# retry success rate by 5. With no history the rate defaults to 0.5 → cap 2.
# Arguments: $1 error class (default "generic"), $2 fallback cap (default 2).
# NOTE(review): depends on `bc`; when bc is missing, $limit is empty and the
# function silently falls back to $default.
get_retry_limit() {
  local error_class="${1:-generic}"
  local default="${2:-2}"

  # Query retry success rate by error class
  local retry_data
  retry_data=$(jq -s "
    map(select(.type == \"retry\" and .error_class != null)) |
    group_by(.error_class) |
    map({
      error_class: .[0].error_class,
      retries: (map(.attempt_count) | add // 0),
      successes: (map(select(.exit_code == 0)) | length)
    }) |
    map(select(.error_class == \"${error_class}\")) |
    .[0]
  " "$EVENTS_FILE" 2>/dev/null || echo "{}")

  # Extract success rate with safe defaults for missing data.
  # `.retries and .retries > 0` is falsy for null/absent data (e.g. when
  # retry_data is "null" or "{}"), yielding the neutral 0.5 prior.
  local success_rate
  success_rate=$(echo "$retry_data" | jq 'if .retries and .retries > 0 then .successes / .retries else 0.5 end')

  # Heuristic: higher success rate → allow more retries (cap at 5).
  # cut truncates any fractional part bc leaves despite scale=0.
  local limit
  limit=$(echo "scale=0; ${success_rate} * 5" | bc 2>/dev/null | cut -d. -f1)
  if [[ -z "$limit" ]]; then limit="$default"; fi

  if [[ "$limit" -lt 1 ]]; then limit=1; fi
  if [[ "$limit" -gt 5 ]]; then limit=5; fi

  echo "$limit"
}
|
|
369
|
+
|
|
370
|
+
# ─── Get Quality Threshold Recommendation ───────────────────────────────────
# Recommend a quality gate: the 25th percentile of quality scores among
# successful runs (so ~75% of historically passing runs would clear it),
# clamped to 50..95. Falls back to $1 with too few samples.
get_quality_threshold() {
  local default="${1:-70}"

  # Query quality score distribution on pass vs fail runs
  local quality_data
  quality_data=$(jq -s "
    map(select(.quality_score != null)) |
    map(select(.exit_code == 0)) |
    map(.quality_score) |
    sort
  " "$EVENTS_FILE" 2>/dev/null || echo "[]")

  local samples
  samples=$(echo "$quality_data" | jq 'length')

  # Not enough history → keep the caller-supplied default.
  if [[ "$samples" -lt "$MIN_CONFIDENCE_SAMPLES" ]]; then
    echo "$default"
    return
  fi

  # Use 25th percentile of passing runs as recommended threshold
  local p25
  p25=$(percentile "$quality_data" 25)
  local result
  result=$(echo "$p25" | cut -d. -f1)  # truncate fractional percentile

  # Bounds: 50-95
  if [[ "$result" -lt 50 ]]; then result=50; fi
  if [[ "$result" -gt 95 ]]; then result=95; fi

  echo "$result"
}
|
|
403
|
+
|
|
404
|
+
# ─── Get Coverage Min Recommendation ────────────────────────────────────────
# Recommend a minimum-coverage gate: the median coverage of successful runs,
# truncated to an integer and clamped to [MIN_COVERAGE, MAX_COVERAGE].
# Falls back to $1 when there is too little history.
get_coverage_min() {
  local fallback="${1:-80}"

  # Coverage values from successful runs only, sorted ascending.
  local observed
  observed=$(jq -s "
    map(select(.coverage != null and .exit_code == 0)) |
    map(.coverage) |
    sort
  " "$EVENTS_FILE" 2>/dev/null || echo "[]")

  local n
  n=$(echo "$observed" | jq 'length')

  # Too little history → keep the caller-supplied default.
  if [[ "$n" -lt "$MIN_CONFIDENCE_SAMPLES" ]]; then
    echo "$fallback"
    return
  fi

  # Median of passing runs, truncated to an integer.
  local recommendation
  recommendation=$(median "$observed" | cut -d. -f1)

  # Clamp to the valid percentage range.
  if [[ "$recommendation" -lt "$MIN_COVERAGE" ]]; then recommendation="$MIN_COVERAGE"; fi
  if [[ "$recommendation" -gt "$MAX_COVERAGE" ]]; then recommendation="$MAX_COVERAGE"; fi

  echo "$recommendation"
}
|
|
436
|
+
|
|
437
|
+
# ─── Main: get subcommand ───────────────────────────────────────────────────
# Dispatch `adaptive get <metric> [--stage S] [--repo R] [--complexity N]
# [--default V]` to the matching get_* recommender. Prints the learned (or
# default) value on stdout; returns 1 for an unknown metric.
cmd_get() {
  local metric="${1:-}"
  # Shift the metric off only when present; `|| true` keeps set -e quiet
  # when no metric was given.
  [[ -n "$metric" ]] && shift || true

  local stage="build"
  local repo="${REPO_DIR}"
  local complexity=5
  local default=""

  # Parse --options; unknown tokens are silently skipped.
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --stage) stage="$2"; shift 2 ;;
      --repo) repo="$2"; shift 2 ;;
      --complexity) complexity="$2"; shift 2 ;;
      --default) default="$2"; shift 2 ;;
      *) shift ;;
    esac
  done

  # Each arm supplies a metric-specific hardcoded default when the caller
  # did not pass --default.
  case "$metric" in
    timeout)
      get_timeout "$stage" "$repo" "${default:-1800}"
      ;;
    iterations)
      get_iterations "$complexity" "$stage" "${default:-10}"
      ;;
    model)
      get_model "$stage" "${default:-opus}"
      ;;
    team_size)
      get_team_size "$complexity" "${default:-2}"
      ;;
    template)
      get_template "$complexity" "${default:-standard}"
      ;;
    poll_interval)
      get_poll_interval "${default:-60}"
      ;;
    retry_limit)
      get_retry_limit "generic" "${default:-2}"
      ;;
    quality_threshold)
      get_quality_threshold "${default:-70}"
      ;;
    coverage_min)
      get_coverage_min "${default:-80}"
      ;;
    *)
      error "Unknown metric: $metric"
      echo "Available metrics: timeout, iterations, model, team_size, template, poll_interval, retry_limit, quality_threshold, coverage_min"
      return 1
      ;;
  esac
}
|
|
492
|
+
|
|
493
|
+
# ─── Main: profile subcommand ───────────────────────────────────────────────
# Print a table of every learned metric next to its hardcoded default, the
# number of samples backing it, and a confidence bucket.
# Options: --repo R (default: current directory).
cmd_profile() {
  # profile takes no positional args, just --options
  local repo="${REPO_DIR}"
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --repo) repo="$2"; shift 2 ;;
      *) shift ;;
    esac
  done

  info "Adaptive Profile for ${CYAN}${repo}${RESET}"
  echo ""

  # Table header
  printf "%-25s %-15s %-15s %-12s %-10s\n" "Metric" "Learned" "Default" "Samples" "Confidence"
  printf "%s\n" "$(printf '%.0s─' {1..80})"

  # Timeout
  local timeout_val
  timeout_val=$(get_timeout "build" "$repo" "1800")
  local timeout_samples
  timeout_samples=$(jq -s "map(select(.type == \"stage_complete\" and .stage == \"build\") | .duration) | length" "$EVENTS_FILE" 2>/dev/null || echo "0")
  local timeout_conf
  timeout_conf=$(confidence_level "$timeout_samples")
  printf "%-25s %-15s %-15s %-12s %-10s\n" "timeout (s)" "$timeout_val" "1800" "$timeout_samples" "$timeout_conf"

  # Iterations
  local iter_val
  iter_val=$(get_iterations 5 "build" "10")
  local iter_samples
  iter_samples=$(jq -s "map(select(.type == \"build_complete\" and .stage == \"build\") | .iterations) | length" "$EVENTS_FILE" 2>/dev/null || echo "0")
  local iter_conf
  iter_conf=$(confidence_level "$iter_samples")
  printf "%-25s %-15s %-15s %-12s %-10s\n" "iterations" "$iter_val" "10" "$iter_samples" "$iter_conf"

  # Model
  local model_val
  model_val=$(get_model "build" "opus")
  local model_samples
  model_samples=$(jq -s "map(select(.model != null and .type == \"pipeline_complete\")) | length" "$EVENTS_FILE" 2>/dev/null || echo "0")
  local model_conf
  model_conf=$(confidence_level "$model_samples")
  printf "%-25s %-15s %-15s %-12s %-10s\n" "model" "$model_val" "opus" "$model_samples" "$model_conf"

  # Team size
  local team_val
  team_val=$(get_team_size 5 "2")
  local team_samples
  team_samples=$(jq -s "map(select(.team_size != null)) | length" "$EVENTS_FILE" 2>/dev/null || echo "0")
  local team_conf
  team_conf=$(confidence_level "$team_samples")
  printf "%-25s %-15s %-15s %-12s %-10s\n" "team_size" "$team_val" "2" "$team_samples" "$team_conf"

  # Template
  local template_val
  template_val=$(get_template 5 "standard")
  local template_samples
  template_samples=$(jq -s "map(select(.template != null)) | length" "$EVENTS_FILE" 2>/dev/null || echo "0")
  local template_conf
  template_conf=$(confidence_level "$template_samples")
  printf "%-25s %-15s %-15s %-12s %-10s\n" "template" "$template_val" "standard" "$template_samples" "$template_conf"

  # Poll interval
  local poll_val
  poll_val=$(get_poll_interval "60")
  # NOTE(review): poll_samples is hardcoded to 0 here (confidence always
  # reads "low") while get_poll_interval counts real queue_update events —
  # confirm whether the sample count should be queried like the others.
  local poll_samples=0
  local poll_conf
  poll_conf=$(confidence_level "$poll_samples")
  printf "%-25s %-15s %-15s %-12s %-10s\n" "poll_interval (s)" "$poll_val" "60" "$poll_samples" "$poll_conf"

  # Quality threshold
  local quality_val
  quality_val=$(get_quality_threshold "70")
  local quality_samples
  quality_samples=$(jq -s "map(select(.quality_score != null)) | length" "$EVENTS_FILE" 2>/dev/null || echo "0")
  local quality_conf
  quality_conf=$(confidence_level "$quality_samples")
  printf "%-25s %-15s %-15s %-12s %-10s\n" "quality_threshold" "$quality_val" "70" "$quality_samples" "$quality_conf"

  # Coverage min
  local coverage_val
  coverage_val=$(get_coverage_min "80")
  local coverage_samples
  coverage_samples=$(jq -s "map(select(.coverage != null)) | length" "$EVENTS_FILE" 2>/dev/null || echo "0")
  local coverage_conf
  coverage_conf=$(confidence_level "$coverage_samples")
  printf "%-25s %-15s %-15s %-12s %-10s\n" "coverage_min (%)" "$coverage_val" "80" "$coverage_samples" "$coverage_conf"

  echo ""
}
|
|
584
|
+
|
|
585
|
+
# ─── Main: train subcommand ─────────────────────────────────────────────────
# Recompute every learned metric from the event log and persist the result
# as a single JSON document via save_models(). Returns 1 when no event log
# exists. Options: --repo R (default: current directory).
cmd_train() {
  # train takes no positional args, just --options
  local repo="${REPO_DIR}"
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --repo) repo="$2"; shift 2 ;;
      *) shift ;;
    esac
  done

  if [[ ! -f "$EVENTS_FILE" ]]; then
    warn "No events file found: $EVENTS_FILE"
    return 1
  fi

  info "Training adaptive models from ${CYAN}${EVENTS_FILE}${RESET}"

  local event_count
  event_count=$(jq -s 'length' "$EVENTS_FILE" 2>/dev/null || echo 0)
  info "Processing ${CYAN}${event_count}${RESET} events..."

  # Build comprehensive models JSON using jq directly.
  # Each pair below is (learned value via the get_* helper, raw sample
  # count via an independent jq query over the same event log).
  local timeout_learned timeout_samples
  timeout_learned=$(get_timeout "build" "$repo" "1800")
  timeout_samples=$(jq -s "map(select(.type == \"stage_complete\" and .stage == \"build\") | .duration) | length" "$EVENTS_FILE" 2>/dev/null || echo 0)

  local iterations_learned iterations_samples
  iterations_learned=$(get_iterations 5 "build" "10")
  iterations_samples=$(jq -s "map(select(.type == \"build_complete\") | .iterations) | length" "$EVENTS_FILE" 2>/dev/null || echo 0)

  local model_learned model_samples
  model_learned=$(get_model "build" "opus")
  model_samples=$(jq -s "map(select(.model != null)) | length" "$EVENTS_FILE" 2>/dev/null || echo 0)

  local team_learned team_samples
  team_learned=$(get_team_size 5 "2")
  team_samples=$(jq -s "map(select(.team_size != null)) | length" "$EVENTS_FILE" 2>/dev/null || echo 0)

  local quality_learned quality_samples
  quality_learned=$(get_quality_threshold "70")
  quality_samples=$(jq -s "map(select(.quality_score != null)) | length" "$EVENTS_FILE" 2>/dev/null || echo 0)

  local coverage_learned coverage_samples
  coverage_learned=$(get_coverage_min "80")
  coverage_samples=$(jq -s "map(select(.coverage != null)) | length" "$EVENTS_FILE" 2>/dev/null || echo 0)

  local trained_at
  trained_at=$(now_iso)

  # Build JSON using jq with variables: every shell value enters via --arg
  # (string) and is converted with tonumber where numeric, so nothing is
  # interpolated into the program text.
  local models
  models=$(jq -n \
    --arg trained_at "$trained_at" \
    --arg timeout_learned "$timeout_learned" \
    --arg iterations_learned "$iterations_learned" \
    --arg model_learned "$model_learned" \
    --arg team_learned "$team_learned" \
    --arg quality_learned "$quality_learned" \
    --arg coverage_learned "$coverage_learned" \
    --arg timeout_samples "$timeout_samples" \
    --arg iterations_samples "$iterations_samples" \
    --arg model_samples "$model_samples" \
    --arg team_samples "$team_samples" \
    --arg quality_samples "$quality_samples" \
    --arg coverage_samples "$coverage_samples" \
    '{
      timeout: {
        learned: ($timeout_learned | tonumber),
        default: 1800,
        samples: ($timeout_samples | tonumber)
      },
      iterations: {
        learned: ($iterations_learned | tonumber),
        default: 10,
        samples: ($iterations_samples | tonumber)
      },
      model: {
        learned: $model_learned,
        default: "opus",
        samples: ($model_samples | tonumber)
      },
      team_size: {
        learned: ($team_learned | tonumber),
        default: 2,
        samples: ($team_samples | tonumber)
      },
      quality_threshold: {
        learned: ($quality_learned | tonumber),
        default: 70,
        samples: ($quality_samples | tonumber)
      },
      coverage_min: {
        learned: ($coverage_learned | tonumber),
        default: 80,
        samples: ($coverage_samples | tonumber)
      },
      trained_at: $trained_at
    }')

  save_models "$models"
  success "Models trained and saved to ${CYAN}${MODELS_FILE}${RESET}"
}
|
|
688
|
+
|
|
689
|
+
# ─── Main: compare subcommand ───────────────────────────────────────────────
cmd_compare() {
  # Render a side-by-side table of hardcoded defaults vs learned values.
  # Accepts only --repo; any other argument is silently ignored.
  local target_repo="${REPO_DIR}"
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --repo)
        target_repo="$2"
        shift 2
        ;;
      *)
        shift
        ;;
    esac
  done

  info "Learned vs Hardcoded Values for ${CYAN}${target_repo}${RESET}"
  echo ""

  printf "%-25s %-15s %-15s %-15s\n" "Metric" "Hardcoded" "Learned" "Difference"
  printf "%s\n" "$(printf '%.0s─' {1..70})"

  local hard learned

  # Timeout (seconds) for the build stage
  hard=1800
  learned=$(get_timeout "build" "$target_repo" "$hard")
  printf "%-25s %-15s %-15s %-15s\n" "timeout (s)" "$hard" "$learned" "$((learned - hard))"

  # Maximum build iterations
  hard=10
  learned=$(get_iterations 5 "build" "$hard")
  printf "%-25s %-15s %-15s %-15s\n" "iterations" "$hard" "$learned" "$((learned - hard))"

  # Model choice — not numeric, so the difference column shows "-"
  local model_default="opus"
  local model_picked
  model_picked=$(get_model "build" "$model_default")
  printf "%-25s %-15s %-15s %-15s\n" "model" "$model_default" "$model_picked" "-"

  # Team size
  hard=2
  learned=$(get_team_size 5 "$hard")
  printf "%-25s %-15s %-15s %-15s\n" "team_size" "$hard" "$learned" "$((learned - hard))"

  # Quality threshold
  hard=70
  learned=$(get_quality_threshold "$hard")
  printf "%-25s %-15s %-15s %-15s\n" "quality_threshold" "$hard" "$learned" "$((learned - hard))"

  # Minimum coverage percentage
  hard=80
  learned=$(get_coverage_min "$hard")
  printf "%-25s %-15s %-15s %-15s\n" "coverage_min (%)" "$hard" "$learned" "$((learned - hard))"

  echo ""
}
|
|
749
|
+
|
|
750
|
+
# ─── Main: recommend subcommand ─────────────────────────────────────────────
|
|
751
|
+
cmd_recommend() {
|
|
752
|
+
# recommend takes --issue as required option
|
|
753
|
+
local issue=""
|
|
754
|
+
local repo="${REPO_DIR}"
|
|
755
|
+
|
|
756
|
+
while [[ $# -gt 0 ]]; do
|
|
757
|
+
case "$1" in
|
|
758
|
+
--issue) issue="$2"; shift 2 ;;
|
|
759
|
+
--repo) repo="$2"; shift 2 ;;
|
|
760
|
+
*) shift ;;
|
|
761
|
+
esac
|
|
762
|
+
done
|
|
763
|
+
|
|
764
|
+
if [[ -z "$issue" ]]; then
|
|
765
|
+
error "Missing --issue argument"
|
|
766
|
+
return 1
|
|
767
|
+
fi
|
|
768
|
+
|
|
769
|
+
info "Generating recommendation for issue ${CYAN}#${issue}${RESET}..."
|
|
770
|
+
|
|
771
|
+
# Simulate complexity score (in real implementation, query GitHub API)
|
|
772
|
+
local complexity=5
|
|
773
|
+
|
|
774
|
+
# Build JSON recommendation
|
|
775
|
+
local recommendation
|
|
776
|
+
recommendation=$(jq -n "{
|
|
777
|
+
issue: ${issue},
|
|
778
|
+
template: \"$(get_template "$complexity" "standard")\",
|
|
779
|
+
model: \"$(get_model "build" "opus")\",
|
|
780
|
+
max_iterations: $(get_iterations "$complexity" "build" "10"),
|
|
781
|
+
team_size: $(get_team_size "$complexity" "2"),
|
|
782
|
+
timeout: $(get_timeout "build" "$repo" "1800"),
|
|
783
|
+
quality_threshold: $(get_quality_threshold "70"),
|
|
784
|
+
poll_interval: $(get_poll_interval "60"),
|
|
785
|
+
coverage_min: $(get_coverage_min "80"),
|
|
786
|
+
confidence: \"high\",
|
|
787
|
+
reasoning: \"Based on $(jq -s 'length' "$EVENTS_FILE" 2>/dev/null || echo 0) historical events\"
|
|
788
|
+
}")
|
|
789
|
+
|
|
790
|
+
echo "$recommendation" | jq .
|
|
791
|
+
}
|
|
792
|
+
|
|
793
|
+
# ─── Main: reset subcommand ─────────────────────────────────────────────────
cmd_reset() {
  # Clear learned model data: the whole models file by default, or a single
  # top-level metric when --metric is given.
  #
  # Options:
  #   --metric NAME   delete only this metric from the models JSON
  # Returns: 0 on success; 1 if the metric could not be removed.
  local metric=""

  while [[ $# -gt 0 ]]; do
    case "$1" in
      --metric) metric="$2"; shift 2 ;;
      *) shift ;;
    esac
  done

  if [[ -z "$metric" ]]; then
    info "Clearing all learned data..."
    rm -f "$MODELS_FILE"
    success "Cleared ${CYAN}${MODELS_FILE}${RESET}"
  else
    info "Clearing learned data for metric: ${CYAN}${metric}${RESET}"
    local models
    models=$(load_models)
    # Pass the metric as a jq variable rather than splicing it into the
    # program text: the old 'del(.${metric})' broke (or executed arbitrary
    # jq) when the name contained special characters, and on jq failure the
    # empty result was saved — silently wiping ALL learned models.
    if ! models=$(echo "$models" | jq --arg m "$metric" 'del(.[$m])'); then
      error "Failed to reset metric '${metric}'"
      return 1
    fi
    save_models "$models"
    success "Reset metric ${CYAN}${metric}${RESET}"
  fi
}
|
|
818
|
+
|
|
819
|
+
# ─── Main: help subcommand ──────────────────────────────────────────────────
# Print usage for `sw adaptive` to stdout. The here-doc is unquoted on
# purpose: ${BOLD}/${CYAN}/${RESET} color codes and the ${EVENTS_FILE}/
# ${MODELS_FILE} paths are expanded at print time.
cmd_help() {
  cat <<EOF
${BOLD}shipwright adaptive${RESET} — Data-Driven Pipeline Tuning

${BOLD}USAGE${RESET}
  sw adaptive <subcommand> [options]

${BOLD}SUBCOMMANDS${RESET}
  ${CYAN}get${RESET} <metric> [--stage S] [--repo R] [--complexity C] [--default V]
      Return adaptive value for a metric (replaces hardcoded defaults)
      Metrics: timeout, iterations, model, team_size, template, poll_interval,
               retry_limit, quality_threshold, coverage_min

  ${CYAN}profile${RESET} [--repo REPO]
      Show all learned parameters with confidence levels

  ${CYAN}train${RESET} [--repo REPO]
      Rebuild models from events.jsonl (run after significant pipeline activity)

  ${CYAN}compare${RESET} [--repo REPO]
      Side-by-side table: learned vs hardcoded values

  ${CYAN}recommend${RESET} --issue N [--repo REPO]
      Full JSON recommendation for an issue (template, model, team_size, etc.)

  ${CYAN}reset${RESET} [--metric METRIC]
      Clear learned data (all, or specific metric)

  ${CYAN}help${RESET}
      Show this help message

${BOLD}EXAMPLES${RESET}
  # Get learned timeout for build stage
  sw adaptive get timeout --stage build

  # Show all learned parameters
  sw adaptive profile

  # Train models from events (run after major pipeline activity)
  sw adaptive train

  # Get complete recommendation for issue #42
  sw adaptive recommend --issue 42

  # Compare learned vs hardcoded
  sw adaptive compare

${BOLD}STORAGE${RESET}
  Events: ${CYAN}${EVENTS_FILE}${RESET}
  Models: ${CYAN}${MODELS_FILE}${RESET}

${BOLD}STATISTICS${RESET}
  • Low confidence: < 10 samples
  • Medium confidence: 10-50 samples
  • High confidence: > 50 samples

  • Timeout: P95 of historical stage durations + 20% buffer
  • Iterations: Mean of successful build iterations
  • Model: Cheapest model with >90% success rate
  • Team size: Mean team size from historical runs
  • Quality threshold: 25th percentile of passing quality scores
  • Coverage: Median coverage from successful runs
EOF
}
|
|
884
|
+
|
|
885
|
+
# ─── Main Entry Point ────────────────────────────────────────────────────────
main() {
  # Dispatch the first CLI argument to its cmd_* handler.
  # Defaults to "help" when no subcommand is given; unknown
  # subcommands print help and return 1.
  local subcommand="${1:-help}"
  shift 2>/dev/null || true

  case "$subcommand" in
    get)       cmd_get "$@" ;;
    profile)   cmd_profile "$@" ;;
    train)     cmd_train "$@" ;;
    compare)   cmd_compare "$@" ;;
    recommend) cmd_recommend "$@" ;;
    reset)     cmd_reset "$@" ;;
    help)      cmd_help ;;
    version)   echo "sw-adaptive v${VERSION}" ;;
    *)
      error "Unknown command: $subcommand"
      cmd_help
      return 1
      ;;
  esac
}
|
|
922
|
+
|
|
923
|
+
# Run main only when this script is executed directly; when sourced
# (BASH_SOURCE[0] != $0) the functions are defined but nothing runs.
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
  main "$@"
fi
|