codeharness 0.26.4 → 0.27.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{chunk-KZQETPQS.js → chunk-JMYDBV6O.js} +127 -430
- package/dist/{docker-JEC7THRT.js → docker-5LUADX2H.js} +1 -1
- package/dist/index.js +5150 -2054
- package/package.json +5 -3
- package/patches/AGENTS.md +1 -1
- package/patches/dev/enforcement.md +16 -7
- package/patches/retro/enforcement.md +2 -2
- package/patches/review/enforcement.md +24 -3
- package/patches/verify/story-verification.md +25 -11
- package/templates/agents/analyst.yaml +10 -0
- package/templates/agents/architect.yaml +11 -0
- package/templates/agents/dev.yaml +10 -0
- package/templates/agents/evaluator.yaml +92 -0
- package/templates/agents/pm.yaml +12 -0
- package/templates/agents/qa.yaml +15 -0
- package/templates/agents/sm.yaml +10 -0
- package/templates/agents/tech-writer.yaml +11 -0
- package/templates/agents/ux-designer.yaml +13 -0
- package/templates/workflows/default.yaml +23 -0
- package/ralph/AGENTS.md +0 -48
- package/ralph/bridge.sh +0 -424
- package/ralph/db_schema_gen.sh +0 -109
- package/ralph/drivers/claude-code.sh +0 -140
- package/ralph/exec_plans.sh +0 -252
- package/ralph/harness_status.sh +0 -147
- package/ralph/lib/circuit_breaker.sh +0 -210
- package/ralph/lib/date_utils.sh +0 -60
- package/ralph/lib/timeout_utils.sh +0 -77
- package/ralph/onboard.sh +0 -83
- package/ralph/ralph.sh +0 -1402
- package/ralph/validate_epic_docs.sh +0 -129
- package/ralph/verify_gates.sh +0 -210
package/ralph/bridge.sh
DELETED
|
@@ -1,424 +0,0 @@
|
|
|
#!/usr/bin/env bash
# bridge.sh — BMAD→Ralph Task Bridge
# Converts BMAD epics/stories to Ralph execution tasks with verification
# requirements. Produces ralph/progress.json consumed by the ralph.sh loop.
#
# Usage:
#   ralph/bridge.sh --epics PATH --output PATH [--sprint-status PATH]
#   ralph/bridge.sh --tasks PATH --output PATH   (standalone mode)

set -euo pipefail

# ─── CLI Arguments ────────────────────────────────────────────────────────

EPICS_FILE=""
SPRINT_STATUS_FILE=""
TASKS_FILE=""
OUTPUT_FILE=""

show_help() {
  cat << 'HELPEOF'
BMAD→Ralph Task Bridge — converts stories to execution tasks

Usage:
  ralph/bridge.sh --epics PATH --output PATH [OPTIONS]
  ralph/bridge.sh --tasks PATH --output PATH   (standalone mode)

BMAD Mode:
  --epics PATH            Path to BMAD epics.md file
  --sprint-status PATH    Path to sprint-status.yaml (optional, maps story states)
  --output PATH           Output path for progress.json

Standalone Mode:
  --tasks PATH            Path to markdown checklist or plain text task list
  --output PATH           Output path for progress.json

Options:
  -h, --help              Show this help message

The bridge parses BMAD stories and produces ralph/progress.json with:
  - Story ID, title, epic, description, acceptance criteria
  - Verification requirements (proof path, observability)
  - Task status mapped from sprint status (or default: pending)
HELPEOF
}

# ─── Parse Arguments ──────────────────────────────────────────────────────

while [[ $# -gt 0 ]]; do
  case $1 in
    -h|--help)
      show_help
      exit 0
      ;;
    --epics)
      # ${2:?...} aborts with a clear message instead of silently consuming
      # a missing value (the original `shift 2` would eat the next option).
      EPICS_FILE="${2:?Error: --epics requires a value}"
      shift 2
      ;;
    --sprint-status)
      SPRINT_STATUS_FILE="${2:?Error: --sprint-status requires a value}"
      shift 2
      ;;
    --tasks)
      TASKS_FILE="${2:?Error: --tasks requires a value}"
      shift 2
      ;;
    --output)
      OUTPUT_FILE="${2:?Error: --output requires a value}"
      shift 2
      ;;
    *)
      echo "Unknown option: $1" >&2
      show_help
      exit 1
      ;;
  esac
done

# ─── Validation ───────────────────────────────────────────────────────────

if ! command -v jq >/dev/null 2>&1; then
  echo "Error: jq is required but not installed" >&2
  exit 1
fi

if [[ -z "$OUTPUT_FILE" ]]; then
  echo "Error: --output is required" >&2
  exit 1
fi

if [[ -z "$EPICS_FILE" && -z "$TASKS_FILE" ]]; then
  echo "Error: either --epics or --tasks is required" >&2
  exit 1
fi

if [[ -n "$EPICS_FILE" && ! -f "$EPICS_FILE" ]]; then
  echo "Error: epics file not found: $EPICS_FILE" >&2
  exit 1
fi

if [[ -n "$TASKS_FILE" && ! -f "$TASKS_FILE" ]]; then
  echo "Error: tasks file not found: $TASKS_FILE" >&2
  exit 1
fi

# ─── Sprint Status Parsing ────────────────────────────────────────────────

# Newline-delimited "storyId=status" store built from sprint-status.yaml.
# Maps story slug (e.g., "1-1-login-page") to its BMAD status.
SPRINT_STATUSES=""

# Populate SPRINT_STATUSES from the development_status section of the
# given YAML file. Minimal hand-rolled parse: only flat "  key: value"
# entries under "development_status:" are recognized.
parse_sprint_status() {
  local file="$1"
  if [[ ! -f "$file" ]]; then
    return
  fi

  local in_dev_status=false
  local line key value
  while IFS= read -r line; do
    # Enter the development_status section
    if [[ "$line" =~ ^development_status: ]]; then
      in_dev_status=true
      continue
    fi

    # Any non-indented, non-empty line ends the section
    if [[ "$in_dev_status" == "true" && -n "$line" && ! "$line" =~ ^[[:space:]] ]]; then
      in_dev_status=false
      continue
    fi

    if [[ "$in_dev_status" == "true" ]]; then
      # Parse "  key: value" lines
      key=$(echo "$line" | sed 's/^[[:space:]]*//' | cut -d: -f1 | sed 's/[[:space:]]*$//')
      value=$(echo "$line" | cut -d: -f2- | sed 's/^[[:space:]]*//')

      if [[ -n "$key" && -n "$value" ]]; then
        # Extract story number from slug: "1-1-login-page" -> "1.1"
        if [[ "$key" =~ ^([0-9]+)-([0-9]+) ]]; then
          local story_id="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}"
          SPRINT_STATUSES+="${story_id}=${value}"$'\n'
        fi
      fi
    fi
  done < "$file"
}

# Map a BMAD sprint status to a Ralph task status.
map_status() {
  local bmad_status="$1"
  case "$bmad_status" in
    done)                     echo "complete" ;;
    in-progress)              echo "in_progress" ;;
    review)                   echo "in_progress" ;;
    ready-for-dev|backlog|"") echo "pending" ;;
    *)                        echo "pending" ;;
  esac
}

# ─── BMAD Epics Parsing ──────────────────────────────────────────────────

# Parse a BMAD epics.md file ($1) into progress.json ($2).
parse_epics() {
  local epics_file="$1"
  local output_file="$2"

  # Load sprint status if provided
  if [[ -n "$SPRINT_STATUS_FILE" ]]; then
    parse_sprint_status "$SPRINT_STATUS_FILE"
  fi

  local tasks_json="[]"
  local current_epic=""
  local current_story_id=""
  local current_story_title=""
  local current_description=""
  local current_ac=""
  local in_story=false
  local in_ac=false
  local in_description=false

  # Append the story currently being accumulated to tasks_json, then reset
  # the accumulators. Nested function: relies on bash dynamic scoping to
  # read/write parse_epics' locals.
  flush_story() {
    if [[ -z "$current_story_id" ]]; then
      return
    fi

    # Determine status from sprint status or default to pending.
    # The dot in the story id is escaped so "1.1" cannot match "1x1",
    # and `|| true` keeps a grep miss from tripping set -e/pipefail.
    local status="pending"
    local _sprint_val
    _sprint_val=$(printf '%s' "$SPRINT_STATUSES" \
      | grep "^${current_story_id//./\\.}=" | head -1 | cut -d= -f2- || true)
    if [[ -n "$_sprint_val" ]]; then
      status=$(map_status "$_sprint_val")
    fi

    # Clean up description
    current_description=$(echo "$current_description" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')

    # Build acceptance criteria JSON array from collected lines.
    # `jq -Rn '[inputs]'` — the -n flag is required: without it jq consumes
    # the first line as its initial input and `[inputs]` drops it.
    local ac_array="[]"
    if [[ -n "$current_ac" ]]; then
      ac_array=$(echo "$current_ac" | while IFS= read -r ac_line; do
        ac_line=$(echo "$ac_line" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
        if [[ -n "$ac_line" ]]; then
          echo "$ac_line"
        fi
      done | jq -Rn '[inputs]' 2>/dev/null || echo '[]')
      # Handle case where jq gets no input
      if [[ -z "$ac_array" || "$ac_array" == "null" ]]; then
        ac_array="[]"
      fi
    fi

    # Build task JSON
    local task
    task=$(jq -n \
      --arg id "$current_story_id" \
      --arg title "$current_story_title" \
      --arg epic "$current_epic" \
      --arg description "$current_description" \
      --arg status "$status" \
      --argjson acceptance_criteria "$ac_array" \
      --arg proof_path "verification/${current_story_id}-proof.md" \
      '{
        id: $id,
        title: $title,
        epic: $epic,
        description: $description,
        status: $status,
        acceptance_criteria: $acceptance_criteria,
        verification: {
          proof_path: $proof_path,
          observability: {
            query_logs: true,
            check_traces: true
          },
          showboat: {
            required: true,
            template: "templates/showboat-template.md"
          }
        }
      }')

    tasks_json=$(echo "$tasks_json" | jq --argjson task "$task" '. += [$task]')

    # Reset accumulators for the next story
    current_story_id=""
    current_story_title=""
    current_description=""
    current_ac=""
    in_story=false
    in_ac=false
    in_description=false
  }

  local line
  while IFS= read -r line; do
    # Detect epic headers: ## Epic N: Title
    if [[ "$line" =~ ^##[[:space:]]+Epic[[:space:]]+([0-9]+):[[:space:]]*(.*) ]]; then
      flush_story
      current_epic="Epic ${BASH_REMATCH[1]}: ${BASH_REMATCH[2]}"
      continue
    fi

    # Detect story headers: ### Story N.M: Title
    if [[ "$line" =~ ^###[[:space:]]+Story[[:space:]]+([0-9]+\.[0-9]+):[[:space:]]*(.*) ]]; then
      flush_story
      current_story_id="${BASH_REMATCH[1]}"
      current_story_title="${BASH_REMATCH[2]}"
      in_story=true
      in_description=true
      in_ac=false
      continue
    fi

    # Skip if not in a story
    if [[ "$in_story" != "true" ]]; then
      continue
    fi

    # Detect acceptance criteria section
    if [[ "$line" =~ ^\*\*Acceptance[[:space:]]+Criteria ]]; then
      in_description=false
      in_ac=true
      continue
    fi

    # Collect description (user story lines: As a / I want / So that)
    if [[ "$in_description" == "true" ]]; then
      if [[ "$line" =~ ^(As[[:space:]]a|I[[:space:]]want|So[[:space:]]that) ]]; then
        if [[ -n "$current_description" ]]; then
          current_description+=" "
        fi
        current_description+="$line"
      fi
      continue
    fi

    # Collect acceptance criteria lines
    if [[ "$in_ac" == "true" ]]; then
      # **Given**, **When**, **Then**, **And** lines
      if [[ "$line" =~ ^\*\*(Given|When|Then|And)\*\*[[:space:]]*(.*) ]]; then
        local keyword="${BASH_REMATCH[1]}"
        local rest="${BASH_REMATCH[2]}"
        current_ac+="${keyword} ${rest}"$'\n'
      fi
      continue
    fi
  done < "$epics_file"

  # Flush last story
  flush_story

  # Build final output
  local generated_at
  generated_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

  jq -n \
    --arg generated_at "$generated_at" \
    --arg source "$epics_file" \
    --argjson tasks "$tasks_json" \
    '{
      generated_at: $generated_at,
      source: $source,
      tasks: $tasks
    }' > "$output_file"
}

# ─── Standalone Tasks Parsing ─────────────────────────────────────────────

# Parse a markdown checklist or plain-text task list ($1) into
# progress.json ($2). Checklist items keep their checked state; any other
# non-empty, non-comment line becomes a pending task.
parse_standalone_tasks() {
  local tasks_file="$1"
  local output_file="$2"

  local tasks_json="[]"
  local task_num=0

  local line
  while IFS= read -r line; do
    # Skip empty lines
    [[ -z "$line" ]] && continue

    # Parse markdown checklist: - [ ] or - [x]
    if [[ "$line" =~ ^[[:space:]]*-[[:space:]]+\[([[:space:]xX])\][[:space:]]+(.*) ]]; then
      task_num=$((task_num + 1))
      local check="${BASH_REMATCH[1]}"
      local title="${BASH_REMATCH[2]}"
      local status="pending"
      if [[ "$check" == "x" || "$check" == "X" ]]; then
        status="complete"
      fi

      local task
      task=$(jq -n \
        --arg id "T-$(printf '%03d' "$task_num")" \
        --arg title "$title" \
        --arg status "$status" \
        '{
          id: $id,
          title: $title,
          epic: "Standalone",
          description: $title,
          status: $status,
          acceptance_criteria: [],
          verification: {
            proof_path: ("verification/" + $id + "-proof.md"),
            observability: { query_logs: true, check_traces: true },
            showboat: { required: true, template: "templates/showboat-template.md" }
          }
        }')

      tasks_json=$(echo "$tasks_json" | jq --argjson task "$task" '. += [$task]')
      continue
    fi

    # Plain text: one task per non-empty, non-comment line
    if [[ ! "$line" =~ ^[[:space:]]*# ]]; then
      task_num=$((task_num + 1))
      local title
      title=$(echo "$line" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')

      if [[ -n "$title" ]]; then
        local task
        task=$(jq -n \
          --arg id "T-$(printf '%03d' "$task_num")" \
          --arg title "$title" \
          --arg status "pending" \
          '{
            id: $id,
            title: $title,
            epic: "Standalone",
            description: $title,
            status: $status,
            acceptance_criteria: [],
            verification: {
              proof_path: ("verification/" + $id + "-proof.md"),
              observability: { query_logs: true, check_traces: true },
              showboat: { required: true, template: "templates/showboat-template.md" }
            }
          }')

        tasks_json=$(echo "$tasks_json" | jq --argjson task "$task" '. += [$task]')
      fi
    fi
  done < "$tasks_file"

  local generated_at
  generated_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

  jq -n \
    --arg generated_at "$generated_at" \
    --arg source "$tasks_file" \
    --argjson tasks "$tasks_json" \
    '{
      generated_at: $generated_at,
      source: $source,
      tasks: $tasks
    }' > "$output_file"
}

# ─── Main ─────────────────────────────────────────────────────────────────

mkdir -p "$(dirname "$OUTPUT_FILE")"

if [[ -n "$EPICS_FILE" ]]; then
  parse_epics "$EPICS_FILE" "$OUTPUT_FILE"
elif [[ -n "$TASKS_FILE" ]]; then
  parse_standalone_tasks "$TASKS_FILE" "$OUTPUT_FILE"
fi

echo "[OK] Bridge: $(jq '.tasks | length' "$OUTPUT_FILE") tasks generated → $OUTPUT_FILE"
package/ralph/db_schema_gen.sh
DELETED
|
@@ -1,109 +0,0 @@
|
|
|
#!/usr/bin/env bash
# db_schema_gen.sh — Generate docs/generated/db-schema.md from schema JSON
# Schema JSON can come from DB MCP queries or be provided directly.
#
# Usage: ralph/db_schema_gen.sh --project-dir DIR --schema-file PATH

set -euo pipefail

PROJECT_DIR=""
SCHEMA_FILE=""

show_help() {
  cat << 'HELPEOF'
DB Schema Generator — create db-schema.md from schema data

Usage:
  ralph/db_schema_gen.sh --project-dir DIR --schema-file PATH

The schema file should be JSON with this structure:
  {"tables": [{"name": "...", "columns": [{"name": "...", "type": "...", ...}]}]}

Options:
  --project-dir DIR    Project root directory
  --schema-file PATH   Path to schema JSON file (from DB MCP or manual)
  -h, --help           Show this help message
HELPEOF
}

while [[ $# -gt 0 ]]; do
  case $1 in
    -h|--help)
      show_help
      exit 0
      ;;
    --project-dir)
      # ${2:?...} aborts with a message if the value is missing
      PROJECT_DIR="${2:?Error: --project-dir requires a value}"
      shift 2
      ;;
    --schema-file)
      SCHEMA_FILE="${2:?Error: --schema-file requires a value}"
      shift 2
      ;;
    *)
      echo "Unknown option: $1" >&2
      exit 1
      ;;
  esac
done

if [[ -z "$PROJECT_DIR" ]]; then
  echo "Error: --project-dir is required" >&2
  exit 1
fi

if [[ -z "$SCHEMA_FILE" ]]; then
  echo "Error: --schema-file is required" >&2
  exit 1
fi

if [[ ! -f "$SCHEMA_FILE" ]]; then
  echo "Error: schema file not found: $SCHEMA_FILE" >&2
  exit 1
fi

# ─── Generate ─────────────────────────────────────────────────────────────

OUTPUT_FILE="$PROJECT_DIR/docs/generated/db-schema.md"
mkdir -p "$(dirname "$OUTPUT_FILE")"

timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
table_count=$(jq '.tables | length' "$SCHEMA_FILE")

{
  echo "<!-- DO NOT EDIT MANUALLY — generated by db_schema_gen.sh -->"
  echo ""
  echo "# Database Schema"
  echo ""
  echo "**Generated:** $timestamp"
  echo "**Tables:** $table_count"
  echo ""

  # One jq pass emits every table section. The previous implementation
  # spawned five jq processes per column (O(tables×columns) forks), and
  # its `.nullable // true` lookup was a logic bug: jq's `//` returns the
  # right operand when the left is false OR null, so an explicit
  # `"nullable": false` was coerced back to true and "NO" never printed.
  jq -r '
    .tables[]
    | "## \(.name)",
      "",
      "| Column | Type | Nullable | Key | References |",
      "|--------|------|----------|-----|------------|",
      (.columns[]
        | "| \(.name) | \(.type) | \(if .nullable == false then "NO" else "YES" end) | \(if .primary_key == true then "PK" else "-" end) | \(.references // "-") |"),
      ""
  ' "$SCHEMA_FILE"
} > "$OUTPUT_FILE"

echo "[OK] DB schema: $table_count tables → $OUTPUT_FILE"
|
@@ -1,140 +0,0 @@
|
|
|
#!/usr/bin/env bash
# Claude Code driver for codeharness Ralph loop
# Handles instance lifecycle: spawn, monitor, terminate
# Each iteration gets a fresh Claude Code instance with the codeharness plugin
#
# This file defines driver_* functions only; it is sourced by the loop
# runner, so every expansion of caller-provided env vars must be safe
# under `set -u`.

# Driver identification
driver_name() {
  echo "claude-code"
}

driver_display_name() {
  echo "Claude Code"
}

driver_cli_binary() {
  echo "claude"
}

driver_min_version() {
  echo "2.0.76"
}

# Check if the CLI binary is available on PATH
driver_check_available() {
  command -v "$(driver_cli_binary)" &>/dev/null
}

# Valid tool patterns for --allowedTools validation.
# Populates the global VALID_TOOL_PATTERNS array.
driver_valid_tools() {
  VALID_TOOL_PATTERNS=(
    "Write"
    "Read"
    "Edit"
    "MultiEdit"
    "Glob"
    "Grep"
    "Task"
    "TodoWrite"
    "WebFetch"
    "WebSearch"
    "Bash"
    "Bash(git *)"
    "Bash(npm *)"
    "Bash(npx *)"
    "Bash(bats *)"
    "Bash(python *)"
    "Bash(node *)"
    "Bash(showboat *)"
    "Bash(codeharness *)"
    "NotebookEdit"
    "Skill"
    "Agent"
    "ToolSearch"
  )
}

# Build the CLI command arguments
# Populates global CLAUDE_CMD_ARGS array
# Globals (read): CLAUDE_ALLOWED_TOOLS, CLAUDE_USE_CONTINUE (both optional)
# Parameters:
#   $1 - prompt_file: path to the prompt file
#   $2 - loop_context: context string for session continuity
#   $3 - session_id: session ID for resume (empty for new session)
#   $4 - plugin_dir: plugin directory (for --plugin-dir flag)
# Returns: 1 if the prompt file does not exist
driver_build_command() {
  local prompt_file=$1
  local loop_context=$2
  local session_id=$3
  local plugin_dir=${4:-""}

  CLAUDE_CMD_ARGS=("$(driver_cli_binary)")

  if [[ ! -f "$prompt_file" ]]; then
    echo "ERROR: Prompt file not found: $prompt_file" >&2
    return 1
  fi

  # Plugin directory
  if [[ -n "$plugin_dir" ]]; then
    CLAUDE_CMD_ARGS+=("--plugin-dir" "$plugin_dir")
  fi

  # Output format — always stream-json for real-time NDJSON output
  CLAUDE_CMD_ARGS+=("--output-format" "stream-json" "--verbose" "--include-partial-messages")

  # Allowed tools — ${VAR:-} defaults keep this safe when the caller runs
  # `set -u` and never exported the variable.
  if [[ -n "${CLAUDE_ALLOWED_TOOLS:-}" ]]; then
    CLAUDE_CMD_ARGS+=("--allowedTools")
    # Confine the IFS override to the read itself instead of `local IFS`,
    # which would leak a comma IFS across the rest of the function body.
    local tools_array tool
    IFS=',' read -ra tools_array <<< "$CLAUDE_ALLOWED_TOOLS"
    for tool in "${tools_array[@]}"; do
      tool=$(echo "$tool" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
      if [[ -n "$tool" ]]; then
        CLAUDE_CMD_ARGS+=("$tool")
      fi
    done
  fi

  # Session resume
  if [[ "${CLAUDE_USE_CONTINUE:-}" == "true" && -n "$session_id" ]]; then
    CLAUDE_CMD_ARGS+=("--resume" "$session_id")
  fi

  # Loop context as system prompt
  if [[ -n "$loop_context" ]]; then
    CLAUDE_CMD_ARGS+=("--append-system-prompt" "$loop_context")
  fi

  # Prompt content
  local prompt_content
  prompt_content=$(cat "$prompt_file")
  CLAUDE_CMD_ARGS+=("-p" "$prompt_content")
}

# Whether this driver supports session continuity
driver_supports_sessions() {
  return 0
}

# Claude Code supports stream-json live output
driver_supports_live_output() {
  return 0
}

# Emit the jq filter used to render raw Claude stream-json events as
# human-readable live output (text deltas inline, tool invocations marked).
driver_stream_filter() {
  echo '
if .type == "stream_event" then
  if .event.type == "content_block_delta" and .event.delta.type == "text_delta" then
    .event.delta.text
  elif .event.type == "content_block_start" and .event.content_block.type == "tool_use" then
    "\n\n⚡ [" + .event.content_block.name + "]\n"
  elif .event.type == "content_block_stop" then
    "\n"
  else
    empty
  end
else
  empty
end'
}