codeharness 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/codeharness +9 -0
- package/dist/chunk-7ZD2ZNDU.js +540 -0
- package/dist/docker-CT57JGM7.js +33 -0
- package/dist/index.js +6104 -0
- package/package.json +39 -0
- package/ralph/AGENTS.md +38 -0
- package/ralph/bridge.sh +421 -0
- package/ralph/db_schema_gen.sh +109 -0
- package/ralph/doc_gardener.sh +352 -0
- package/ralph/drivers/claude-code.sh +160 -0
- package/ralph/exec_plans.sh +252 -0
- package/ralph/harness_status.sh +156 -0
- package/ralph/lib/circuit_breaker.sh +210 -0
- package/ralph/lib/date_utils.sh +60 -0
- package/ralph/lib/timeout_utils.sh +77 -0
- package/ralph/onboard.sh +83 -0
- package/ralph/ralph.sh +1006 -0
- package/ralph/retro.sh +298 -0
- package/ralph/validate_epic_docs.sh +129 -0
- package/ralph/verify_gates.sh +241 -0
package/package.json
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "codeharness",
|
|
3
|
+
"version": "0.6.1",
|
|
4
|
+
"type": "module",
|
|
5
|
+
"description": "CLI for codeharness — makes autonomous coding agents produce software that actually works",
|
|
6
|
+
"bin": {
|
|
7
|
+
"codeharness": "dist/index.js"
|
|
8
|
+
},
|
|
9
|
+
"files": [
|
|
10
|
+
"dist",
|
|
11
|
+
"bin",
|
|
12
|
+
"ralph/**/*.sh",
|
|
13
|
+
"ralph/AGENTS.md"
|
|
14
|
+
],
|
|
15
|
+
"engines": {
|
|
16
|
+
"node": ">=18"
|
|
17
|
+
},
|
|
18
|
+
"scripts": {
|
|
19
|
+
"build": "tsup",
|
|
20
|
+
"test": "bats tests/",
|
|
21
|
+
"test:unit": "vitest run",
|
|
22
|
+
"test:coverage": "vitest run --coverage"
|
|
23
|
+
},
|
|
24
|
+
"dependencies": {
|
|
25
|
+
"commander": "^14.0.3",
|
|
26
|
+
"yaml": "^2.8.2"
|
|
27
|
+
},
|
|
28
|
+
"devDependencies": {
|
|
29
|
+
"@opentelemetry/auto-instrumentations-node": "^0.71.0",
|
|
30
|
+
"@opentelemetry/exporter-logs-otlp-http": "^0.213.0",
|
|
31
|
+
"@opentelemetry/exporter-metrics-otlp-http": "^0.213.0",
|
|
32
|
+
"@opentelemetry/exporter-trace-otlp-http": "^0.213.0",
|
|
33
|
+
"@types/node": "^25.5.0",
|
|
34
|
+
"@vitest/coverage-v8": "^4.1.0",
|
|
35
|
+
"tsup": "^8.5.1",
|
|
36
|
+
"typescript": "^5.9.3",
|
|
37
|
+
"vitest": "^4.1.0"
|
|
38
|
+
}
|
|
39
|
+
}
|
package/ralph/AGENTS.md
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
# ralph/
|
|
2
|
+
|
|
3
|
+
Vendored autonomous execution loop. Spawns fresh Claude Code instances per iteration with verification gates, circuit breaker protection, and crash recovery.
|
|
4
|
+
|
|
5
|
+
## Key Files
|
|
6
|
+
|
|
7
|
+
| File | Purpose |
|
|
8
|
+
|------|---------|
|
|
9
|
+
| ralph.sh | Core loop — iteration, termination, rate limiting |
|
|
10
|
+
| bridge.sh | BMAD→Ralph task bridge — converts epics to progress.json |
|
|
11
|
+
| verify_gates.sh | Per-story verification gate checks (4 gates) |
|
|
12
|
+
| drivers/claude-code.sh | Claude Code instance lifecycle and command building |
|
|
13
|
+
| lib/date_utils.sh | Cross-platform date/timestamp utilities |
|
|
14
|
+
| lib/timeout_utils.sh | Cross-platform timeout command detection |
|
|
15
|
+
| lib/circuit_breaker.sh | Stagnation detection (CLOSED→HALF_OPEN→OPEN) |
|
|
16
|
+
|
|
17
|
+
## Dependencies
|
|
18
|
+
|
|
19
|
+
- `jq`: JSON processing for progress/status files
|
|
20
|
+
- `gtimeout`/`timeout`: Per-iteration timeout protection
|
|
21
|
+
- `git`: Progress detection via commit diff
|
|
22
|
+
|
|
23
|
+
## Conventions
|
|
24
|
+
|
|
25
|
+
- All scripts use `set -e` and target bash (they rely on bash features such as `[[ ]]` and `declare -A`, so they are not plain POSIX sh)
|
|
26
|
+
- Driver pattern: `drivers/{name}.sh` implements the driver interface
|
|
27
|
+
- State files: `status.json` (loop state), `progress.json` (task tracking)
|
|
28
|
+
- Logs written to `logs/ralph.log`
|
|
29
|
+
- Scripts guard main execution with `[[ "${BASH_SOURCE[0]}" == "${0}" ]]`
|
|
30
|
+
|
|
31
|
+
## Testing
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
bats tests/ # All tests
|
|
35
|
+
bats tests/ralph_core.bats # Core loop functions
|
|
36
|
+
bats tests/bridge.bats # Bridge script
|
|
37
|
+
bats tests/verify_gates.bats # Verification gates
|
|
38
|
+
```
|
package/ralph/bridge.sh
ADDED
|
@@ -0,0 +1,421 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# bridge.sh — BMAD→Ralph Task Bridge
# Converts BMAD epics/stories to Ralph execution tasks with verification requirements.
# Produces ralph/progress.json consumed by ralph.sh loop.
#
# Usage:
#   ralph/bridge.sh --epics PATH --output PATH [--sprint-status PATH]
#   ralph/bridge.sh --tasks PATH --output PATH (standalone mode)

set -e

# ─── CLI Arguments ────────────────────────────────────────────────────────

# Globals filled in by the argument-parsing loop below; empty = "not given".
EPICS_FILE=""          # BMAD mode: path to epics.md
SPRINT_STATUS_FILE=""  # BMAD mode (optional): sprint-status.yaml mapping story states
TASKS_FILE=""          # Standalone mode: markdown checklist or plain-text task list
OUTPUT_FILE=""         # Required: destination for generated progress.json
|
|
18
|
+
|
|
19
|
+
# Print CLI usage to stdout. The quoted 'HELPEOF' delimiter prevents any
# parameter/command expansion inside the help text.
show_help() {
    cat << 'HELPEOF'
BMAD→Ralph Task Bridge — converts stories to execution tasks

Usage:
  ralph/bridge.sh --epics PATH --output PATH [OPTIONS]
  ralph/bridge.sh --tasks PATH --output PATH (standalone mode)

BMAD Mode:
  --epics PATH           Path to BMAD epics.md file
  --sprint-status PATH   Path to sprint-status.yaml (optional, maps story states)
  --output PATH          Output path for progress.json

Standalone Mode:
  --tasks PATH           Path to markdown checklist or plain text task list
  --output PATH          Output path for progress.json

Options:
  -h, --help             Show this help message

The bridge parses BMAD stories and produces ralph/progress.json with:
  - Story ID, title, epic, description, acceptance criteria
  - Verification requirements (proof path, observability)
  - Task status mapped from sprint status (or default: pending)
HELPEOF
}
|
|
45
|
+
|
|
46
|
+
# ─── Parse Arguments ──────────────────────────────────────────────────────
|
|
47
|
+
|
|
48
|
+
# Parse CLI options into the globals declared above.
# Each value-taking option verifies its argument exists: previously a bare
# `shift 2` past the end of $@ aborted under `set -e` with no diagnostic.
# On an unknown option, usage now goes to stderr along with the error.
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            show_help
            exit 0
            ;;
        --epics)
            [[ $# -ge 2 ]] || { echo "Error: --epics requires a value" >&2; exit 1; }
            EPICS_FILE="$2"
            shift 2
            ;;
        --sprint-status)
            [[ $# -ge 2 ]] || { echo "Error: --sprint-status requires a value" >&2; exit 1; }
            SPRINT_STATUS_FILE="$2"
            shift 2
            ;;
        --tasks)
            [[ $# -ge 2 ]] || { echo "Error: --tasks requires a value" >&2; exit 1; }
            TASKS_FILE="$2"
            shift 2
            ;;
        --output)
            [[ $# -ge 2 ]] || { echo "Error: --output requires a value" >&2; exit 1; }
            OUTPUT_FILE="$2"
            shift 2
            ;;
        *)
            echo "Unknown option: $1" >&2
            show_help >&2
            exit 1
            ;;
    esac
done
|
|
77
|
+
|
|
78
|
+
# ─── Validation ───────────────────────────────────────────────────────────
|
|
79
|
+
|
|
80
|
+
# Fail fast on inconsistent CLI state before doing any work.
# Guard-clause form; messages and exit codes are unchanged.
[[ -n "$OUTPUT_FILE" ]] || { echo "Error: --output is required" >&2; exit 1; }

[[ -n "$EPICS_FILE" || -n "$TASKS_FILE" ]] || { echo "Error: either --epics or --tasks is required" >&2; exit 1; }

[[ -z "$EPICS_FILE" || -f "$EPICS_FILE" ]] || { echo "Error: epics file not found: $EPICS_FILE" >&2; exit 1; }

[[ -z "$TASKS_FILE" || -f "$TASKS_FILE" ]] || { echo "Error: tasks file not found: $TASKS_FILE" >&2; exit 1; }
|
|
99
|
+
|
|
100
|
+
# ─── Sprint Status Parsing ────────────────────────────────────────────────
|
|
101
|
+
|
|
102
|
+
# Parse sprint status YAML into an associative array.
# Maps story id (e.g., "1.1", derived from slug "1-1-login-page") to status.
declare -A SPRINT_STATUSES

# parse_sprint_status FILE
# Reads the `development_status:` section of a sprint-status YAML file and
# fills SPRINT_STATUSES keyed by "epic.story" with the raw status string.
# Silently returns if FILE does not exist.
# Uses pure bash pattern matching — the previous version forked
# echo|sed|cut three times per line.
parse_sprint_status() {
    local file="$1"
    if [[ ! -f "$file" ]]; then
        return
    fi

    local in_dev_status=false
    local line key value
    while IFS= read -r line; do
        # Enter the development_status section
        if [[ "$line" =~ ^development_status: ]]; then
            in_dev_status=true
            continue
        fi

        # A non-empty, non-indented line ends the section
        if [[ "$in_dev_status" == "true" && -n "$line" && ! "$line" =~ ^[[:space:]] ]]; then
            in_dev_status=false
            continue
        fi

        if [[ "$in_dev_status" == "true" ]]; then
            # Parse "  key: value" lines (value must start with a non-space,
            # matching the old sed/cut behavior of skipping empty values)
            if [[ "$line" =~ ^[[:space:]]*([^:]+):[[:space:]]*([^[:space:]].*)$ ]]; then
                key="${BASH_REMATCH[1]}"
                value="${BASH_REMATCH[2]}"
                # Extract story number from slug: "1-1-login-page" -> "1.1"
                if [[ "$key" =~ ^([0-9]+)-([0-9]+) ]]; then
                    SPRINT_STATUSES["${BASH_REMATCH[1]}.${BASH_REMATCH[2]}"]="$value"
                fi
            fi
        fi
    done < "$file"
}
|
|
142
|
+
|
|
143
|
+
# Map BMAD status to Ralph task status.
# done → complete; in-progress/review → in_progress; anything else
# (ready-for-dev, backlog, empty, unknown) → pending.
map_status() {
    local bmad="$1"
    case "$bmad" in
        done)                echo "complete" ;;
        in-progress|review)  echo "in_progress" ;;
        *)                   echo "pending" ;;
    esac
}
|
|
154
|
+
|
|
155
|
+
# ─── BMAD Epics Parsing ──────────────────────────────────────────────────
|
|
156
|
+
|
|
157
|
+
# parse_epics EPICS_FILE OUTPUT_FILE
# Walks a BMAD epics.md file and writes progress.json with one task per
# "### Story N.M:" section. Story status comes from SPRINT_STATUSES (loaded
# from --sprint-status) when present, else "pending".
# FIX: acceptance-criteria collection used `jq -R '[inputs]'`, which consumes
# the first input line as `.` — so the FIRST criterion was silently dropped.
# `jq -Rn '[inputs]'` collects every line.
parse_epics() {
    local epics_file="$1"
    local output_file="$2"

    # Load sprint status if provided
    if [[ -n "$SPRINT_STATUS_FILE" ]]; then
        parse_sprint_status "$SPRINT_STATUS_FILE"
    fi

    local tasks_json="[]"
    local current_epic=""
    local current_story_id=""
    local current_story_title=""
    local current_description=""
    local current_ac=""
    local in_story=false
    local in_ac=false
    local in_description=false

    # Flush the current story into tasks_json (no-op when no story is open).
    # Nested definition: shares parse_epics' locals via dynamic scoping.
    flush_story() {
        if [[ -z "$current_story_id" ]]; then
            return
        fi

        # Determine status from sprint status or default to pending
        local status="pending"
        if [[ -n "${SPRINT_STATUSES[$current_story_id]:-}" ]]; then
            status=$(map_status "${SPRINT_STATUSES[$current_story_id]}")
        fi

        # Clean up description
        current_description=$(echo "$current_description" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')

        # Build acceptance criteria array from collected lines.
        # `-n` is essential here: without it jq eats the first line as `.`
        # and `[inputs]` only collects the remainder.
        local ac_array="[]"
        if [[ -n "$current_ac" ]]; then
            ac_array=$(echo "$current_ac" | while IFS= read -r ac_line; do
                ac_line=$(echo "$ac_line" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
                if [[ -n "$ac_line" ]]; then
                    echo "$ac_line"
                fi
            done | jq -Rn '[inputs]' 2>/dev/null || echo '[]')
            # Handle case where jq gets no input
            if [[ -z "$ac_array" || "$ac_array" == "null" ]]; then
                ac_array="[]"
            fi
        fi

        # Build task JSON
        local task
        task=$(jq -n \
            --arg id "$current_story_id" \
            --arg title "$current_story_title" \
            --arg epic "$current_epic" \
            --arg description "$current_description" \
            --arg status "$status" \
            --argjson acceptance_criteria "$ac_array" \
            --arg proof_path "verification/${current_story_id}-proof.md" \
            '{
                id: $id,
                title: $title,
                epic: $epic,
                description: $description,
                status: $status,
                acceptance_criteria: $acceptance_criteria,
                verification: {
                    proof_path: $proof_path,
                    observability: {
                        query_logs: true,
                        check_traces: true
                    },
                    showboat: {
                        required: true,
                        template: "templates/showboat-template.md"
                    }
                }
            }')

        tasks_json=$(echo "$tasks_json" | jq --argjson task "$task" '. += [$task]')

        # Reset per-story state
        current_story_id=""
        current_story_title=""
        current_description=""
        current_ac=""
        in_story=false
        in_ac=false
        in_description=false
    }

    while IFS= read -r line; do
        # Detect epic headers: ## Epic N: Title
        if [[ "$line" =~ ^##[[:space:]]+Epic[[:space:]]+([0-9]+):[[:space:]]*(.*) ]]; then
            flush_story
            current_epic="Epic ${BASH_REMATCH[1]}: ${BASH_REMATCH[2]}"
            continue
        fi

        # Detect story headers: ### Story N.M: Title
        if [[ "$line" =~ ^###[[:space:]]+Story[[:space:]]+([0-9]+\.[0-9]+):[[:space:]]*(.*) ]]; then
            flush_story
            current_story_id="${BASH_REMATCH[1]}"
            current_story_title="${BASH_REMATCH[2]}"
            in_story=true
            in_description=true
            in_ac=false
            continue
        fi

        # Skip if not in a story
        if [[ "$in_story" != "true" ]]; then
            continue
        fi

        # Detect acceptance criteria section
        if [[ "$line" =~ ^\*\*Acceptance[[:space:]]+Criteria ]]; then
            in_description=false
            in_ac=true
            continue
        fi

        # Collect description (user story lines: As a / I want / So that)
        if [[ "$in_description" == "true" ]]; then
            if [[ "$line" =~ ^(As[[:space:]]a|I[[:space:]]want|So[[:space:]]that) ]]; then
                if [[ -n "$current_description" ]]; then
                    current_description+=" "
                fi
                current_description+="$line"
            fi
            continue
        fi

        # Collect acceptance criteria lines
        if [[ "$in_ac" == "true" ]]; then
            # **Given**, **When**, **Then**, **And** lines
            if [[ "$line" =~ ^\*\*(Given|When|Then|And)\*\*[[:space:]]*(.*) ]]; then
                local keyword="${BASH_REMATCH[1]}"
                local rest="${BASH_REMATCH[2]}"
                current_ac+="${keyword} ${rest}"$'\n'
            fi
            continue
        fi
    done < "$epics_file"

    # Flush last story
    flush_story

    # Build final output
    local generated_at
    generated_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

    jq -n \
        --arg generated_at "$generated_at" \
        --arg source "$epics_file" \
        --argjson tasks "$tasks_json" \
        '{
            generated_at: $generated_at,
            source: $source,
            tasks: $tasks
        }' > "$output_file"
}
|
|
319
|
+
|
|
320
|
+
# ─── Standalone Tasks Parsing ─────────────────────────────────────────────
|
|
321
|
+
|
|
322
|
+
# _standalone_task ID TITLE STATUS
# Emit the JSON object for one standalone task on stdout. Shared by both
# branches of parse_standalone_tasks (the original duplicated this template).
_standalone_task() {
    jq -n \
        --arg id "$1" \
        --arg title "$2" \
        --arg status "$3" \
        '{
            id: $id,
            title: $title,
            epic: "Standalone",
            description: $title,
            status: $status,
            acceptance_criteria: [],
            verification: {
                proof_path: ("verification/" + $id + "-proof.md"),
                observability: { query_logs: true, check_traces: true },
                showboat: { required: true, template: "templates/showboat-template.md" }
            }
        }'
}

# parse_standalone_tasks TASKS_FILE OUTPUT_FILE
# Converts a markdown checklist (- [ ] / - [x]) or a plain-text task list
# into progress.json. Checked items map to "complete", everything else to
# "pending". Blank lines and lines starting with '#' are skipped.
parse_standalone_tasks() {
    local tasks_file="$1"
    local output_file="$2"

    local tasks_json="[]"
    local task_num=0
    local line task title check status

    while IFS= read -r line; do
        # Skip empty lines
        [[ -z "$line" ]] && continue

        # Parse markdown checklist: - [ ] or - [x]
        if [[ "$line" =~ ^[[:space:]]*-[[:space:]]+\[([[:space:]xX])\][[:space:]]+(.*) ]]; then
            task_num=$((task_num + 1))
            check="${BASH_REMATCH[1]}"
            title="${BASH_REMATCH[2]}"
            status="pending"
            if [[ "$check" == "x" || "$check" == "X" ]]; then
                status="complete"
            fi

            task=$(_standalone_task "T-$(printf '%03d' "$task_num")" "$title" "$status")
            tasks_json=$(echo "$tasks_json" | jq --argjson task "$task" '. += [$task]')
            continue
        fi

        # Plain text: one task per non-empty, non-comment line
        if [[ ! "$line" =~ ^[[:space:]]*# ]]; then
            # NOTE: task_num advances even if the line trims to empty,
            # matching the original's ID numbering.
            task_num=$((task_num + 1))
            title=$(echo "$line" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')

            if [[ -n "$title" ]]; then
                task=$(_standalone_task "T-$(printf '%03d' "$task_num")" "$title" "pending")
                tasks_json=$(echo "$tasks_json" | jq --argjson task "$task" '. += [$task]')
            fi
        fi
    done < "$tasks_file"

    local generated_at
    generated_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

    jq -n \
        --arg generated_at "$generated_at" \
        --arg source "$tasks_file" \
        --argjson tasks "$tasks_json" \
        '{
            generated_at: $generated_at,
            source: $source,
            tasks: $tasks
        }' > "$output_file"
}
|
|
410
|
+
|
|
411
|
+
# ─── Main ─────────────────────────────────────────────────────────────────

# Ensure the output directory exists before either parser writes to it.
mkdir -p "$(dirname "$OUTPUT_FILE")"

# BMAD mode takes precedence when both are given; validation above
# guarantees at least one of the two files is set.
if [[ -n "$EPICS_FILE" ]]; then
    parse_epics "$EPICS_FILE" "$OUTPUT_FILE"
elif [[ -n "$TASKS_FILE" ]]; then
    parse_standalone_tasks "$TASKS_FILE" "$OUTPUT_FILE"
fi

echo "[OK] Bridge: $(jq '.tasks | length' "$OUTPUT_FILE") tasks generated → $OUTPUT_FILE"
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# db_schema_gen.sh — Generate docs/generated/db-schema.md from schema JSON
# Schema JSON can come from DB MCP queries or be provided directly.
#
# Usage: ralph/db_schema_gen.sh --project-dir DIR --schema-file PATH

set -e

# CLI state — filled in by the argument loop below; empty means "not given".
PROJECT_DIR=""   # project root; output is written under docs/generated/
SCHEMA_FILE=""   # JSON: {"tables": [{"name": ..., "columns": [...]}]}

# Print CLI usage to stdout. Quoted 'HELPEOF' delimiter suppresses expansion.
show_help() {
    cat << 'HELPEOF'
DB Schema Generator — create db-schema.md from schema data

Usage:
  ralph/db_schema_gen.sh --project-dir DIR --schema-file PATH

The schema file should be JSON with this structure:
  {"tables": [{"name": "...", "columns": [{"name": "...", "type": "...", ...}]}]}

Options:
  --project-dir DIR    Project root directory
  --schema-file PATH   Path to schema JSON file (from DB MCP or manual)
  -h, --help           Show this help message
HELPEOF
}
|
|
28
|
+
|
|
29
|
+
# Parse CLI options into PROJECT_DIR / SCHEMA_FILE.
# Value-taking options verify their argument exists: previously a bare
# `shift 2` past the end of $@ aborted under `set -e` with no diagnostic.
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            show_help
            exit 0
            ;;
        --project-dir)
            [[ $# -ge 2 ]] || { echo "Error: --project-dir requires a value" >&2; exit 1; }
            PROJECT_DIR="$2"
            shift 2
            ;;
        --schema-file)
            [[ $# -ge 2 ]] || { echo "Error: --schema-file requires a value" >&2; exit 1; }
            SCHEMA_FILE="$2"
            shift 2
            ;;
        *)
            echo "Unknown option: $1" >&2
            exit 1
            ;;
    esac
done
|
|
49
|
+
|
|
50
|
+
# Fail fast on missing or invalid CLI arguments.
# Guard-clause form; messages and exit codes are unchanged.
[[ -n "$PROJECT_DIR" ]] || { echo "Error: --project-dir is required" >&2; exit 1; }

[[ -n "$SCHEMA_FILE" ]] || { echo "Error: --schema-file is required" >&2; exit 1; }

[[ -f "$SCHEMA_FILE" ]] || { echo "Error: schema file not found: $SCHEMA_FILE" >&2; exit 1; }
|
|
64
|
+
|
|
65
|
+
# ─── Generate ─────────────────────────────────────────────────────────────

OUTPUT_FILE="$PROJECT_DIR/docs/generated/db-schema.md"
mkdir -p "$(dirname "$OUTPUT_FILE")"

timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
table_count=$(jq '.tables | length' "$SCHEMA_FILE")

{
    echo "<!-- DO NOT EDIT MANUALLY — generated by db_schema_gen.sh -->"
    echo ""
    echo "# Database Schema"
    echo ""
    echo "**Generated:** $timestamp"
    echo "**Tables:** $table_count"
    echo ""

    # Render all tables in a single jq pass. The previous version forked jq
    # five times per column (O(tables × columns) process spawns).
    # BUG FIX: it also read nullable via `.nullable // true`, and jq's `//`
    # treats boolean false as absent — so a JSON `"nullable": false` column
    # was reported as YES. We now test the value explicitly.
    jq -r '
        .tables[]
        | "## \(.name)\n",
          "| Column | Type | Nullable | Key | References |",
          "|--------|------|----------|-----|------------|",
          ( .columns[]
            | "| \(.name) | \(.type) | \(
                  if .nullable == false or .nullable == "false" then "NO" else "YES" end
              ) | \(
                  if ((.primary_key // false) | tostring) == "true" then "PK" else "-" end
              ) | \(.references // "-") |"
          ),
          ""
    ' "$SCHEMA_FILE"
} > "$OUTPUT_FILE"

echo "[OK] DB schema: $table_count tables → $OUTPUT_FILE"
|