agentic-dev 0.2.1 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/skills/sdd/SKILL.md +178 -7
- package/.claude/skills/sdd/agents/openai.yaml +4 -0
- package/.claude/skills/sdd/references/section-map.md +67 -0
- package/package.json +1 -1
- package/sdd/99_toolchain/01_automation/agentic-dev/run_repo_phase.sh +1 -1
- package/.claude/skills/commit/SKILL.md +0 -37
- package/.claude/skills/dev-browser/SKILL.md +0 -30
- package/.claude/skills/otro/SKILL.md +0 -43
- package/.claude/skills/planning-with-files/SKILL.md +0 -37
- package/.claude/skills/prd/SKILL.md +0 -27
- package/.claude/skills/ralph-loop/SKILL.md +0 -42
- package/.claude/skills/sdd-dev/SKILL.md +0 -71
- package/.claude/skills/sdd-development/SKILL.md +0 -13
- package/.codex/skills/agents/openai.yaml +0 -4
- package/.codex/skills/commit/SKILL.md +0 -219
- package/.codex/skills/commit/references/commit_examples.md +0 -292
- package/.codex/skills/dev-browser/SKILL.md +0 -211
- package/.codex/skills/dev-browser/bun.lock +0 -443
- package/.codex/skills/dev-browser/package-lock.json +0 -2988
- package/.codex/skills/dev-browser/package.json +0 -31
- package/.codex/skills/dev-browser/references/scraping.md +0 -155
- package/.codex/skills/dev-browser/scripts/start-relay.ts +0 -32
- package/.codex/skills/dev-browser/scripts/start-server.ts +0 -117
- package/.codex/skills/dev-browser/server.sh +0 -24
- package/.codex/skills/dev-browser/src/client.ts +0 -474
- package/.codex/skills/dev-browser/src/index.ts +0 -287
- package/.codex/skills/dev-browser/src/relay.ts +0 -731
- package/.codex/skills/dev-browser/src/snapshot/__tests__/snapshot.test.ts +0 -223
- package/.codex/skills/dev-browser/src/snapshot/browser-script.ts +0 -877
- package/.codex/skills/dev-browser/src/snapshot/index.ts +0 -14
- package/.codex/skills/dev-browser/src/snapshot/inject.ts +0 -13
- package/.codex/skills/dev-browser/src/types.ts +0 -34
- package/.codex/skills/dev-browser/tsconfig.json +0 -36
- package/.codex/skills/dev-browser/vitest.config.ts +0 -12
- package/.codex/skills/otro/SKILL.md +0 -74
- package/.codex/skills/otro/agents/openai.yaml +0 -4
- package/.codex/skills/otro/references/agent-prompts.md +0 -61
- package/.codex/skills/otro/references/contracts.md +0 -146
- package/.codex/skills/otro/references/orchestration-loop.md +0 -51
- package/.codex/skills/otro/references/runtime.md +0 -79
- package/.codex/skills/otro/runs/README.md +0 -11
- package/.codex/skills/otro/schemas/step_plan.schema.json +0 -289
- package/.codex/skills/otro/schemas/task_result.schema.json +0 -142
- package/.codex/skills/otro/schemas/wave_plan.schema.json +0 -4
- package/.codex/skills/otro/scripts/README.md +0 -38
- package/.codex/skills/otro/scripts/bump_validation_header.py +0 -179
- package/.codex/skills/otro/scripts/check_validation_header.py +0 -84
- package/.codex/skills/otro/scripts/common.py +0 -303
- package/.codex/skills/otro/scripts/init_run.sh +0 -68
- package/.codex/skills/otro/scripts/plan_loop.py +0 -8
- package/.codex/skills/otro/scripts/plan_step.py +0 -367
- package/.codex/skills/otro/scripts/plan_wave.py +0 -8
- package/.codex/skills/otro/scripts/reconcile_loop.py +0 -8
- package/.codex/skills/otro/scripts/reconcile_step.py +0 -37
- package/.codex/skills/otro/scripts/reconcile_wave.py +0 -8
- package/.codex/skills/otro/scripts/run_loop.py +0 -300
- package/.codex/skills/otro/scripts/run_loop_step.py +0 -8
- package/.codex/skills/otro/scripts/run_step.py +0 -246
- package/.codex/skills/otro/scripts/run_wave.py +0 -8
- package/.codex/skills/otro/validation/validation.md +0 -15
- package/.codex/skills/planning-with-files/SKILL.md +0 -42
- package/.codex/skills/planning-with-files/agents/openai.yaml +0 -4
- package/.codex/skills/planning-with-files/assets/plan-template.md +0 -37
- package/.codex/skills/planning-with-files/references/plan-rules.md +0 -35
- package/.codex/skills/planning-with-files/scripts/new_plan.sh +0 -65
- package/.codex/skills/prd/SKILL.md +0 -235
- package/.codex/skills/ralph-loop/SKILL.md +0 -46
- package/.codex/skills/ralph-loop/agents/openai.yaml +0 -4
- package/.codex/skills/ralph-loop/references/failure-triage.md +0 -32
- package/.codex/skills/ralph-loop/scripts/loop_until_success.sh +0 -97
- package/sdd/99_toolchain/02_policies/otro-orchestration-policy.md +0 -30
|
@@ -1,84 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
import argparse
|
|
3
|
-
import os
|
|
4
|
-
import re
|
|
5
|
-
import sys
|
|
6
|
-
from typing import List
|
|
7
|
-
|
|
8
|
-
# Default location of the validation report this guard protects.
DEFAULT_PATH = ".codex/skills/otro/validation/validation.md"
# Header keys that must be present near the top of the file.
EXPECTED_KEYS: List[str] = ["Status", "Last updated"]
# Number of leading lines to scan; overridable via environment variable.
TOP_LINES_DEFAULT = int(os.environ.get("CHECK_HEADER_TOP_LINES", "30"))

# One compiled, case-sensitive pattern per expected key.
LINE_PATTERNS = {
    # Accept forms like:
    # Status: value
    # | Status | value |
    # - Status: value
    # * Status: value
    # > Status: value
    # Status | value
    key: re.compile(rf"^\s*(?:[|>*-]\s*)?{re.escape(key)}\s*(?::|\|)", re.UNICODE)
    for key in EXPECTED_KEYS
}
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
def load_lines(path: str, top_n: int) -> List[str]:
    """Return up to the first *top_n* lines of *path* (UTF-8).

    The original `[next(f) for _ in range(top_n)]` raised StopIteration
    out of the comprehension whenever the file had fewer than *top_n*
    lines, forcing the caller to catch it and re-read the whole file.
    `itertools.islice` simply yields the available prefix, which is the
    same final result without the exception round-trip.
    """
    from itertools import islice  # local import: keeps the module import block untouched

    with open(path, "r", encoding="utf-8") as f:
        return list(islice(f, top_n))
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
def find_keys(lines: List[str]):
    """Locate the first line matching each expected header pattern.

    Returns a mapping of key -> (1-based line number, line text without
    the trailing newline) for the first hit, or key -> None when the
    pattern never matched within *lines*.
    """
    hits = {}
    for key, pattern in LINE_PATTERNS.items():
        first_match = next(
            (
                (number, text.rstrip("\n"))
                for number, text in enumerate(lines, start=1)
                if pattern.search(text)
            ),
            None,
        )
        hits[key] = first_match
    return hits
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
def main():
    """Check that validation.md still carries the expected header keys.

    Exit codes: 0 when all keys are present, 1 when any key is missing,
    2 when the target file does not exist. Diagnostics are emitted as
    GitHub Actions workflow commands (::error / ::notice) on stderr/stdout.
    """
    parser = argparse.ArgumentParser(description="Guard validation.md header keys against drift")
    parser.add_argument("--file", default=DEFAULT_PATH, help="Path to validation.md (default: %(default)s)")
    parser.add_argument("--top", type=int, default=TOP_LINES_DEFAULT, help="Number of top lines to scan (default: %(default)s)")
    args = parser.parse_args()

    path = args.file
    top_n = args.top

    # Missing file is a distinct failure mode (exit 2) so CI can tell
    # "file gone" apart from "header drifted".
    if not os.path.isfile(path):
        print(f"::error file={path},title=validation.md missing::File not found", file=sys.stderr)
        return 2

    try:
        lines = load_lines(path, top_n)
    except StopIteration:
        # File shorter than requested; still use what we have
        with open(path, "r", encoding="utf-8") as f:
            lines = f.readlines()

    found = find_keys(lines)

    # Keys whose pattern never matched in the scanned prefix.
    missing = [k for k, v in found.items() if v is None]
    if missing:
        # One annotation per missing key, plus a human-readable summary line.
        for key in missing:
            print(
                f"::error file={path},title=Header key missing::Expected '{key}' in first {len(lines)} lines",
                file=sys.stderr,
            )
        print(
            f"Header guard failed: missing keys: {', '.join(missing)} (scanned top {len(lines)} lines)",
            file=sys.stderr,
        )
        return 1

    # Success summary
    for key, (ln, text) in found.items():
        print(f"::notice file={path},title=Header key OK::{key} at line {ln}: {text}")
    return 0


if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
|
|
@@ -1,303 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
from __future__ import annotations
|
|
3
|
-
|
|
4
|
-
import json
|
|
5
|
-
import re
|
|
6
|
-
import subprocess
|
|
7
|
-
from pathlib import Path
|
|
8
|
-
from typing import Any
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
def skill_dir_from_file(path: str) -> Path:
    """Resolve the skill root: two levels above the given script file."""
    script = Path(path).resolve()
    return script.parents[1]


def repo_root_from_skill_dir(skill_dir: Path) -> Path:
    """Walk three levels up from the skill directory to the repository root."""
    return skill_dir.parents[2]
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
def repo_root_from_run_dir(
    run_dir: Path,
    config: dict[str, Any] | None = None,
    *,
    skill_dir: Path | None = None,
) -> Path:
    """Determine the repository root for a run.

    Resolution order:
      1. an explicit, non-blank "repo_root" string in the run config
         (loaded from run_dir/config.json when *config* is not supplied;
         unparseable or absent config degrades to an empty dict);
      2. the nearest ancestor of *run_dir* (including itself) containing
         a ``.git`` entry;
      3. the skill-relative root via repo_root_from_skill_dir when
         *skill_dir* was given;
      4. *run_dir* itself as a last resort.
    """
    resolved_config = config
    if resolved_config is None:
        config_path = run_dir / "config.json"
        if config_path.exists():
            try:
                resolved_config = load_json(config_path)
            except json.JSONDecodeError:
                # Corrupt config is treated the same as no config.
                resolved_config = {}
        else:
            resolved_config = {}
    raw = resolved_config.get("repo_root")
    if isinstance(raw, str) and raw.strip():
        return Path(raw).expanduser().resolve()

    # Fall back to git discovery: first ancestor carrying a .git entry.
    for candidate in [run_dir, *run_dir.parents]:
        if (candidate / ".git").exists():
            return candidate.resolve()
    if skill_dir is not None:
        return repo_root_from_skill_dir(skill_dir).resolve()
    return run_dir.resolve()
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
def run_root_from_skill_dir(skill_dir: Path) -> Path:
    """All runs live under the skill's ``runs/`` directory."""
    return skill_dir.joinpath("runs")


def anchor_plan_path(run_dir: Path) -> Path:
    """Canonical location of the run's anchor plan document."""
    return run_dir.joinpath("plans", "anchor-plan.json")
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
def load_json(path: Path) -> Any:
    """Parse *path* as UTF-8 JSON and return the decoded value."""
    return json.loads(path.read_text(encoding="utf-8"))


def write_json(path: Path, payload: Any) -> None:
    """Serialize *payload* as pretty-printed UTF-8 JSON at *path*.

    Parent directories are created on demand; the file always ends in a
    single trailing newline (indent=2, non-ASCII kept literal).
    """
    serialized = json.dumps(payload, indent=2, ensure_ascii=False)
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(serialized + "\n", encoding="utf-8")
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
def load_text(path: Path) -> str:
    """Read *path* in full as UTF-8 text."""
    with path.open("r", encoding="utf-8") as handle:
        return handle.read()


def write_text(path: Path, content: str) -> None:
    """Write *content* to *path* as UTF-8, creating parent directories first."""
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("w", encoding="utf-8") as handle:
        handle.write(content)
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
def loop_results_path(run_dir: Path, loop_number: int) -> Path:
    """Canonical results file for a loop: results/loop-<n>.json."""
    return run_dir.joinpath("results", f"loop-{loop_number}.json")


def legacy_wave_results_path(run_dir: Path, loop_number: int) -> Path:
    """Legacy results file name kept for backward compatibility: results/wave-<n>.json."""
    return run_dir.joinpath("results", f"wave-{loop_number}.json")
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
def load_loop_results(run_dir: Path, loop_number: int) -> Any:
    """Load a loop's results, preferring the canonical filename.

    Tries results/loop-<n>.json first, then the legacy wave-<n>.json;
    raises FileNotFoundError naming both candidates when neither exists.
    """
    canonical = loop_results_path(run_dir, loop_number)
    legacy = legacy_wave_results_path(run_dir, loop_number)
    for candidate in (canonical, legacy):
        if candidate.exists():
            return load_json(candidate)
    raise FileNotFoundError(f"missing loop results: {canonical} (legacy fallback: {legacy})")
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
def write_loop_results(run_dir: Path, loop_number: int, payload: Any) -> None:
    """Persist loop results under both the canonical and legacy filenames."""
    targets = (
        loop_results_path(run_dir, loop_number),
        legacy_wave_results_path(run_dir, loop_number),
    )
    for target in targets:
        write_json(target, payload)
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
def loop_task_dir(run_dir: Path, loop_number: int, task_id: str) -> Path:
    """Canonical per-task workspace: loops/loop-<n>/<task_id>."""
    return run_dir.joinpath("loops", f"loop-{loop_number}", task_id)


def legacy_wave_task_dir(run_dir: Path, loop_number: int, task_id: str) -> Path:
    """Legacy per-task workspace layout: waves/wave-<n>/<task_id>."""
    return run_dir.joinpath("waves", f"wave-{loop_number}", task_id)
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
def task_step(task: dict[str, Any]) -> int:
    """Return the task's step number as an int, honoring the legacy 'wave' key.

    A present-but-None 'step' still raises KeyError, matching the original
    dict.get(..., default) lookup semantics.
    """
    if "step" in task:
        raw = task["step"]
    else:
        raw = task.get("wave")
    if raw is None:
        raise KeyError("task is missing step/wave")
    return int(raw)


def set_task_step(task: dict[str, Any], step_number: int) -> None:
    """Store the canonical 'step' key on *task* and drop any legacy 'wave' key."""
    task.pop("wave", None)
    task["step"] = int(step_number)
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
def plan_steps(plan: dict[str, Any]) -> list[dict[str, Any]]:
    """Return the plan's step list, accepting the legacy 'waves' key.

    The canonical 'steps' key wins; a non-list value under either key is
    ignored and an empty list is returned when nothing usable is found.
    """
    for field in ("steps", "waves"):
        value = plan.get(field)
        if isinstance(value, list):
            return value
    return []
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
def set_plan_steps(plan: dict[str, Any], steps: list[dict[str, Any]]) -> None:
    """Store *steps* under the canonical 'steps' key on *plan* in place.

    Each entry is shallow-copied. When an entry lacks 'step' but carries
    the legacy 'wave' key, 'wave' is renamed to 'step'; otherwise any
    'wave' key is dropped. The legacy top-level 'waves' key is removed.
    """
    canonical: list[dict[str, Any]] = []
    for entry in steps:
        item = dict(entry)
        if "step" in item:
            item.pop("wave", None)
        elif "wave" in item:
            item["step"] = item.pop("wave")
        canonical.append(item)
    plan["steps"] = canonical
    plan.pop("waves", None)
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
def normalize_completion_policy(plan: dict[str, Any]) -> None:
    """Rename legacy completion_policy.scope_done to repository_done, in place.

    No-op when completion_policy is absent or not a dict, or when
    repository_done is already present (scope_done is then left alone).
    """
    policy = plan.get("completion_policy")
    if not isinstance(policy, dict):
        return
    if "scope_done" in policy and "repository_done" not in policy:
        policy["repository_done"] = policy.pop("scope_done")
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
def canonicalize_plan(plan: dict[str, Any]) -> dict[str, Any]:
    """Return a shallow copy of *plan* migrated to the canonical schema.

    Normalization performed (originals are not mutated beyond the shared
    nested completion_policy dict touched by normalize_completion_policy):
      - completion_policy.scope_done -> repository_done;
      - every task: legacy 'wave' key renamed to 'step' (kept when 'step'
        already exists — 'wave' is dropped either way);
      - step entries (from 'steps' or legacy 'waves'): same wave->step
        rename, then stored under 'steps' via set_plan_steps, which also
        removes the top-level 'waves' key.
    """
    normalized = dict(plan)
    normalize_completion_policy(normalized)

    tasks = []
    for task in normalized.get("tasks", []):
        item = dict(task)
        if "step" not in item and "wave" in item:
            item["step"] = item["wave"]
        # 'wave' is removed unconditionally; 'step' wins when both exist.
        item.pop("wave", None)
        tasks.append(item)
    normalized["tasks"] = tasks

    steps = []
    for entry in plan_steps(normalized):
        item = dict(entry)
        if "step" not in item and "wave" in item:
            item["step"] = item["wave"]
        item.pop("wave", None)
        steps.append(item)
    set_plan_steps(normalized, steps)
    return normalized
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
def ensure_anchor_plan(run_dir: Path, source_plan_path: Path) -> Path:
    """Seed the run's anchor plan from *source_plan_path* exactly once.

    The copy happens only when the anchor file does not exist yet and the
    source does; the anchor path is returned either way.
    """
    anchor = anchor_plan_path(run_dir)
    if source_plan_path.exists() and not anchor.exists():
        write_text(anchor, load_text(source_plan_path))
    return anchor
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
def model_timeout_profile(model: str) -> dict[str, Any]:
    """Return planner/worker timeout settings (seconds) for *model*.

    The original branched on ``gpt-5(.1)?|codex`` in the model name but
    both branches returned byte-identical 300-second profiles, so the
    regex and the duplicated 13-line literal were dead differentiation.
    The model argument is retained (and deliberately unused) so callers
    keep working and per-model tuning can diverge here later.
    """
    del model  # currently every model shares the same profile
    timeout = 300
    kinds = ("analysis", "planning", "edit", "verify", "integration", "cleanup")
    return {
        "planner_timeout_seconds": timeout,
        "worker_timeout_seconds": timeout,
        "worker_timeout_seconds_by_kind": {kind: timeout for kind in kinds},
    }
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
def planner_timeout_seconds(config: dict[str, Any]) -> int:
    """Planner timeout: explicit config value, else the model-profile default."""
    explicit = config.get("planner_timeout_seconds")
    if explicit is None:
        profile = model_timeout_profile(str(config.get("model", "")))
        return int(profile["planner_timeout_seconds"])
    return int(explicit)
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
def worker_timeout_seconds(config: dict[str, Any], task_kind: str) -> int:
    """Resolve the worker timeout for *task_kind*.

    Precedence: per-kind config override, then the config-wide value,
    then the model profile's per-kind entry, then the profile default.
    """
    overrides = config.get("worker_timeout_seconds_by_kind")
    if isinstance(overrides, dict):
        override = overrides.get(task_kind)
        if override is not None:
            return int(override)

    blanket = config.get("worker_timeout_seconds")
    if blanket is not None:
        return int(blanket)

    profile = model_timeout_profile(str(config.get("model", "")))
    per_kind = profile.get("worker_timeout_seconds_by_kind", {})
    if task_kind in per_kind:
        return int(per_kind[task_kind])
    return int(profile["worker_timeout_seconds"])
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
def salvage_json_output(log_path: Path, output_path: Path) -> bool:
    """Recover the last whole-line JSON object from a log file.

    Scans the log bottom-up for a stripped line that both looks like and
    parses as a JSON object; on success it is persisted to *output_path*
    via write_json and True is returned. Returns False when the log is
    missing or contains no parseable object line.
    """
    if not log_path.exists():
        return False
    text = log_path.read_text(encoding="utf-8", errors="replace")
    for raw_line in reversed(text.splitlines()):
        stripped = raw_line.strip()
        if not (stripped.startswith("{") and stripped.endswith("}")):
            continue
        try:
            parsed = json.loads(stripped)
        except json.JSONDecodeError:
            continue
        write_json(output_path, parsed)
        return True
    return False
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
def run_codex_exec(
    *,
    repo_root: Path,
    prompt: str,
    model: str,
    schema_path: Path,
    output_path: Path,
    log_path: Path,
    timeout_seconds: int = 180,
    extra_args: list[str] | None = None,
) -> subprocess.CompletedProcess[str]:
    """Invoke `codex exec` with *prompt* on stdin, schema-constrained output.

    Combined stdout/stderr is streamed to *log_path*; the model's last
    message lands in *output_path* (written by the codex CLI itself).
    On timeout a synthetic CompletedProcess with returncode 124 (the
    conventional `timeout(1)` exit code) is returned and a marker line is
    appended to the log. check=False: callers inspect returncode.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    log_path.parent.mkdir(parents=True, exist_ok=True)
    cmd = [
        "codex",
        "exec",
        "--dangerously-bypass-approvals-and-sandbox",
        "-c",
        "model_reasoning_effort=high",
        "--cd",
        str(repo_root),
        "--model",
        model,
        "--output-schema",
        str(schema_path),
        "--output-last-message",
        str(output_path),
        "-",
    ]
    if extra_args:
        # Splice extra flags immediately after "codex exec", ahead of the defaults.
        cmd[2:2] = extra_args
    with log_path.open("w", encoding="utf-8") as log_handle:
        try:
            return subprocess.run(
                cmd,
                input=prompt,
                text=True,
                stdout=log_handle,
                stderr=subprocess.STDOUT,
                check=False,
                timeout=timeout_seconds,
            )
        except subprocess.TimeoutExpired:
            log_handle.write(f"\n[TIMEOUT after {timeout_seconds}s]\n")
            log_handle.flush()
            return subprocess.CompletedProcess(cmd, 124)
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
def summarize_completed_process(result: subprocess.CompletedProcess[str]) -> dict[str, Any]:
    """Reduce a CompletedProcess to the JSON-friendly field set we persist."""
    summary: dict[str, Any] = {"returncode": result.returncode}
    return summary
|
|
@@ -1,68 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env bash
# Initialize the directory skeleton and seed files for a new otro run.
# Usage: init_run.sh <run-name>  (run from the target repository root,
# since repo_root is captured from the current working directory).
set -euo pipefail

if [[ $# -lt 1 ]]; then
  echo "usage: $0 <run-name>" >&2
  exit 1
fi

# Resolve the skill directory relative to this script's own location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SKILL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
RUN_NAME="$1"
RUN_DIR="$SKILL_DIR/runs/$RUN_NAME"

# Physical (symlink-free) path of the invoking directory becomes repo_root.
REPO_ROOT="$(pwd -P)"

# "waves" is kept alongside "steps" for legacy layout compatibility.
mkdir -p "$RUN_DIR"/{plans,results,steps,waves,logs}

# Quoted heredoc ('EOF'): goal.md template is written verbatim, no expansion.
cat > "$RUN_DIR/goal.md" <<'EOF'
# Goal

Replace this text with the repository-wide objective.

# Constraints

- Keep tasks disjoint by owned file paths inside the same loop.
- Reconcile integration after every loop before dispatching the next loop.

# Done When

- Define these three levels explicitly:
  - loop_done: current plan backlog is exhausted.
  - run_done: a fresh repository-wide rescan after loop_done finds no materially new tasks.
  - repository_done: run_done plus repository-level deployment and verification gates pass.
EOF

# Unquoted heredoc: $RUN_NAME / $REPO_ROOT are expanded into config.json.
cat > "$RUN_DIR/config.json" <<EOF
{
  "run_name": "$RUN_NAME",
  "repo_root": "$REPO_ROOT",
  "model": "gpt-5",
  "max_parallel": "all",
  "max_tasks_per_step": 10000,
  "max_tasks_per_wave": 10000,
  "max_tasks_total": 50000,
  "planner_timeout_seconds": 300,
  "worker_timeout_seconds": 300,
  "worker_timeout_seconds_by_kind": {
    "analysis": 300,
    "planning": 300,
    "edit": 300,
    "verify": 300,
    "integration": 300,
    "cleanup": 300
  }
}
EOF

# Initial run state: counters start at zero; current_wave mirrors
# current_step for legacy consumers.
cat > "$RUN_DIR/state.json" <<EOF
{
  "run_name": "$RUN_NAME",
  "plan_version": 0,
  "current_loop": 0,
  "current_step": 0,
  "current_wave": 0
}
EOF

echo "initialized $RUN_DIR"
|