autopilot-code 0.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.autopilot/autopilot.json +15 -0
- package/LICENSE +21 -0
- package/README.md +119 -0
- package/dist/cli.js +58 -0
- package/package.json +32 -0
- package/scripts/run_autopilot.py +293 -0
- package/scripts/run_opencode_issue.sh +86 -0
- package/templates/autopilot.json +14 -0

package/.autopilot/autopilot.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "enabled": true,
+  "repo": "bakkensoftware/autopilot",
+  "agent": "opencode",
+  "issueLabels": {
+    "queue": ["autopilot:todo"],
+    "blocked": "autopilot:blocked",
+    "inProgress": "autopilot:in-progress",
+    "done": "autopilot:done"
+  },
+  "priorityLabels": ["p0", "p1", "p2"],
+  "maxParallel": 1,
+  "branchPrefix": "autopilot/",
+  "allowedBaseBranch": "main"
+}
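
The config above names a fixed set of workflow labels (`autopilot:todo`, `autopilot:in-progress`, `autopilot:blocked`, `autopilot:done`, plus `p0`/`p1`/`p2`), but nothing in the package creates them in the GitHub repo. A minimal bootstrap sketch using the GitHub CLI, assuming the labels do not exist yet; the error suppression is an assumption, not part of the package:

```bash
# Sketch only: pre-create the labels the runner searches for.
# Label names come from the config above; everything else here is an assumption.
REPO="bakkensoftware/autopilot"
for label in "autopilot:todo" "autopilot:in-progress" "autopilot:blocked" "autopilot:done" p0 p1 p2; do
  gh label create "$label" --repo "$REPO" || true   # ignore errors if a label already exists
done
```
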
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2026 Bakken Software
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,119 @@
+# autopilot
+
+Repo-issue–driven autopilot runner.
+
+## What it is
+Autopilot is a **GitHub-issue–driven** automation loop:
+
+- repos **opt in** by committing `.autopilot/autopilot.json`
+- work lives in **GitHub Issues**
+- the runner advances issues through a **label-based workflow**
+- the runner is designed to determine “is work still happening?” using **durable artifacts** (GitHub + repo files), **not process inspection** (`ps`, PID checks, etc.)
+
+## Repo opt-in
+A repo is considered autopilot-enabled when it contains:
+
+- `.autopilot/autopilot.json` with `enabled: true`
+
+Example:
+
+```json
+{
+  "enabled": true,
+  "repo": "bakkensoftware/autopilot",
+  "agent": "opencode",
+  "issueLabels": {
+    "queue": ["autopilot:todo"],
+    "blocked": "autopilot:blocked",
+    "inProgress": "autopilot:in-progress",
+    "done": "autopilot:done"
+  },
+  "priorityLabels": ["p0", "p1", "p2"],
+  "maxParallel": 1,
+  "heartbeatMaxAgeSecs": 3600,
+  "branchPrefix": "autopilot/",
+  "allowedBaseBranch": "main"
+}
+```
+
+Notes:
+- `repo` must be the GitHub `owner/name`.
+- `agent` (optional, default `"none"`): set to `"opencode"` to enable OpenCode integration after claiming issues.
+- `heartbeatMaxAgeSecs` controls how long an in-progress issue can go without a heartbeat before it's considered stale.
+
+## Workflow (labels)
+Autopilot uses labels as a kanban state machine:
+
+- `autopilot:backlog` — captured, not ready
+- `autopilot:todo` — ready to be picked up by the runner
+- `autopilot:in-progress` — claimed by autopilot
+- `autopilot:blocked` — needs human input or missing/stale heartbeat
+- `autopilot:done` — completed
+
+Optional priority labels:
+- `p0`, `p1`, `p2` (lower number = higher priority)
+
+## How claiming works
+When the runner finds a candidate issue (typically `autopilot:todo`):
+
+1. It applies `autopilot:in-progress`
+2. It removes the queue label(s) (e.g. `autopilot:todo`)
+3. It leaves a comment indicating the claim time and next step
+
+## Durable tracking (no process inspection)
+This runner intentionally **does not** check local processes to decide if work is ongoing.
+
+Instead it uses durable artifacts:
+
+- **GitHub:** issue labels + issue comments + (future) PR presence/status
+- **Repo file heartbeat:** `.autopilot/state.json`
+
+The runner writes/updates `.autopilot/state.json` like:
+
+```json
+{
+  "activeIssue": {
+    "number": 2,
+    "repo": "bakkensoftware/autopilot",
+    "updatedAt": 1738000000
+  }
+}
+```
+
+On each loop:
+- if an issue is `autopilot:in-progress` but the heartbeat is stale/missing, autopilot comments and moves it to `autopilot:blocked`.
+
+## Running locally
+### Python runner
+```bash
+python3 scripts/run_autopilot.py --root /mnt/f/Source
+```
+
+### Node CLI wrapper
+From the repo root:
+
+```bash
+npm install
+npm run build
+
+# sanity checks
+node dist/cli.js doctor
+
+# scan enabled repos without claiming
+node dist/cli.js scan --root /mnt/f/Source
+
+# claim exactly one issue + comment
+node dist/cli.js run-once --root /mnt/f/Source
+```
+
+## Roadmap
+- Spawn a coding agent (Claude Code / OpenCode) in a worktree per issue
+- Create PRs linked to issues; wait for checks to go green
+- Merge PRs automatically when mergeable + checks pass
+- Close issues + apply `autopilot:done`
+- Richer durable state:
+  - PR number / branch name / last heartbeat comment
+  - “blocked reason” codified
+
+## Config template
+See `templates/autopilot.json`.
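
For reference, the claim step the README describes corresponds to two GitHub CLI calls (this is a sketch of what `scripts/run_autopilot.py` does in `claim_issue`; the issue number `42` and `owner/name` are placeholders):

```bash
# Sketch: move an issue from the queue to in-progress, then leave the claim comment.
gh issue edit 42 --repo owner/name \
  --add-label "autopilot:in-progress" \
  --remove-label "autopilot:todo"
gh issue comment 42 --repo owner/name \
  --body "Autopilot claimed this issue. Next: implement fix and open PR."
```
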
package/dist/cli.js
ADDED
@@ -0,0 +1,58 @@
+#!/usr/bin/env node
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+const commander_1 = require("commander");
+const node_child_process_1 = require("node:child_process");
+const node_path_1 = __importDefault(require("node:path"));
+function run(cmd, args) {
+    const res = (0, node_child_process_1.spawnSync)(cmd, args, { stdio: "inherit" });
+    if (res.error)
+        throw res.error;
+    if (typeof res.status === "number" && res.status !== 0)
+        process.exit(res.status);
+}
+function check(cmd, args, label) {
+    const res = (0, node_child_process_1.spawnSync)(cmd, args, { stdio: "inherit" });
+    if (res.error) {
+        console.error(`\n${label}: failed to run: ${res.error.message}`);
+        process.exit(1);
+    }
+    if (typeof res.status === "number" && res.status !== 0) {
+        console.error(`\n${label}: returned exit code ${res.status}`);
+        process.exit(res.status);
+    }
+}
+const repoRoot = node_path_1.default.resolve(__dirname, "..");
+const runnerPath = node_path_1.default.join(repoRoot, "scripts", "run_autopilot.py");
+const program = new commander_1.Command();
+program
+    .name("autopilot")
+    .description("Repo-issue–driven autopilot runner (CLI wrapper)")
+    .version("0.0.0");
+program
+    .command("doctor")
+    .description("Check local prerequisites (gh auth, python3)")
+    .action(() => {
+    check("gh", ["--version"], "gh");
+    check("gh", ["auth", "status"], "gh auth");
+    check("python3", ["--version"], "python3");
+    console.log("\nOK");
+});
+program
+    .command("scan")
+    .description("Discover autopilot-enabled repos + show next issue candidate (dry-run)")
+    .requiredOption("--root <path>", "Root folder that contains git repos")
+    .action((opts) => {
+    run("python3", [runnerPath, "--root", opts.root, "--dry-run"]);
+});
+program
+    .command("run-once")
+    .description("Claim exactly one issue and post a progress comment")
+    .requiredOption("--root <path>", "Root folder that contains git repos")
+    .action((opts) => {
+    run("python3", [runnerPath, "--root", opts.root]);
+});
+program.parse();
package/package.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "name": "autopilot-code",
+  "version": "0.0.4",
+  "private": false,
+  "description": "Repo-issue–driven autopilot runner",
+  "license": "MIT",
+  "bin": {
+    "autopilot": "dist/cli.js"
+  },
+  "main": "dist/cli.js",
+  "files": [
+    "dist/",
+    "scripts/",
+    "templates/",
+    ".autopilot/"
+  ],
+  "scripts": {
+    "build": "tsc -p tsconfig.json",
+    "lint": "node -e \"console.log('lint: (not configured)')\"",
+    "test": "node -e \"console.log('test: (not configured)')\"",
+    "release": "changeset publish",
+    "version-packages": "changeset version"
+  },
+  "dependencies": {
+    "commander": "^12.1.0"
+  },
+  "devDependencies": {
+    "@changesets/cli": "^2.29.8",
+    "@types/node": "^22.10.2",
+    "typescript": "^5.7.2"
+  }
+}
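
Because `package.json` declares a single `autopilot` bin, the CLI can also be used from a global install instead of `node dist/cli.js`. A sketch, assuming npm and the published package name `autopilot-code`:

```bash
# Sketch: install the published package globally and use the `autopilot` bin.
npm install -g autopilot-code
autopilot doctor                         # checks gh, gh auth, python3
autopilot scan --root /mnt/f/Source      # dry-run discovery
autopilot run-once --root /mnt/f/Source  # claim one issue and comment
```
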
package/scripts/run_autopilot.py
ADDED
@@ -0,0 +1,293 @@
+#!/usr/bin/env python3
+"""Issue-driven autopilot runner.
+
+MVP: discovery + issue queue + claim + notify + durable state tracking.
+Later: spawn coding agent, open PR, merge, close issue.
+
+Design constraint: the runner must NOT rely on local process inspection (ps, pid files).
+It should infer progress using durable artifacts:
+- GitHub (labels/comments/PRs)
+- repo files under .autopilot/ (local state)
+
+Designed to run from a cron/loop.
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import os
+import subprocess
+import sys
+import time
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any
+
+STATE_DIR = ".autopilot"
+STATE_FILE = "state.json"
+DEFAULT_HEARTBEAT_MAX_AGE_SECS = 60 * 60  # 1h
+
+
+def sh(cmd: list[str], cwd: Path | None = None, check: bool = True) -> str:
+    p = subprocess.run(cmd, cwd=str(cwd) if cwd else None, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    if check and p.returncode != 0:
+        raise RuntimeError(f"command failed ({p.returncode}): {' '.join(cmd)}\n{p.stdout}")
+    return p.stdout
+
+
+@dataclass
+class RepoConfig:
+    root: Path
+    repo: str
+    enabled: bool
+    queue_labels: list[str]
+    label_in_progress: str
+    label_blocked: str
+    label_done: str
+    priority_labels: list[str]
+    max_parallel: int
+    heartbeat_max_age_secs: int
+    agent: str
+
+
+def load_config(repo_root: Path) -> RepoConfig | None:
+    cfg_path = repo_root / ".autopilot" / "autopilot.json"
+    if not cfg_path.exists():
+        return None
+    data = json.loads(cfg_path.read_text(encoding="utf-8"))
+    if not data.get("enabled", False):
+        return None
+
+    labels = data.get("issueLabels", {})
+    queue = labels.get("queue", ["autopilot:todo"])
+
+    return RepoConfig(
+        root=repo_root,
+        repo=data["repo"],
+        enabled=True,
+        queue_labels=queue,
+        label_in_progress=labels.get("inProgress", "autopilot:in-progress"),
+        label_blocked=labels.get("blocked", "autopilot:blocked"),
+        label_done=labels.get("done", "autopilot:done"),
+        priority_labels=data.get("priorityLabels", ["p0", "p1", "p2"]),
+        max_parallel=int(data.get("maxParallel", 2)),
+        heartbeat_max_age_secs=int(data.get("heartbeatMaxAgeSecs", DEFAULT_HEARTBEAT_MAX_AGE_SECS)),
+        agent=data.get("agent", "none"),
+    )
+
+
+def discover_repos(root: Path) -> list[RepoConfig]:
+    out: list[RepoConfig] = []
+    for child in root.iterdir():
+        if not child.is_dir():
+            continue
+        if (child / ".git").exists() and (child / ".autopilot" / "autopilot.json").exists():
+            cfg = load_config(child)
+            if cfg:
+                out.append(cfg)
+    return out
+
+
+def list_candidate_issues(cfg: RepoConfig, limit: int = 10) -> list[dict[str, Any]]:
+    # Sort by created date asc; prioritize p0/p1/p2 by label presence.
+    q = " ".join([f"label:{l}" for l in cfg.queue_labels])
+    cmd = [
+        "gh",
+        "issue",
+        "list",
+        "--repo",
+        cfg.repo,
+        "--limit",
+        str(limit),
+        "--search",
+        f"is:open {q}",
+        "--json",
+        "number,title,labels,updatedAt,createdAt,url",
+    ]
+    raw = sh(cmd)
+    issues = json.loads(raw)
+
+    def pri_rank(it: dict[str, Any]) -> int:
+        labs = {l["name"] for l in it.get("labels", [])}
+        for i, p in enumerate(cfg.priority_labels):
+            if p in labs:
+                return i
+        return 999
+
+    issues.sort(key=lambda x: (pri_rank(x), x.get("createdAt", "")))
+    return issues
+
+
+def _state_path(repo_root: Path) -> Path:
+    return repo_root / STATE_DIR / STATE_FILE
+
+
+def load_state(repo_root: Path) -> dict[str, Any]:
+    p = _state_path(repo_root)
+    if not p.exists():
+        return {}
+    try:
+        return json.loads(p.read_text(encoding="utf-8"))
+    except Exception:
+        return {}
+
+
+def write_state(repo_root: Path, state: dict[str, Any]) -> None:
+    d = repo_root / STATE_DIR
+    d.mkdir(parents=True, exist_ok=True)
+    p = d / STATE_FILE
+    p.write_text(json.dumps(state, indent=2, sort_keys=True) + "\n", encoding="utf-8")
+
+
+def touch_heartbeat(cfg: RepoConfig, issue_number: int) -> None:
+    """Update durable local heartbeat state for the repo.
+
+    This is the mechanism we use to infer liveness WITHOUT process inspection.
+    """
+    state = load_state(cfg.root)
+    now = int(time.time())
+    state["activeIssue"] = {
+        "number": issue_number,
+        "updatedAt": now,
+        "repo": cfg.repo,
+    }
+    write_state(cfg.root, state)
+
+
+def is_heartbeat_fresh(cfg: RepoConfig, issue_number: int) -> bool:
+    state = load_state(cfg.root)
+    active = state.get("activeIssue")
+    if not isinstance(active, dict):
+        return False
+    if int(active.get("number", -1)) != int(issue_number):
+        return False
+    updated = int(active.get("updatedAt", 0))
+    age = int(time.time()) - updated
+    return age <= cfg.heartbeat_max_age_secs
+
+
+def claim_issue(cfg: RepoConfig, issue: dict[str, Any], note: str) -> None:
+    num = int(issue["number"])
+    sh(
+        [
+            "gh",
+            "issue",
+            "edit",
+            str(num),
+            "--repo",
+            cfg.repo,
+            "--add-label",
+            cfg.label_in_progress,
+            "--remove-label",
+            ",".join(cfg.queue_labels),
+        ]
+    )
+    sh(["gh", "issue", "comment", str(num), "--repo", cfg.repo, "--body", note])
+    touch_heartbeat(cfg, num)
+
+
+def list_in_progress_issues(cfg: RepoConfig, limit: int = 20) -> list[dict[str, Any]]:
+    cmd = [
+        "gh",
+        "issue",
+        "list",
+        "--repo",
+        cfg.repo,
+        "--limit",
+        str(limit),
+        "--search",
+        f"is:open label:{cfg.label_in_progress}",
+        "--json",
+        "number,title,labels,updatedAt,createdAt,url",
+    ]
+    raw = sh(cmd)
+    return json.loads(raw)
+
+
+def maybe_mark_blocked(cfg: RepoConfig, issue: dict[str, Any]) -> None:
+    num = int(issue["number"])
+    if is_heartbeat_fresh(cfg, num):
+        return
+
+    note = (
+        "Autopilot marking this issue **blocked** because no durable heartbeat was updated "
+        f"in the last {cfg.heartbeat_max_age_secs} seconds.\n\n"
+        "This runner intentionally does **not** inspect local processes, so a fresh heartbeat "
+        "is how we infer work is ongoing.\n\n"
+        "Reply here if work is still in progress (or re-label as `autopilot:todo` to re-queue)."
+    )
+    sh(["gh", "issue", "comment", str(num), "--repo", cfg.repo, "--body", note])
+    sh(
+        [
+            "gh",
+            "issue",
+            "edit",
+            str(num),
+            "--repo",
+            cfg.repo,
+            "--add-label",
+            cfg.label_blocked,
+            "--remove-label",
+            cfg.label_in_progress,
+        ]
+    )
+
+
+def main() -> int:
+    ap = argparse.ArgumentParser()
+    ap.add_argument("--root", default="/mnt/f/Source")
+    ap.add_argument("--max-parallel", type=int, default=2)
+    ap.add_argument("--dry-run", action="store_true")
+    args = ap.parse_args()
+
+    root = Path(args.root)
+    configs = discover_repos(root)
+    if not configs:
+        print("No autopilot-enabled repos found.")
+        return 0
+
+    for cfg in configs:
+        # 1) First, check any in-progress issues and mark blocked if stale.
+        inprog = list_in_progress_issues(cfg)
+        for it in inprog:
+            # This is conservative: only mark blocked if we have no fresh local heartbeat.
+            if not args.dry_run:
+                maybe_mark_blocked(cfg, it)
+
+        # 2) If nothing is currently in progress, claim one from the queue.
+        # (We only support one active issue per repo in this MVP.)
+        if inprog:
+            continue
+
+        issues = list_candidate_issues(cfg)
+        if not issues:
+            continue
+
+        issue = issues[0]
+        msg = (
+            f"Autopilot claimed this issue at {time.strftime('%Y-%m-%d %H:%M:%S %Z')}.\n\n"
+            "Next: implement fix and open PR.\n\n"
+            "(Durable tracking: this repo will maintain `.autopilot/state.json` as a heartbeat; "
+            "the runner does not inspect processes.)"
+        )
+        print(f"[{cfg.repo}] next issue: #{issue['number']} {issue['title']}")
+        if args.dry_run:
+            continue
+        claim_issue(cfg, issue, msg)
+
+        # If agent==opencode, delegate to bash script
+        if cfg.agent == "opencode":
+            sh(
+                [
+                    "/home/kellye/clawd/repos/autopilot/scripts/run_opencode_issue.sh",
+                    str(cfg.root),
+                    str(issue["number"]),
+                ]
+            )
+
+    return 0
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
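
The runner's docstring says it is designed to be driven from a cron job or loop; the package does not ship a scheduler. A minimal loop sketch, assuming the README's root path and a 15-minute interval (both are assumptions):

```bash
# Sketch: poll indefinitely; interval and root path are assumptions, not part of the package.
while true; do
  python3 scripts/run_autopilot.py --root /mnt/f/Source || true
  sleep 900
done
```
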
package/scripts/run_opencode_issue.sh
ADDED
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+REPO_DIR="$1"
+ISSUE_NUMBER="$2"
+
+if [[ -z "$REPO_DIR" || -z "$ISSUE_NUMBER" ]]; then
+  echo "Usage: $0 <repoDir> <issueNumber>" >&2
+  exit 1
+fi
+
+cd "$REPO_DIR"
+
+# Read repo name from config (prefer jq, fallback to python3)
+if command -v jq >/dev/null 2>&1; then
+  REPO=$(jq -r '.repo' < .autopilot/autopilot.json)
+else
+  REPO=$(python3 -c 'import json; print(json.load(open(".autopilot/autopilot.json"))["repo"])')
+fi
+
+WORKTREE="/tmp/autopilot-issue-$ISSUE_NUMBER"
+BRANCH="autopilot/issue-$ISSUE_NUMBER"
+
+echo "[run_opencode_issue.sh] repo=$REPO issue=$ISSUE_NUMBER worktree=$WORKTREE"
+
+# 1. Create or reuse git worktree from main
+if [[ -d "$WORKTREE" ]]; then
+  echo "[run_opencode_issue.sh] Reusing existing worktree: $WORKTREE"
+else
+  # If branch exists already, add worktree from that branch; otherwise create from main.
+  if git show-ref --verify --quiet "refs/heads/$BRANCH"; then
+    git worktree add "$WORKTREE" "$BRANCH"
+  else
+    git worktree add "$WORKTREE" -b "$BRANCH" main
+  fi
+fi
+
+# 2. Fetch issue title/body
+ISSUE_JSON=$(gh issue view "$ISSUE_NUMBER" --repo "$REPO" --json title,body)
+if command -v jq >/dev/null 2>&1; then
+  ISSUE_TITLE=$(echo "$ISSUE_JSON" | jq -r '.title')
+  ISSUE_BODY=$(echo "$ISSUE_JSON" | jq -r '.body')
+else
+  ISSUE_TITLE=$(python3 -c 'import json,sys; d=json.load(sys.stdin); print(d["title"])' <<<"$ISSUE_JSON")
+  ISSUE_BODY=$(python3 -c 'import json,sys; d=json.load(sys.stdin); print(d.get("body") or "")' <<<"$ISSUE_JSON")
+fi
+
+# 3. Build prompt
+PROMPT="Please implement the following GitHub issue.
+
+Issue #$ISSUE_NUMBER: $ISSUE_TITLE
+
+$ISSUE_BODY
+
+Work rules:
+- Make the necessary code changes.
+- Commit with message: \"autopilot: work for issue #$ISSUE_NUMBER\".
+- Push your changes to the remote branch $BRANCH.
+- If the issue is a simple file-addition, just do it directly (no extra refactors)."
+# 4. Run opencode inside worktree
+cd "$WORKTREE"
+opencode run "$PROMPT"
+
+# 5. Commit any changes OpenCode made
+if [[ -n "$(git status --porcelain)" ]]; then
+  git add -A
+  git commit -m "autopilot: work for issue #$ISSUE_NUMBER"
+fi
+
+# 6. Ensure branch is pushed (no-op if already up to date)
+git push -u origin "$BRANCH" || true
+
+# 7. Create PR if one doesn't already exist
+PR_URL=""
+if gh pr view --repo "$REPO" --head "$BRANCH" --json url --jq .url >/dev/null 2>&1; then
+  PR_URL=$(gh pr view --repo "$REPO" --head "$BRANCH" --json url --jq .url)
+else
+  PR_URL=$(gh pr create --repo "$REPO" --title "Autopilot: Issue #$ISSUE_NUMBER" --body "Closes #$ISSUE_NUMBER" --base main --head "$BRANCH")
+fi
+
+# 8. Comment on issue with PR URL (best-effort)
+if [[ -n "$PR_URL" ]]; then
+  gh issue comment "$ISSUE_NUMBER" --repo "$REPO" --body "Autopilot opened PR: $PR_URL" || true
+fi
+
+echo "[run_opencode_issue.sh] Done: $PR_URL"
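
The script is normally invoked by the runner when `agent` is set to `"opencode"`, but its usage line also allows running it by hand. A sketch, assuming `gh` is authenticated, `opencode` is on `PATH`, and the target repo has a `main` branch (the path and issue number are placeholders):

```bash
# Sketch: point the script at a repo checkout and an open issue number.
./scripts/run_opencode_issue.sh /mnt/f/Source/some-repo 42
```
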
package/templates/autopilot.json
ADDED
@@ -0,0 +1,14 @@
+{
+  "enabled": true,
+  "repo": "bakkensoftware/TicketToolbox",
+  "issueLabels": {
+    "queue": ["autopilot:todo"],
+    "blocked": "autopilot:blocked",
+    "inProgress": "autopilot:in-progress",
+    "done": "autopilot:done"
+  },
+  "priorityLabels": ["p0", "p1", "p2"],
+  "maxParallel": 2,
+  "branchPrefix": "autopilot/",
+  "allowedBaseBranch": "main"
+}
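
Per the README's "Repo opt-in" and "Config template" sections, enabling a repo amounts to committing an edited copy of this template. A sketch of those steps; the template path and commit message are assumptions, and the `repo` field must be changed to the target repo's `owner/name` before committing:

```bash
# Sketch: opt a repo in by committing an edited copy of templates/autopilot.json.
mkdir -p .autopilot
cp /path/to/autopilot-code/templates/autopilot.json .autopilot/autopilot.json
# edit .autopilot/autopilot.json: set "repo" to this repo's owner/name
git add .autopilot/autopilot.json
git commit -m "Enable autopilot"
```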