ctrlrelay 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ctrlrelay/__init__.py +8 -0
- ctrlrelay/bridge/__init__.py +21 -0
- ctrlrelay/bridge/__main__.py +69 -0
- ctrlrelay/bridge/protocol.py +75 -0
- ctrlrelay/bridge/server.py +285 -0
- ctrlrelay/bridge/telegram_handler.py +117 -0
- ctrlrelay/cli.py +1449 -0
- ctrlrelay/core/__init__.py +54 -0
- ctrlrelay/core/audit.py +257 -0
- ctrlrelay/core/checkpoint.py +155 -0
- ctrlrelay/core/config.py +291 -0
- ctrlrelay/core/dispatcher.py +202 -0
- ctrlrelay/core/github.py +272 -0
- ctrlrelay/core/obs.py +118 -0
- ctrlrelay/core/poller.py +319 -0
- ctrlrelay/core/pr_verifier.py +177 -0
- ctrlrelay/core/pr_watcher.py +121 -0
- ctrlrelay/core/scheduler.py +337 -0
- ctrlrelay/core/state.py +167 -0
- ctrlrelay/core/worktree.py +673 -0
- ctrlrelay/dashboard/__init__.py +5 -0
- ctrlrelay/dashboard/client.py +159 -0
- ctrlrelay/pipelines/__init__.py +15 -0
- ctrlrelay/pipelines/base.py +50 -0
- ctrlrelay/pipelines/dev.py +562 -0
- ctrlrelay/pipelines/post_merge.py +279 -0
- ctrlrelay/pipelines/secops.py +379 -0
- ctrlrelay/transports/__init__.py +33 -0
- ctrlrelay/transports/base.py +47 -0
- ctrlrelay/transports/file_mock.py +94 -0
- ctrlrelay/transports/socket_client.py +180 -0
- ctrlrelay-0.1.5.dist-info/METADATA +251 -0
- ctrlrelay-0.1.5.dist-info/RECORD +36 -0
- ctrlrelay-0.1.5.dist-info/WHEEL +4 -0
- ctrlrelay-0.1.5.dist-info/entry_points.txt +2 -0
- ctrlrelay-0.1.5.dist-info/licenses/LICENSE +201 -0
ctrlrelay/cli.py
ADDED
|
@@ -0,0 +1,1449 @@
|
|
|
1
|
+
"""CLI entry point for ctrlrelay."""
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
import typer
|
|
6
|
+
from rich.console import Console
|
|
7
|
+
from rich.table import Table
|
|
8
|
+
|
|
9
|
+
from ctrlrelay import __version__
|
|
10
|
+
from ctrlrelay.core.config import ConfigError, load_config
|
|
11
|
+
|
|
12
|
+
# Top-level Typer application and the shared Rich console.
# `no_args_is_help=True` makes a bare `ctrlrelay` invocation print help
# instead of silently doing nothing.
app = typer.Typer(
    name="ctrlrelay",
    help="Local-first orchestrator for Claude Code across multiple GitHub repos.",
    no_args_is_help=True,
)
# Single shared console so every command renders through one Rich instance.
console = Console()
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def version_callback(value: bool) -> None:
    """Eager ``--version`` callback: print the version and stop processing."""
    if not value:
        return
    console.print(f"ctrlrelay version {__version__}")
    raise typer.Exit()
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@app.callback()
def main(
    version: bool = typer.Option(
        False,
        "--version",
        "-v",
        # Eager callback: Typer runs it before resolving any subcommand,
        # and the callback raises typer.Exit() after printing.
        callback=version_callback,
        is_eager=True,
        help="Show version and exit.",
    ),
) -> None:
    """ctrlrelay orchestrator CLI."""
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
# Subcommand groups
# `ctrlrelay config ...` — configuration validation and inspection.
config_app = typer.Typer(help="Configuration commands.")
app.add_typer(config_app, name="config")
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@config_app.command("validate")
def config_validate(
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """Validate orchestrator.yaml configuration."""
    cfg_file = Path(config_path)

    # A missing file gets its own message; parse/schema problems are
    # surfaced via ConfigError below.
    if not cfg_file.exists():
        console.print(f"[red]Error:[/red] Config file not found: {cfg_file}")
        raise typer.Exit(1)

    try:
        cfg = load_config(cfg_file)
    except ConfigError as e:
        console.print(f"[red]Validation failed:[/red] {e}")
        raise typer.Exit(1)

    # Echo the key top-level settings so the operator can eyeball them.
    console.print(f"[green]✓[/green] Config valid: {cfg_file}")
    console.print(f" Node ID: {cfg.node_id}")
    console.print(f" Timezone: {cfg.timezone}")
    console.print(f" Transport: {cfg.transport.type.value}")
    console.print(f" Repos: {len(cfg.repos)}")
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
@config_app.command("repos")
def config_repos(
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """List configured repositories."""
    cfg_file = Path(config_path)

    try:
        cfg = load_config(cfg_file)
    except ConfigError as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)

    if not cfg.repos:
        console.print("[yellow]No repositories configured.[/yellow]")
        return

    # One row per repo; the Deploy column shows "-" when the repo has no
    # deploy target configured.
    table = Table(title="Configured Repositories")
    for header, style in (("Name", "cyan"), ("Path", "dim"), ("Deploy", "green")):
        table.add_column(header, style=style)

    for entry in cfg.repos:
        provider = entry.deploy.provider if entry.deploy else "-"
        table.add_row(entry.name, str(entry.local_path), provider)

    console.print(table)
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
# Skills subcommand group
# `ctrlrelay skills ...` — discovery and audit of agent skills.
skills_app = typer.Typer(help="Skill management commands.")
app.add_typer(skills_app, name="skills")
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def _resolve_skills_dir(skills_path: str | None, config_path: str) -> Path:
    """Resolve the skills directory, preferring an explicit --path over config.

    Exits with code 1 (via typer.Exit) when the config cannot be loaded or
    the resolved directory does not exist.
    """
    if skills_path is None:
        # Fall back to the config; a broken config is fatal here because we
        # have no other way to locate the skills directory.
        try:
            cfg = load_config(config_path)
        except ConfigError as e:
            console.print(f"[red]Error loading config:[/red] {e}")
            console.print("Use --path to specify skills directory directly.")
            raise typer.Exit(1)
        resolved = cfg.paths.skills.expanduser().resolve()
    else:
        resolved = Path(skills_path).expanduser().resolve()

    if not resolved.exists():
        console.print(f"[red]Skills directory not found:[/red] {resolved}")
        raise typer.Exit(1)

    return resolved
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
@skills_app.command("audit")
def skills_audit(
    skills_path: str = typer.Option(
        None,
        "--path",
        "-p",
        help="Path to skills directory (default: from config)",
    ),
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """Audit skills for orchestrator readiness."""
    from ctrlrelay.core.audit import audit_all, format_report

    target_dir = _resolve_skills_dir(skills_path, config_path)
    console.print(f"Auditing skills in: {target_dir}\n")

    results = audit_all(target_dir)
    if not results:
        console.print("[yellow]No skills found.[/yellow]")
        return

    console.print(format_report(results))

    # Non-zero exit when any skill failed its audit, so CI can gate on it.
    if any(not entry.passed for entry in results):
        raise typer.Exit(1)
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
@skills_app.command("list")
def skills_list(
    skills_path: str = typer.Option(
        None,
        "--path",
        "-p",
        help="Path to skills directory (default: from config)",
    ),
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """List available skills."""
    from ctrlrelay.core.audit import discover_skills

    target_dir = _resolve_skills_dir(skills_path, config_path)
    found = discover_skills(target_dir)

    if not found:
        console.print("[yellow]No skills found.[/yellow]")
        return

    table = Table(title="Available Skills")
    table.add_column("Name", style="cyan")
    table.add_column("Path", style="dim")
    for entry in found:
        table.add_row(entry.name, str(entry.path))

    console.print(table)
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
# Bridge subcommand group
# `ctrlrelay bridge ...` — lifecycle management for the Telegram bridge
# daemon that owns the Unix socket.
bridge_app = typer.Typer(help="Telegram bridge commands.")
app.add_typer(bridge_app, name="bridge")
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def _get_socket_path(config_path: str) -> Path:
    """Return the bridge socket path from config, or the default location.

    Any config problem (missing/invalid file, no telegram section) silently
    falls back to ``~/.ctrlrelay/ctrlrelay.sock`` — callers use this for
    best-effort status/stop commands that should work even with a bad config.
    """
    fallback = Path("~/.ctrlrelay/ctrlrelay.sock")
    try:
        cfg = load_config(config_path)
    except ConfigError:
        return fallback.expanduser().resolve()
    tg = cfg.transport.telegram
    if tg:
        return tg.socket_path.expanduser().resolve()
    return fallback.expanduser().resolve()
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
def _get_bridge_pid_file(socket_path: Path) -> Path:
|
|
222
|
+
"""Get PID file path for bridge process."""
|
|
223
|
+
return socket_path.with_suffix(".pid")
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
@bridge_app.command("start")
def bridge_start(
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
    foreground: bool = typer.Option(
        False,
        "--foreground",
        "-F",
        help="Run in the foreground (for launchd/systemd/debugging). Default is to daemonize.",
    ),
) -> None:
    """Start the Telegram bridge.

    Daemonizes by default so the terminal returns to you. Pass --foreground
    under a process supervisor (launchd Type=simple, systemd Type=simple) or
    when debugging interactively.
    """
    import os
    import subprocess
    import sys

    try:
        config = load_config(config_path)
    except ConfigError as e:
        console.print(f"[red]Error loading config:[/red] {e}")
        raise typer.Exit(1)

    # The bridge only makes sense when the transport is telegram.
    if config.transport.type.value != "telegram":
        console.print("[yellow]Transport is not set to 'telegram' in config.[/yellow]")
        console.print("Set transport.type: telegram to use the bridge.")
        raise typer.Exit(1)

    telegram_config = config.transport.telegram
    if not telegram_config:
        console.print("[red]Telegram config not found.[/red]")
        raise typer.Exit(1)

    socket_path = telegram_config.socket_path.expanduser().resolve()
    pid_file = _get_bridge_pid_file(socket_path)

    # Refuse to start twice: os.kill(pid, 0) probes liveness without
    # signalling. A stale/garbled PID file is cleaned up and ignored.
    if pid_file.exists():
        try:
            pid = int(pid_file.read_text().strip())
            os.kill(pid, 0)
            console.print(f"[yellow]Bridge already running (PID {pid})[/yellow]")
            raise typer.Exit(1)
        except (ProcessLookupError, ValueError):
            pid_file.unlink(missing_ok=True)

    # Token comes from the environment variable named in config
    # (transport.telegram.bot_token_env), never from the config file itself.
    bot_token = os.environ.get(telegram_config.bot_token_env)
    if not bot_token:
        env_var = telegram_config.bot_token_env
        console.print(f"[red]Bot token not found.[/red] Set {env_var} environment variable.")
        raise typer.Exit(1)

    pid_file.parent.mkdir(parents=True, exist_ok=True)

    if foreground:
        import asyncio
        import signal

        from ctrlrelay.bridge import BridgeServer

        # Install early SIGTERM/SIGINT handlers so a supervisor stop between
        # now and loop.add_signal_handler below still runs the `finally`
        # that unlinks the PID file. loop.add_signal_handler replaces them
        # once the asyncio loop is running.
        def _raise_systemexit_on_signal(sig: int, _frame: object) -> None:
            raise SystemExit(0)

        for _sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(_sig, _raise_systemexit_on_signal)

        pid_file.write_text(str(os.getpid()))
        console.print(f"Starting bridge on {socket_path}")
        console.print("Press Ctrl+C to stop")

        server = BridgeServer(
            socket_path=socket_path,
            bot_token=bot_token,
            chat_id=telegram_config.chat_id,
        )

        loop = asyncio.new_event_loop()
        try:
            asyncio.set_event_loop(loop)

            async def _run_server() -> None:
                # Wrap start() in a finally that awaits stop() so the loop
                # can't close before _telegram.close() and the socket unlink
                # have actually completed. Scheduling stop() as a bare task
                # in the signal handler would not guarantee that ordering.
                try:
                    await server.start()
                finally:
                    await server.stop()

            main_task = loop.create_task(_run_server())

            # Graceful shutdown: cancelling the task unwinds through the
            # finally in _run_server, which awaits server.stop().
            def _handle_stop(sig: int) -> None:
                main_task.cancel()

            for sig in (signal.SIGTERM, signal.SIGINT):
                loop.add_signal_handler(sig, _handle_stop, sig)

            try:
                loop.run_until_complete(main_task)
            except asyncio.CancelledError:
                pass
        finally:
            loop.close()
            pid_file.unlink(missing_ok=True)
    else:
        # Pass the token via environment, never argv. Putting it on the command
        # line would expose it to anyone who can read `ps` / /proc/*/cmdline.
        cmd = [
            sys.executable,
            "-m",
            "ctrlrelay.bridge",
            "--socket-path",
            str(socket_path),
            "--bot-token-env",
            telegram_config.bot_token_env,
            "--chat-id",
            str(telegram_config.chat_id),
        ]
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            start_new_session=True,
        )
        # Claim the PID file BEFORE the liveness probe; a second concurrent
        # `bridge start` in the 1-second window would otherwise see no PID
        # file, spawn its own child, and both would rebind the shared socket.
        pid_file.write_text(str(proc.pid))
        try:
            proc.wait(timeout=1.0)
        except subprocess.TimeoutExpired:
            console.print(f"[green]Bridge started (PID {proc.pid})[/green]")
            return
        # Child exited within 1s. Zero = clean no-op; non-zero = crash.
        pid_file.unlink(missing_ok=True)
        if proc.returncode == 0:
            console.print(
                "[yellow]Bridge exited immediately with no work to do.[/yellow]"
            )
            return
        console.print(
            f"[red]Bridge failed to start[/red] "
            f"(child exited with code {proc.returncode})"
        )
        raise typer.Exit(1)
|
|
383
|
+
|
|
384
|
+
|
|
385
|
+
@bridge_app.command("stop")
def bridge_stop(
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """Stop the Telegram bridge.

    Sends SIGTERM to the PID recorded in the bridge's PID file. Stale or
    garbled PID files are cleaned up; a process we are not allowed to
    signal is reported without deleting its PID file.
    """
    import os
    import signal

    socket_path = _get_socket_path(config_path)
    pid_file = _get_bridge_pid_file(socket_path)

    if not pid_file.exists():
        console.print("[yellow]Bridge not running (no PID file)[/yellow]")
        return

    try:
        pid = int(pid_file.read_text().strip())
        os.kill(pid, signal.SIGTERM)
        console.print(f"[green]Stopped bridge (PID {pid})[/green]")
        pid_file.unlink(missing_ok=True)
    except ProcessLookupError:
        # Process already gone; drop the stale PID file.
        console.print("[yellow]Bridge process not found[/yellow]")
        pid_file.unlink(missing_ok=True)
    except PermissionError:
        # EPERM: the process exists but is owned by another user, so we
        # can't signal it. Previously this escaped as a raw traceback.
        # Keep the PID file — the bridge is still running.
        console.print(
            f"[red]Cannot stop bridge (PID {pid}): permission denied[/red]"
        )
        raise typer.Exit(1)
    except ValueError:
        console.print("[red]Invalid PID file[/red]")
        pid_file.unlink(missing_ok=True)
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
@bridge_app.command("status")
def bridge_status(
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """Check bridge status.

    Exit code 0 when the bridge is running; 1 otherwise (including the
    ambiguous "socket exists but no PID file" state).
    """
    import os

    socket_path = _get_socket_path(config_path)
    pid_file = _get_bridge_pid_file(socket_path)

    if pid_file.exists():
        try:
            pid = int(pid_file.read_text().strip())
            # Signal 0 probes liveness without sending anything.
            os.kill(pid, 0)
            console.print(f"[green]Bridge running (PID {pid})[/green]")
            console.print(f"Socket: {socket_path}")
            return
        except PermissionError:
            # Per kill(2), EPERM means the process EXISTS but we lack
            # permission to signal it — so the bridge IS running.
            # Previously this escaped as an unhandled traceback.
            console.print(f"[green]Bridge running (PID {pid})[/green]")
            console.print(f"Socket: {socket_path}")
            return
        except (ProcessLookupError, ValueError):
            # Dead process or garbled PID file: fall through to the
            # socket-based heuristics below.
            pass

    if socket_path.exists():
        console.print("[yellow]Socket exists but no PID file[/yellow]")
        console.print(
            "[dim]The bridge may be running under a supervisor that pre-dates "
            "the PID-file change — restart it to refresh state.[/dim]"
        )
        console.print(f"Socket: {socket_path}")
        raise typer.Exit(1)

    console.print("[dim]Bridge not running[/dim]")
    raise typer.Exit(1)
|
|
454
|
+
|
|
455
|
+
|
|
456
|
+
@bridge_app.command("test")
def bridge_test(
    message: str = typer.Option(
        "Test message from ctrlrelay bridge",
        "--message",
        "-m",
        help="Message to send",
    ),
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """Send a test message to verify bridge is working."""
    import asyncio

    socket_path = _get_socket_path(config_path)
    if not socket_path.exists():
        console.print("[red]Bridge not running.[/red] Start it with: ctrlrelay bridge start")
        raise typer.Exit(1)

    async def _deliver() -> None:
        from ctrlrelay.transports import SocketTransport

        # Always close the transport, even when connect/send fails.
        link = SocketTransport(socket_path)
        try:
            await link.connect()
            await link.send(message)
            console.print("[green]Message sent successfully![/green]")
        finally:
            await link.close()

    try:
        asyncio.run(_deliver())
    except Exception as e:
        console.print(f"[red]Failed to send message:[/red] {e}")
        raise typer.Exit(1)
|
|
496
|
+
|
|
497
|
+
|
|
498
|
+
# Run subcommand group
# `ctrlrelay run ...` — manual, one-shot pipeline execution (as opposed to
# the scheduler/poller driven paths).
run_app = typer.Typer(help="Pipeline execution commands.")
app.add_typer(run_app, name="run")
|
|
501
|
+
|
|
502
|
+
|
|
503
|
+
@run_app.command("secops")
def run_secops(
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
    repo: str = typer.Option(
        None,
        "--repo",
        "-r",
        help="Run on specific repo only",
    ),
) -> None:
    """Run secops pipeline on configured repos."""
    import asyncio

    # Imports are local so `ctrlrelay --help` stays fast and doesn't pull
    # in the pipeline machinery.
    from ctrlrelay.core.dispatcher import make_agent_dispatcher
    from ctrlrelay.core.github import GitHubCLI
    from ctrlrelay.core.state import StateDB
    from ctrlrelay.core.worktree import WorktreeManager
    from ctrlrelay.dashboard.client import DashboardClient
    from ctrlrelay.pipelines.secops import run_secops_all

    path = Path(config_path)

    try:
        config = load_config(path)
    except ConfigError as e:
        console.print(f"[red]Error loading config:[/red] {e}")
        raise typer.Exit(1)

    # --repo narrows to a single configured repo; an unknown name is fatal.
    repos = config.repos
    if repo:
        repos = [r for r in repos if r.name == repo]
        if not repos:
            console.print(f"[red]Repo not found:[/red] {repo}")
            raise typer.Exit(1)

    if not repos:
        console.print("[yellow]No repos configured.[/yellow]")
        return

    db = StateDB(config.paths.state_db)
    dispatcher = make_agent_dispatcher(config.agent)
    github = GitHubCLI()
    worktree = WorktreeManager(
        worktrees_dir=config.paths.worktrees,
        bare_repos_dir=config.paths.bare_repos,
    )

    # Dashboard reporting is optional: requires enabled+url in config AND
    # the auth token present in the environment.
    dashboard = None
    if config.dashboard.enabled and config.dashboard.url:
        import os
        token = os.environ.get(config.dashboard.auth_token_env, "")
        if token:
            dashboard = DashboardClient(
                url=config.dashboard.url,
                auth_token=token,
                node_id=config.node_id,
                queue_dir=config.paths.state_db.parent / "event_queue",
            )

    console.print(f"Running secops on {len(repos)} repo(s)...")

    async def _run():
        return await run_secops_all(
            repos=repos,
            dispatcher=dispatcher,
            github=github,
            worktree=worktree,
            dashboard=dashboard,
            state_db=db,
            transport=None,
            contexts_dir=config.paths.contexts,
        )

    # The finally guarantees the state DB is closed whether the pipeline
    # succeeds or raises.
    try:
        results = asyncio.run(_run())
    except Exception as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)
    finally:
        db.close()

    success_count = sum(1 for r in results if r.success)
    console.print(f"\n[bold]Results:[/bold] {success_count}/{len(results)} succeeded")

    for result in results:
        status = "[green]OK[/green]" if result.success else "[red]FAIL[/red]"
        console.print(f" {status} {result.summary}")
        # Surface the blocking question / error text so an operator
        # running secops at the CLI doesn't have to dig into state.db
        # to see what the agent was asking. The scheduler closure
        # already relays these via Telegram; this is the manual path.
        if result.blocked and result.question:
            console.print(f" [yellow]Question:[/yellow] {result.question}")
        elif not result.success and result.error:
            console.print(f" [red]Error:[/red] {result.error}")

    # Exit non-zero if anything failed so scripts/CI can gate on this.
    if not all(r.success for r in results):
        raise typer.Exit(1)
|
|
606
|
+
|
|
607
|
+
|
|
608
|
+
@run_app.command("dev")
def run_dev(
    issue: int = typer.Option(
        ...,
        "--issue",
        "-i",
        help="GitHub issue number to implement",
    ),
    repo: str = typer.Option(
        None,
        "--repo",
        "-r",
        help="Run on specific repo only",
    ),
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """Run dev pipeline for a GitHub issue."""
    import asyncio

    # Local imports keep `--help` fast and avoid loading pipeline machinery
    # for unrelated commands.
    from ctrlrelay.core.dispatcher import make_agent_dispatcher
    from ctrlrelay.core.github import GitHubCLI
    from ctrlrelay.core.state import StateDB
    from ctrlrelay.core.worktree import WorktreeManager
    from ctrlrelay.pipelines.dev import run_dev_issue

    path = Path(config_path)

    try:
        config = load_config(path)
    except ConfigError as e:
        console.print(f"[red]Error loading config:[/red] {e}")
        raise typer.Exit(1)

    repos = config.repos
    if repo:
        repos = [r for r in repos if r.name == repo]
        if not repos:
            console.print(f"[red]Repo not found:[/red] {repo}")
            raise typer.Exit(1)

    if not repos:
        console.print("[yellow]No repos configured.[/yellow]")
        return

    # The dev pipeline targets exactly one repo; ambiguity is an error.
    if len(repos) > 1 and not repo:
        console.print(
            "[red]Error:[/red] Multiple repos configured. "
            "Use --repo to specify which one."
        )
        raise typer.Exit(1)

    repo_config = repos[0]
    branch_template = repo_config.dev_branch_template

    db = StateDB(config.paths.state_db)
    dispatcher = make_agent_dispatcher(config.agent)
    github = GitHubCLI()
    worktree = WorktreeManager(
        worktrees_dir=config.paths.worktrees,
        bare_repos_dir=config.paths.bare_repos,
    )

    # Optional dashboard: needs enabled+url in config AND a token in env.
    dashboard = None
    if config.dashboard.enabled and config.dashboard.url:
        import os
        token = os.environ.get(config.dashboard.auth_token_env, "")
        if token:
            from ctrlrelay.dashboard.client import DashboardClient
            dashboard = DashboardClient(
                url=config.dashboard.url,
                auth_token=token,
                node_id=config.node_id,
                queue_dir=config.paths.state_db.parent / "event_queue",
            )

    console.print(f"Running dev pipeline for issue #{issue} on {repo_config.name}...")

    async def _run():
        return await run_dev_issue(
            repo=repo_config.name,
            issue_number=issue,
            branch_template=branch_template,
            dispatcher=dispatcher,
            github=github,
            worktree=worktree,
            dashboard=dashboard,
            state_db=db,
            transport=None,
            contexts_dir=config.paths.contexts,
        )

    # The finally closes the state DB on both success and failure paths.
    try:
        result = asyncio.run(_run())
    except Exception as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)
    finally:
        db.close()

    # Three terminal states: success (exit 0), blocked on a question
    # (exit 1, question shown), or failure (exit 1, error shown).
    if result.success:
        pr_url = result.outputs.get("pr_url", "") if result.outputs else ""
        console.print(f"[green]Success:[/green] {result.summary}")
        if pr_url:
            console.print(f" PR: {pr_url}")
    elif result.blocked:
        console.print(f"[blue]Blocked:[/blue] {result.summary}")
        if result.question:
            console.print(f" Question: {result.question}")
        raise typer.Exit(1)
    else:
        console.print(f"[red]Failed:[/red] {result.summary}")
        if result.error:
            console.print(f" Error: {result.error}")
        raise typer.Exit(1)
|
|
727
|
+
|
|
728
|
+
|
|
729
|
+
# Poller subcommand group
# `ctrlrelay poller ...` — lifecycle management for the GitHub issue poller
# daemon.
poller_app = typer.Typer(help="Issue poller commands.")
app.add_typer(poller_app, name="poller")
|
|
732
|
+
|
|
733
|
+
|
|
734
|
+
def _get_poller_pid_file(config_path: str) -> Path:
    """Return the poller PID-file path (next to the state DB, or the default).

    Falls back to ``~/.ctrlrelay/poller.pid`` when the config can't be
    loaded, so stop/status work even with a broken config.
    """
    try:
        cfg = load_config(config_path)
    except ConfigError:
        return Path("~/.ctrlrelay/poller.pid").expanduser().resolve()
    return cfg.paths.state_db.parent / "poller.pid"
|
|
742
|
+
|
|
743
|
+
|
|
744
|
+
@poller_app.command("start")
|
|
745
|
+
def poller_start(
|
|
746
|
+
config_path: str = typer.Option(
|
|
747
|
+
"config/orchestrator.yaml",
|
|
748
|
+
"--config",
|
|
749
|
+
"-c",
|
|
750
|
+
help="Path to orchestrator.yaml",
|
|
751
|
+
),
|
|
752
|
+
foreground: bool = typer.Option(
|
|
753
|
+
False,
|
|
754
|
+
"--foreground",
|
|
755
|
+
"-F",
|
|
756
|
+
help="Run in the foreground (for launchd/systemd/debugging). Default is to daemonize.",
|
|
757
|
+
),
|
|
758
|
+
interval: int = typer.Option(
|
|
759
|
+
300,
|
|
760
|
+
"--interval",
|
|
761
|
+
"-i",
|
|
762
|
+
help="Polling interval in seconds",
|
|
763
|
+
),
|
|
764
|
+
) -> None:
|
|
765
|
+
"""Start the issue poller.
|
|
766
|
+
|
|
767
|
+
Daemonizes by default so the terminal returns to you. Pass --foreground
|
|
768
|
+
under a process supervisor (launchd Type=simple, systemd Type=simple) or
|
|
769
|
+
when debugging interactively.
|
|
770
|
+
"""
|
|
771
|
+
import asyncio
|
|
772
|
+
import os
|
|
773
|
+
import signal
|
|
774
|
+
import subprocess
|
|
775
|
+
import sys
|
|
776
|
+
|
|
777
|
+
try:
|
|
778
|
+
config = load_config(Path(config_path))
|
|
779
|
+
except ConfigError as e:
|
|
780
|
+
console.print(f"[red]Error loading config:[/red] {e}")
|
|
781
|
+
raise typer.Exit(1)
|
|
782
|
+
|
|
783
|
+
pid_file = _get_poller_pid_file(config_path)
|
|
784
|
+
if pid_file.exists():
|
|
785
|
+
try:
|
|
786
|
+
pid = int(pid_file.read_text().strip())
|
|
787
|
+
if pid == os.getpid():
|
|
788
|
+
# The daemon parent wrote our own PID here before spawning us
|
|
789
|
+
# as the `--foreground` child. Don't treat it as a conflict.
|
|
790
|
+
pass
|
|
791
|
+
else:
|
|
792
|
+
os.kill(pid, 0)
|
|
793
|
+
console.print(f"[yellow]Poller already running (PID {pid})[/yellow]")
|
|
794
|
+
raise typer.Exit(1)
|
|
795
|
+
except (ProcessLookupError, ValueError):
|
|
796
|
+
pid_file.unlink(missing_ok=True)
|
|
797
|
+
pid_file.parent.mkdir(parents=True, exist_ok=True)
|
|
798
|
+
|
|
799
|
+
if not foreground:
|
|
800
|
+
cmd = [
|
|
801
|
+
sys.executable,
|
|
802
|
+
"-m",
|
|
803
|
+
"ctrlrelay.cli",
|
|
804
|
+
"poller",
|
|
805
|
+
"start",
|
|
806
|
+
"--config",
|
|
807
|
+
config_path,
|
|
808
|
+
"--interval",
|
|
809
|
+
str(interval),
|
|
810
|
+
"--foreground",
|
|
811
|
+
]
|
|
812
|
+
proc = subprocess.Popen(
|
|
813
|
+
cmd,
|
|
814
|
+
stdout=subprocess.DEVNULL,
|
|
815
|
+
stderr=subprocess.DEVNULL,
|
|
816
|
+
start_new_session=True,
|
|
817
|
+
)
|
|
818
|
+
# Claim the PID file BEFORE the liveness probe; otherwise a second
|
|
819
|
+
# concurrent `start` in the 1-second window would see no PID file and
|
|
820
|
+
# spawn a duplicate poller.
|
|
821
|
+
pid_file.write_text(str(proc.pid))
|
|
822
|
+
try:
|
|
823
|
+
proc.wait(timeout=1.0)
|
|
824
|
+
except subprocess.TimeoutExpired:
|
|
825
|
+
console.print(f"[green]Poller started (PID {proc.pid})[/green]")
|
|
826
|
+
return
|
|
827
|
+
# Child exited. Zero = clean no-op (e.g. `repos: []`); non-zero = crash.
|
|
828
|
+
pid_file.unlink(missing_ok=True)
|
|
829
|
+
if proc.returncode == 0:
|
|
830
|
+
console.print(
|
|
831
|
+
"[yellow]Poller exited immediately with no work to do "
|
|
832
|
+
"(check `repos:` in your config).[/yellow]"
|
|
833
|
+
)
|
|
834
|
+
return
|
|
835
|
+
console.print(
|
|
836
|
+
f"[red]Poller failed to start[/red] "
|
|
837
|
+
f"(child exited with code {proc.returncode})"
|
|
838
|
+
)
|
|
839
|
+
raise typer.Exit(1)
|
|
840
|
+
|
|
841
|
+
# Install SIGTERM/SIGINT handlers BEFORE any startup work so a supervisor
|
|
842
|
+
# stop during `gh api user` / `seed_current()` still unwinds through the
|
|
843
|
+
# `finally` that unlinks poller.pid. Converting the signal into SystemExit
|
|
844
|
+
# lets Python's normal unwind run `finally` blocks. Once the asyncio loop
|
|
845
|
+
# is up below, `loop.add_signal_handler` overrides these to drive a
|
|
846
|
+
# graceful cancel of the poll loop.
|
|
847
|
+
def _raise_systemexit_on_signal(sig: int, _frame: object) -> None:
|
|
848
|
+
raise SystemExit(0)
|
|
849
|
+
|
|
850
|
+
for sig in (signal.SIGTERM, signal.SIGINT):
|
|
851
|
+
signal.signal(sig, _raise_systemexit_on_signal)
|
|
852
|
+
|
|
853
|
+
pid_file.write_text(str(os.getpid()))
|
|
854
|
+
try:
|
|
855
|
+
from ctrlrelay.core.dispatcher import make_agent_dispatcher
|
|
856
|
+
from ctrlrelay.core.github import GitHubCLI
|
|
857
|
+
from ctrlrelay.core.poller import IssuePoller, run_poll_loop
|
|
858
|
+
from ctrlrelay.core.scheduler import make_scheduler
|
|
859
|
+
from ctrlrelay.core.state import StateDB
|
|
860
|
+
from ctrlrelay.core.worktree import WorktreeManager
|
|
861
|
+
from ctrlrelay.pipelines.dev import run_dev_issue
|
|
862
|
+
from ctrlrelay.pipelines.post_merge import pr_watch_task
|
|
863
|
+
from ctrlrelay.pipelines.secops import run_secops_all
|
|
864
|
+
|
|
865
|
+
# Build a DashboardClient if configured BEFORE the gh probe runs,
|
|
866
|
+
# so even if gh fails the user gets clear failure ordering and so
|
|
867
|
+
# tests can short-circuit at gh while still observing this wiring.
|
|
868
|
+
# Mirrors what `ctrlrelay run secops` does for the manual path.
|
|
869
|
+
scheduled_dashboard = None
|
|
870
|
+
if config.dashboard.enabled and config.dashboard.url:
|
|
871
|
+
token = os.environ.get(config.dashboard.auth_token_env, "")
|
|
872
|
+
if token:
|
|
873
|
+
from ctrlrelay.dashboard.client import DashboardClient
|
|
874
|
+
scheduled_dashboard = DashboardClient(
|
|
875
|
+
url=config.dashboard.url,
|
|
876
|
+
auth_token=token,
|
|
877
|
+
node_id=config.node_id,
|
|
878
|
+
queue_dir=config.paths.state_db.parent / "event_queue",
|
|
879
|
+
)
|
|
880
|
+
|
|
881
|
+
github = GitHubCLI()
|
|
882
|
+
|
|
883
|
+
# Get GitHub username
|
|
884
|
+
try:
|
|
885
|
+
from ctrlrelay.core.github import _find_gh
|
|
886
|
+
gh_bin = _find_gh()
|
|
887
|
+
result = subprocess.run(
|
|
888
|
+
[gh_bin, "api", "user", "--jq", ".login"],
|
|
889
|
+
capture_output=True,
|
|
890
|
+
text=True,
|
|
891
|
+
check=True,
|
|
892
|
+
)
|
|
893
|
+
username = result.stdout.strip()
|
|
894
|
+
except subprocess.CalledProcessError as e:
|
|
895
|
+
console.print(f"[red]Failed to get GitHub username:[/red] {e}")
|
|
896
|
+
raise typer.Exit(1)
|
|
897
|
+
|
|
898
|
+
if not username:
|
|
899
|
+
console.print("[red]Could not determine GitHub username.[/red]")
|
|
900
|
+
raise typer.Exit(1)
|
|
901
|
+
|
|
902
|
+
repo_names = [r.name for r in config.repos]
|
|
903
|
+
if not repo_names:
|
|
904
|
+
console.print("[yellow]No repos configured.[/yellow]")
|
|
905
|
+
return
|
|
906
|
+
|
|
907
|
+
state_file = config.paths.state_db.parent / "poller_state.json"
|
|
908
|
+
|
|
909
|
+
first_run = not state_file.exists()
|
|
910
|
+
|
|
911
|
+
poller = IssuePoller(
|
|
912
|
+
github=github,
|
|
913
|
+
username=username,
|
|
914
|
+
repos=repo_names,
|
|
915
|
+
state_file=state_file,
|
|
916
|
+
)
|
|
917
|
+
|
|
918
|
+
# NOTE: first-run seeding moved into `_main()` so the APScheduler
|
|
919
|
+
# cron is registered + running BEFORE the slow seed_current() pass
|
|
920
|
+
# (one GitHub API call per repo) takes place. Otherwise the 6am
|
|
921
|
+
# scheduled fire can pass during startup and APScheduler's misfire
|
|
922
|
+
# grace only catches up on fires that happened AFTER registration.
|
|
923
|
+
|
|
924
|
+
state_db = StateDB(config.paths.state_db)
|
|
925
|
+
dispatcher = make_agent_dispatcher(config.agent)
|
|
926
|
+
worktree = WorktreeManager(
|
|
927
|
+
worktrees_dir=config.paths.worktrees,
|
|
928
|
+
bare_repos_dir=config.paths.bare_repos,
|
|
929
|
+
)
|
|
930
|
+
|
|
931
|
+
# Set up transport for notifications
|
|
932
|
+
transport = None
|
|
933
|
+
if config.transport.type.value == "telegram" and config.transport.telegram:
|
|
934
|
+
from ctrlrelay.transports import SocketTransport
|
|
935
|
+
socket_path = config.transport.telegram.socket_path.expanduser().resolve()
|
|
936
|
+
if socket_path.exists():
|
|
937
|
+
transport = SocketTransport(socket_path)
|
|
938
|
+
console.print(f"[dim]Telegram transport enabled via {socket_path}[/dim]")
|
|
939
|
+
else:
|
|
940
|
+
console.print(f"[yellow]Telegram socket not found at {socket_path}[/yellow]")
|
|
941
|
+
console.print(
|
|
942
|
+
"[yellow]Run 'ctrlrelay bridge start' to enable notifications[/yellow]"
|
|
943
|
+
)
|
|
944
|
+
|
|
945
|
+
# Track in-flight PR-watch background tasks so they outlive
|
|
946
|
+
# handle_issue and aren't garbage-collected. Cleared via
|
|
947
|
+
# done_callback as each task terminates.
|
|
948
|
+
pr_watch_tasks: set[asyncio.Task] = set()
|
|
949
|
+
|
|
950
|
+
async def _watch_transport_factory():
    """Build a fresh connected SocketTransport for a single watcher,
    independent of the transport used in handle_issue (which is
    closed on exit).

    Return None ONLY when Telegram notifications aren't
    configured at all — that's a legitimate "no channel" signal
    and the retry loop treats it as clean success. A configured-
    but-currently-missing socket is a transient outage (bridge
    restart, daemon crash mid-merge), so RAISE to trigger the
    retry path; the next attempt will reach the socket once the
    bridge is back.
    """
    # Not configured for Telegram at all: signal "no channel" with None.
    if config.transport.type.value != "telegram" or not config.transport.telegram:
        return None
    from ctrlrelay.transports import SocketTransport
    socket_path = config.transport.telegram.socket_path.expanduser().resolve()
    if not socket_path.exists():
        # Configured but socket absent: treat as transient and raise so
        # the caller's retry machinery tries again once the bridge is up.
        raise FileNotFoundError(
            f"Telegram bridge socket missing at {socket_path}; "
            "retryable — bridge may be restarting"
        )
    # Fresh transport per watcher; the caller owns (and closes) it.
    watch_transport = SocketTransport(socket_path)
    await watch_transport.connect()
    return watch_transport
|
|
975
|
+
|
|
976
|
+
async def handle_issue(repo: str, issue: dict) -> None:
    """Poll-loop callback: run the dev pipeline for a newly assigned issue.

    Args:
        repo: Repository identifier as configured in ``config.repos``
            (matched against ``r.name``).
        issue: GitHub issue payload dict; reads the ``number`` and
            ``title`` keys.

    Connects the shared ``transport`` for best-effort notifications
    (closed again in the ``finally``), runs ``run_dev_issue``, un-marks
    the issue for retry on a per-repo lock conflict, and on PR creation
    spawns a background watcher task tracked in ``pr_watch_tasks``.
    """
    issue_number = issue["number"]
    title = issue.get("title", "")
    console.print(
        f"[green]New issue detected:[/green] #{issue_number} in {repo} — {title}"
    )

    # Connect transport for notifications. A connect/send failure is
    # logged and the pipeline still runs without notifications.
    connected_transport = None
    if transport:
        try:
            await transport.connect()
            connected_transport = transport
            await transport.send(f"🔔 New issue #{issue_number} in {repo}: {title}")
        except Exception as e:
            console.print(f"[yellow]Transport error: {e}[/yellow]")

    # Find matching repo config
    repo_configs = [r for r in config.repos if r.name == repo]
    if not repo_configs:
        console.print(f"[yellow]No config found for repo {repo}, skipping.[/yellow]")
        if connected_transport:
            await transport.close()
        return

    repo_config = repo_configs[0]
    try:
        result = await run_dev_issue(
            repo=repo,
            issue_number=issue_number,
            branch_template=repo_config.dev_branch_template,
            dispatcher=dispatcher,
            github=github,
            worktree=worktree,
            dashboard=None,
            state_db=state_db,
            transport=connected_transport,
            contexts_dir=config.paths.contexts,
        )

        # Lock-conflict retry hook. The poller marks issues seen
        # BEFORE handle_issue runs, so a failed attempt would
        # permanently drop the issue. If run_dev_issue couldn't
        # acquire the per-repo lock (common when a scheduled
        # secops sweep is mid-run on the same repo), un-mark the
        # issue so the next poll picks it up. Any other failure
        # still stays seen — those aren't transient.
        if (
            not result.success
            and not result.blocked
            and result.error
            and "locked by another session" in result.error.lower()
        ):
            poller.unmark_seen(repo, issue_number)
            console.print(
                f"[yellow]#{issue_number} in {repo}: repo locked "
                "(secops running?) — un-marked for retry next "
                "poll.[/yellow]"
            )
            # Deferral notice is best-effort; a send failure must not
            # undo the un-mark above.
            if connected_transport:
                try:
                    await transport.send(
                        f"⏳ #{issue_number} in {repo} "
                        "deferred (repo busy); will retry."
                    )
                except Exception:
                    pass
            return

        # Spawn the PR watcher FIRST, before any best-effort
        # notification. The poller has already marked this issue
        # as seen in poller_state.json, so if a transient
        # transport.send failure below raised through the outer
        # finally, we'd permanently lose the watcher and the
        # issue would never auto-close on merge.
        pr_number_raw = result.outputs.get("pr_number")
        pr_url_str = result.outputs.get("pr_url", "")
        if pr_number_raw is not None:
            try:
                pr_number = int(pr_number_raw)
            except (TypeError, ValueError):
                # Non-numeric pr_number output: skip the watcher rather
                # than crash the handler.
                pr_number = None
            if pr_number is not None:
                task = asyncio.create_task(
                    pr_watch_task(
                        repo=repo,
                        issue_number=issue_number,
                        pr_url=pr_url_str,
                        pr_number=pr_number,
                        session_id=result.session_id,
                        github=github,
                        transport_factory=_watch_transport_factory,
                    )
                )
                # Keep a strong reference so the task isn't GC'd;
                # discard it from the set when it finishes.
                pr_watch_tasks.add(task)
                task.add_done_callback(pr_watch_tasks.discard)

        # Send result notification — best-effort. A failed send
        # must NOT prevent the merge watcher (spawned above)
        # from running, so swallow transport errors here.
        if connected_transport:
            try:
                if result.success:
                    pr_url = result.outputs.get("pr_url", "")
                    await transport.send(f"✅ PR ready: {pr_url}")
                elif result.blocked:
                    await transport.send(
                        f"⏸️ Blocked on #{issue_number}: {result.question}"
                    )
                else:
                    await transport.send(
                        f"❌ Failed on #{issue_number}: "
                        f"{result.error or result.summary}"
                    )
            except Exception as e:
                console.print(
                    f"[yellow]Transport error sending result: {e}[/yellow]"
                )
    finally:
        # Close only if we actually connected above.
        if connected_transport:
            await transport.close()
|
|
1097
|
+
|
|
1098
|
+
console.print(f"[green]Starting poller[/green] for {len(repo_names)} repo(s) as {username}")
|
|
1099
|
+
console.print(f" Interval: {interval}s | Press Ctrl+C to stop")
|
|
1100
|
+
|
|
1101
|
+
async def _run_scheduled_secops() -> None:
    """Scheduler callback: run the secops sweep across all repos.

    Shares the poller's open state_db, github, dispatcher, and
    worktree. Per-repo locks in the state DB prevent collisions
    with an in-flight dev pipeline (and the dev handler now
    retries on lock-conflict so issues aren't silently dropped).

    Builds a fresh SocketTransport per run so blocked/failed
    results notify Telegram — the dashboard only pushes for
    successful runs, so without a transport here operators would
    lose visibility into scheduled failures.
    """
    if not config.repos:
        return
    n_repos = len(config.repos)
    console.print(
        f"[dim]Scheduled secops: starting across {n_repos} repo(s)[/dim]"
    )

    # Fresh per-run transport; a connect failure degrades to a
    # notification-less sweep instead of aborting it.
    secops_transport = None
    if config.transport.type.value == "telegram" and config.transport.telegram:
        from ctrlrelay.transports import SocketTransport
        sock = config.transport.telegram.socket_path.expanduser().resolve()
        if sock.exists():
            try:
                candidate = SocketTransport(sock)
                await candidate.connect()
                secops_transport = candidate
            except Exception as e:
                console.print(
                    f"[yellow]Scheduled secops: transport connect "
                    f"failed ({e}) — running without notifications[/yellow]"
                )

    # Tell the operator the sweep started so a long run isn't
    # silent. Without this, a 10-min sweep that ends with a
    # blocked-on-input result looks like "out of nowhere" pings.
    if secops_transport:
        try:
            await secops_transport.send(
                f"🔄 Scheduled secops: starting sweep across "
                f"{n_repos} repo(s)"
            )
        except Exception as e:
            console.print(
                f"[yellow]Scheduled secops: start-notify failed: "
                f"{e}[/yellow]"
            )

    try:
        results = await run_secops_all(
            repos=config.repos,
            dispatcher=dispatcher,
            github=github,
            worktree=worktree,
            dashboard=scheduled_dashboard,
            state_db=state_db,
            transport=secops_transport,
            contexts_dir=config.paths.contexts,
        )
        ok = sum(1 for r in results if r.success)
        console.print(
            f"[dim]Scheduled secops: {ok}/{len(results)} succeeded[/dim]"
        )

        # Per-repo notifications for the cases an operator must
        # act on. The aggregate "N blocked" message we used to
        # send was useless on its own — it didn't say which
        # repos blocked or what the question was. Fan out one
        # message per blocked or failed result with the actual
        # question/error and session id so the operator can
        # respond directly via the bridge.
        #
        # `run_secops_all` returns results in the same order as
        # the input `repos` list, so zip is safe — only repos
        # with successful lock-acquisition produce results.
        if secops_transport:
            try:
                for repo_cfg, result in zip(
                    config.repos, results, strict=False
                ):
                    if result.blocked:
                        question = (
                            result.question or "(no question text)"
                        )
                        await secops_transport.send(
                            f"⏸️ Scheduled secops blocked on "
                            f"{repo_cfg.name}\n"
                            f"Session: `{result.session_id}`\n"
                            f"\n{question}"
                        )
                    elif not result.success:
                        err = result.error or result.summary
                        await secops_transport.send(
                            f"❌ Scheduled secops failed on "
                            f"{repo_cfg.name}\n"
                            f"Session: `{result.session_id}`\n"
                            f"\n{err}"
                        )
                # Final at-a-glance summary — kept because it's
                # the single message the operator scans first.
                blocked_n = sum(1 for r in results if r.blocked)
                failed_n = sum(
                    1 for r in results
                    if not r.success and not r.blocked
                )
                if blocked_n or failed_n:
                    parts = []
                    if blocked_n:
                        parts.append(f"{blocked_n} blocked")
                    if failed_n:
                        parts.append(f"{failed_n} failed")
                    await secops_transport.send(
                        f"📋 Scheduled secops sweep done: "
                        f"{ok}/{len(results)} ok, "
                        f"{', '.join(parts)}"
                    )
                else:
                    await secops_transport.send(
                        f"✅ Scheduled secops sweep done: "
                        f"{ok}/{len(results)} ok"
                    )
            except Exception as e:
                # Notification failure is non-fatal; the sweep itself
                # already completed.
                console.print(
                    f"[yellow]Scheduled secops: notify failed: {e}[/yellow]"
                )
    finally:
        # Always close the per-run transport; swallow close errors so a
        # dead socket can't mask a sweep exception already in flight.
        if secops_transport:
            try:
                await secops_transport.close()
            except Exception:
                pass
|
|
1234
|
+
|
|
1235
|
+
async def _main() -> None:
    """Async entry point for the poller: scheduler, seeding, poll loop.

    Order matters throughout: the cron scheduler is registered and
    started before the (slow) first-run seeding, the poll loop runs
    until cancelled, and the ``finally`` tears down the scheduler and
    any in-flight PR-watch tasks.
    """
    # Register + start the scheduler FIRST, before any potentially
    # slow startup work. Otherwise a 6am fire that lands during
    # seed_current's per-repo GitHub calls would be lost —
    # APScheduler's misfire_grace_time only rescues fires that
    # happened AFTER the job was registered.
    scheduler = make_scheduler(timezone=config.timezone)
    scheduler.add_cron_job(
        name="secops",
        cron_expr=config.schedules.secops_cron,
        func=_run_scheduled_secops,
    )
    scheduler.start()
    console.print(
        f"[dim]Scheduler: secops cron={config.schedules.secops_cron} "
        f"tz={config.timezone}[/dim]"
    )

    # Now the slow startup: first-run seeding (one gh call per
    # repo). Done inside _main so the scheduler is already up.
    if first_run:
        console.print(
            "[dim]First run: seeding with current assignments..."
            "[/dim]"
        )
        await poller.seed_current()

    try:
        await run_poll_loop(
            poller=poller, handler=handle_issue, interval=interval,
        )
    finally:
        # Scheduler shutdown is async so it can cancel + await any
        # in-flight job tasks (e.g. a scheduled secops sweep that was
        # running when SIGTERM arrived). Without awaiting here the
        # loop closes before jobs finalize — state_db locks stay
        # held and worktrees stay dirty.
        await scheduler.shutdown()
        # Cancel any in-flight PR watchers so a poller stop/restart
        # doesn't leak asyncio tasks. Their handlers log
        # dev.pr.watch_cancelled and close their transport via the
        # finally block.
        if pr_watch_tasks:
            for t in list(pr_watch_tasks):
                t.cancel()
            await asyncio.gather(*list(pr_watch_tasks), return_exceptions=True)
|
|
1281
|
+
|
|
1282
|
+
loop = asyncio.new_event_loop()
|
|
1283
|
+
try:
|
|
1284
|
+
asyncio.set_event_loop(loop)
|
|
1285
|
+
main_task = loop.create_task(_main())
|
|
1286
|
+
|
|
1287
|
+
def _handle_stop(sig: int) -> None:
|
|
1288
|
+
main_task.cancel()
|
|
1289
|
+
|
|
1290
|
+
for sig in (signal.SIGTERM, signal.SIGINT):
|
|
1291
|
+
loop.add_signal_handler(sig, _handle_stop, sig)
|
|
1292
|
+
|
|
1293
|
+
try:
|
|
1294
|
+
loop.run_until_complete(main_task)
|
|
1295
|
+
except asyncio.CancelledError:
|
|
1296
|
+
console.print("\n[yellow]Poller stopped.[/yellow]")
|
|
1297
|
+
finally:
|
|
1298
|
+
loop.close()
|
|
1299
|
+
state_db.close()
|
|
1300
|
+
finally:
|
|
1301
|
+
pid_file.unlink(missing_ok=True)
|
|
1302
|
+
|
|
1303
|
+
|
|
1304
|
+
@poller_app.command("stop")
def poller_stop(
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """Stop the issue poller."""
    import os
    import signal

    pid_file = _get_poller_pid_file(config_path)

    # No PID file means nothing to stop — not an error.
    if not pid_file.exists():
        console.print("[yellow]Poller not running (no PID file)[/yellow]")
        return

    # Parse the recorded PID; a corrupt file is discarded.
    try:
        pid = int(pid_file.read_text().strip())
    except ValueError:
        console.print("[red]Invalid PID file[/red]")
        pid_file.unlink(missing_ok=True)
        return

    # Ask the poller to shut down; a stale PID is cleaned up.
    try:
        os.kill(pid, signal.SIGTERM)
    except ProcessLookupError:
        console.print("[yellow]Poller process not found[/yellow]")
        pid_file.unlink(missing_ok=True)
        return

    console.print(f"[green]Stopped poller (PID {pid})[/green]")
    pid_file.unlink(missing_ok=True)
|
|
1334
|
+
|
|
1335
|
+
|
|
1336
|
+
@poller_app.command("status")
def poller_status(
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """Check poller status."""
    import os

    pid_file = _get_poller_pid_file(config_path)

    # Missing file → definitely not running.
    if not pid_file.exists():
        console.print("[dim]Poller not running[/dim]")
        raise typer.Exit(1)

    # Signal 0 is a pure existence probe; a corrupt PID or a dead
    # process both fall through to the "not running" exit below.
    try:
        pid = int(pid_file.read_text().strip())
        os.kill(pid, 0)
    except (ProcessLookupError, ValueError):
        console.print("[dim]Poller not running[/dim]")
        raise typer.Exit(1)

    console.print(f"[green]Poller running (PID {pid})[/green]")
|
|
1361
|
+
|
|
1362
|
+
|
|
1363
|
+
@app.command("version")
def version() -> None:
    """Print the installed ctrlrelay package version to the console."""
    console.print(__version__)
|
|
1367
|
+
|
|
1368
|
+
|
|
1369
|
+
@app.command("status")
def status(
    config_path: str = typer.Option(
        "config/orchestrator.yaml",
        "--config",
        "-c",
        help="Path to orchestrator.yaml",
    ),
) -> None:
    """Show orchestrator status and active sessions."""
    from ctrlrelay.core.state import StateDB

    try:
        config = load_config(config_path)
    except ConfigError as e:
        console.print(f"[red]Error loading config:[/red] {e}")
        raise typer.Exit(1)

    # Nothing to report until a pipeline has created the DB.
    db_path = config.paths.state_db
    if not db_path.exists():
        console.print(f"[yellow]State database not found at {db_path}[/yellow]")
        console.print("Run a pipeline first to initialize the database.")
        return

    import sqlite3

    try:
        db = StateDB(db_path)
    except sqlite3.Error as e:
        console.print(f"[red]Error opening database:[/red] {e}")
        raise typer.Exit(1)

    # Color used to render each session status; unknown → white.
    colors = {
        "done": "green",
        "failed": "red",
        "running": "yellow",
        "blocked": "blue",
    }

    try:
        # Active per-repo locks first.
        locks = db.list_locks()
        if not locks:
            console.print("\n[dim]No active locks[/dim]")
        else:
            console.print("\n[bold]Active Locks:[/bold]")
            for lock in locks:
                console.print(f" • {lock['repo']} → session {lock['session_id']}")

        # Then the five most recent sessions.
        rows = db.execute(
            "SELECT * FROM sessions ORDER BY started_at DESC LIMIT 5"
        ).fetchall()
        if not rows:
            console.print("\n[dim]No sessions recorded yet[/dim]")
        else:
            console.print("\n[bold]Recent Sessions:[/bold]")
            table = Table()
            table.add_column("ID", style="dim", max_width=12)
            table.add_column("Pipeline")
            table.add_column("Repo")
            table.add_column("Status")
            for row in rows:
                tint = colors.get(row["status"], "white")
                table.add_row(
                    row["id"][:12],
                    row["pipeline"],
                    row["repo"],
                    f"[{tint}]{row['status']}[/{tint}]",
                )
            console.print(table)
    except sqlite3.Error as e:
        console.print(f"[red]Database error:[/red] {e}")
        raise typer.Exit(1)
    finally:
        # Close the DB handle on every path, including errors.
        db.close()
|
|
1446
|
+
|
|
1447
|
+
|
|
1448
|
+
# Allow direct invocation (`python -m ctrlrelay.cli`) in addition to the
# installed console-script entry point.
if __name__ == "__main__":
    app()
|