abstractgateway 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. abstractgateway/__init__.py +1 -2
  2. abstractgateway/__main__.py +7 -0
  3. abstractgateway/app.py +4 -4
  4. abstractgateway/cli.py +568 -8
  5. abstractgateway/config.py +15 -5
  6. abstractgateway/embeddings_config.py +45 -0
  7. abstractgateway/host_metrics.py +274 -0
  8. abstractgateway/hosts/bundle_host.py +528 -55
  9. abstractgateway/hosts/visualflow_host.py +30 -3
  10. abstractgateway/integrations/__init__.py +2 -0
  11. abstractgateway/integrations/email_bridge.py +782 -0
  12. abstractgateway/integrations/telegram_bridge.py +534 -0
  13. abstractgateway/maintenance/__init__.py +5 -0
  14. abstractgateway/maintenance/action_tokens.py +100 -0
  15. abstractgateway/maintenance/backlog_exec_runner.py +1592 -0
  16. abstractgateway/maintenance/backlog_parser.py +184 -0
  17. abstractgateway/maintenance/draft_generator.py +451 -0
  18. abstractgateway/maintenance/llm_assist.py +212 -0
  19. abstractgateway/maintenance/notifier.py +109 -0
  20. abstractgateway/maintenance/process_manager.py +1064 -0
  21. abstractgateway/maintenance/report_models.py +81 -0
  22. abstractgateway/maintenance/report_parser.py +219 -0
  23. abstractgateway/maintenance/text_similarity.py +123 -0
  24. abstractgateway/maintenance/triage.py +507 -0
  25. abstractgateway/maintenance/triage_queue.py +142 -0
  26. abstractgateway/migrate.py +155 -0
  27. abstractgateway/routes/__init__.py +2 -2
  28. abstractgateway/routes/gateway.py +10817 -179
  29. abstractgateway/routes/triage.py +118 -0
  30. abstractgateway/runner.py +689 -14
  31. abstractgateway/security/gateway_security.py +425 -110
  32. abstractgateway/service.py +213 -6
  33. abstractgateway/stores.py +64 -4
  34. abstractgateway/workflow_deprecations.py +225 -0
  35. abstractgateway-0.1.1.dist-info/METADATA +135 -0
  36. abstractgateway-0.1.1.dist-info/RECORD +40 -0
  37. abstractgateway-0.1.0.dist-info/METADATA +0 -101
  38. abstractgateway-0.1.0.dist-info/RECORD +0 -18
  39. {abstractgateway-0.1.0.dist-info → abstractgateway-0.1.1.dist-info}/WHEEL +0 -0
  40. {abstractgateway-0.1.0.dist-info → abstractgateway-0.1.1.dist-info}/entry_points.txt +0 -0
abstractgateway/maintenance/backlog_exec_runner.py (new file)
@@ -0,0 +1,1592 @@
+ from __future__ import annotations
+
+ import datetime
+ import json
+ import os
+ import re
+ import socket
+ import subprocess
+ import threading
+ import time
+ from urllib.parse import urlparse
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional, Tuple
+
+ from .notifier import send_email_notification, send_telegram_notification
+
+
+ def _as_bool(raw: Any, default: bool) -> bool:
+     if raw is None:
+         return default
+     if isinstance(raw, bool):
+         return raw
+     s = str(raw).strip().lower()
+     if not s:
+         return default
+     if s in {"1", "true", "yes", "on"}:
+         return True
+     if s in {"0", "false", "no", "off"}:
+         return False
+     return default
+
+
+ def _as_int(raw: Any, default: int) -> int:
+     if raw is None:
+         return default
+     if isinstance(raw, bool):
+         return int(raw)
+     if isinstance(raw, int):
+         return raw
+     s = str(raw).strip()
+     if not s:
+         return default
+     try:
+         return int(s)
+     except Exception:
+         return default
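
Both coercers deliberately swallow bad input: anything outside the recognized sets falls back to the caller's default instead of raising, which is what lets every `from_env` read further down stay exception-free. A minimal illustration, assuming the two helpers above are in scope:

assert _as_bool("YES", False) is True
assert _as_bool("off", True) is False
assert _as_bool("maybe", True) is True   # unrecognized string -> default
assert _as_int("8", 2) == 8
assert _as_int(True, 2) == 1             # bool is handled before int
assert _as_int("eight", 2) == 2          # unparseable -> default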
+
+
+ def _now_iso() -> str:
+     return datetime.datetime.now().astimezone().isoformat()
+
+
+ def normalize_codex_model_id(raw: Any) -> str:
+     """Normalize Codex model ids for `codex exec --model <MODEL>`."""
+
+     return parse_codex_model_spec(raw)[0]
+
+
+ def normalize_codex_reasoning_effort(raw: Any) -> Optional[str]:
+     s = str(raw or "").strip().lower()
+     if not s:
+         return None
+     if s in {"low", "medium", "high", "xhigh"}:
+         return s
+     return None
+
+
+ def parse_codex_model_spec(raw: Any) -> Tuple[str, Optional[str]]:
+     """Parse a "model spec" into (model_id, reasoning_effort).
+
+     Supported forms:
+     - `gpt-5.2-xhigh` (historic effort suffix; Codex CLI rejects this as `--model`, so we split it)
+     - `gpt-5.2 (reasoning xhigh, ...)` (copy/paste from UI)
+     - `gpt-5.2` (no effort specified)
+     """
+
+     s = str(raw or "").strip()
+     if not s:
+         return "gpt-5.2", None
+
+     # Codex CLI expects model ids without spaces, so use the first token by default.
+     token = s.split(" ", 1)[0].strip()
+     model_id = token or "gpt-5.2"
+
+     low = model_id.lower()
+     effort: Optional[str] = None
+     for cand in ("xhigh", "high", "medium", "low"):
+         suffix = f"-{cand}"
+         if low.endswith(suffix):
+             model_id = model_id[: -len(suffix)].strip() or "gpt-5.2"
+             effort = cand
+             break
+
+     if effort is None:
+         # Try to recover effort from a descriptive string (best-effort).
+         m = re.search(r"(?:reasoning|effort)\s*[:=]?\s*(xhigh|high|medium|low)\b", s, flags=re.IGNORECASE)
+         if m:
+             effort = normalize_codex_reasoning_effort(m.group(1))
+
+     return (model_id.strip() or "gpt-5.2"), effort
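
Taken together, these helpers split any of the supported spec shapes into a clean (model_id, effort) pair. A quick sketch of expected results, assuming the module is importable as abstractgateway.maintenance.backlog_exec_runner (path from the file list above):

from abstractgateway.maintenance.backlog_exec_runner import parse_codex_model_spec

assert parse_codex_model_spec("gpt-5.2-xhigh") == ("gpt-5.2", "xhigh")                    # suffix form is split
assert parse_codex_model_spec("gpt-5.2 (reasoning xhigh, fast)") == ("gpt-5.2", "xhigh")  # UI copy/paste
assert parse_codex_model_spec("gpt-5.2") == ("gpt-5.2", None)
assert parse_codex_model_spec(None) == ("gpt-5.2", None)                                  # falls back to the default id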
+
+
+ def _triage_repo_root_from_env() -> Optional[Path]:
+     raw = str(os.getenv("ABSTRACTGATEWAY_TRIAGE_REPO_ROOT") or os.getenv("ABSTRACT_TRIAGE_REPO_ROOT") or "").strip()
+     if not raw:
+         return None
+     try:
+         return Path(raw).expanduser().resolve()
+     except Exception:
+         return None
+
+
+ def _backlog_exec_run_id(request_id: str) -> str:
+     rid = str(request_id or "").strip()
+     return f"backlog_exec_{rid}" if rid else "backlog_exec"
+
+
+ def _safe_relpath_from_repo_root(*, repo_root: Path, path: Path) -> Optional[str]:
+     try:
+         rel = path.resolve().relative_to(repo_root.resolve())
+     except Exception:
+         return None
+     return str(rel).replace("\\", "/")
+
+
+ def _tail_text(path: Path, *, max_bytes: int = 12_000) -> str:
+     try:
+         with open(path, "rb") as f:
+             f.seek(0, os.SEEK_END)
+             size = int(f.tell() or 0)
+             start = max(0, size - int(max_bytes))
+             f.seek(start, os.SEEK_SET)
+             data = f.read(int(max_bytes))
+         return data.decode("utf-8", errors="replace")
+     except Exception:
+         return ""
+
+
+ def _probe_service_url(url: str, *, timeout_s: float = 0.9) -> Dict[str, Any]:
+     s = str(url or "").strip()
+     if not s:
+         return {"ok": False, "error": "missing_url"}
+     try:
+         u = urlparse(s)
+     except Exception:
+         return {"ok": False, "error": "invalid_url"}
+     host = str(u.hostname or "localhost").strip() or "localhost"
+     port = int(u.port or (443 if (u.scheme or "").lower() == "https" else 80))
+     try:
+         with socket.create_connection((host, port), timeout=float(timeout_s)):
+             return {"ok": True, "host": host, "port": port}
+     except Exception as e:
+         return {"ok": False, "host": host, "port": port, "error": str(e)}
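
Note the probe is a bare TCP connect, not an HTTP request: it proves a listener is bound, nothing more. The two result shapes, with an illustrative host and port:

_probe_service_url("http://127.0.0.1:8900")
# listener up   -> {"ok": True, "host": "127.0.0.1", "port": 8900}
# nothing bound -> {"ok": False, "host": "127.0.0.1", "port": 8900, "error": "..."}
_probe_service_url("https://uat.example")  # no explicit port -> defaults to 443 for https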
+
+
+ def _repo_pythonpath_entries(repo_root: Path) -> List[str]:
+     """Best-effort PYTHONPATH entries to prefer repo code over editable installs.
+
+     Many packages use a `src/` layout; some (e.g., abstractcode) do not.
+     We include:
+     - <pkg>/src when present
+     - otherwise <pkg> when it contains a pyproject.toml
+     """
+     root = Path(repo_root).resolve()
+     out: List[str] = []
+     try:
+         for child in sorted(root.iterdir(), key=lambda p: p.name):
+             if not child.is_dir():
+                 continue
+             if child.name.startswith("."):
+                 continue
+             if not (child / "pyproject.toml").exists():
+                 continue
+             src = child / "src"
+             if src.is_dir():
+                 out.append(str(src))
+             else:
+                 out.append(str(child))
+     except Exception:
+         return []
+     return out
+
+
+ def _build_pythonpath_for_repo(*, repo_root: Path, base_env: Optional[Dict[str, str]] = None) -> str:
+     env = base_env or os.environ
+     existing = str(env.get("PYTHONPATH") or "").strip()
+     parts = [p for p in _repo_pythonpath_entries(repo_root) if p]
+     if existing:
+         parts.append(existing)
+     # De-dupe preserving order.
+     seen: set[str] = set()
+     deduped: List[str] = []
+     for p in parts:
+         ps = str(p).strip()
+         if not ps or ps in seen:
+             continue
+         seen.add(ps)
+         deduped.append(ps)
+     return os.pathsep.join(deduped)
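
For a hypothetical multi-repo workspace, the merge order is: repo entries first (so worktree code shadows editable installs), then any pre-existing PYTHONPATH, with duplicates dropped. A sketch, assuming the helpers above are in scope and the layout shown in the comments:

from pathlib import Path

# Hypothetical layout: /ws/abstractgateway has pyproject.toml + src/ (src layout);
# /ws/abstractcode has pyproject.toml but no src/ (the docstring names abstractcode as flat).
merged = _build_pythonpath_for_repo(repo_root=Path("/ws"), base_env={"PYTHONPATH": "/extra"})
# merged == "/ws/abstractcode" + os.pathsep + "/ws/abstractgateway/src" + os.pathsep + "/extra"
# (entries come out in sorted child order; the caller's existing PYTHONPATH is appended last)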
+
+
+ def _try_symlink(*, src: Path, dst: Path) -> None:
+     try:
+         if not src.exists():
+             return
+         if dst.exists() or dst.is_symlink():
+             return
+         dst.parent.mkdir(parents=True, exist_ok=True)
+         dst.symlink_to(src)
+     except Exception:
+         pass
+
+
+ def _default_uat_root(repo_root: Path) -> Path:
+     return (Path(repo_root).resolve() / "untracked" / "backlog_exec_uat").resolve()
+
+
+ def _uat_root_from_env(*, repo_root: Path) -> Path:
+     raw = str(os.getenv("ABSTRACTGATEWAY_BACKLOG_EXEC_UAT_DIR") or os.getenv("ABSTRACT_BACKLOG_EXEC_UAT_DIR") or "").strip()
+     if not raw:
+         return _default_uat_root(repo_root)
+     try:
+         p = Path(raw).expanduser()
+         if not p.is_absolute():
+             p = (Path(repo_root).resolve() / p).resolve()
+         else:
+             p = p.resolve()
+         return p
+     except Exception:
+         return _default_uat_root(repo_root)
+
+
+ def _ensure_uat_workspace(*, repo_root: Path, request_id: str) -> Optional[Path]:
+     """Create (or reuse) a candidate workspace for this request.
+
+     The AbstractFramework root is a *workspace of repos* (no root `.git/`), so we
+     cannot create a single worktree at repo_root.
+
+     v0 approach (multi-repo):
+     - Create <repo_root>/untracked/backlog_exec_uat/workspaces/<request_id>/
+     - For each top-level repo dir with a `.git/`, create a detached worktree:
+       git -C <repo> worktree add --detach <candidate>/<repo_name> HEAD
+     - Best-effort symlink shared caches (node_modules, .venv) to avoid re-install overhead
+     """
+     rid = str(request_id or "").strip()
+     if not rid:
+         return None
+
+     uat_root = _uat_root_from_env(repo_root=repo_root)
+     ws_root = (uat_root / "workspaces").resolve()
+     ws_root.mkdir(parents=True, exist_ok=True)
+
+     ws = (ws_root / rid).resolve()
+     if ws.exists():
+         return ws
+
+     try:
+         ws.mkdir(parents=True, exist_ok=True)
+     except Exception:
+         return None
+
+     rr = Path(repo_root).resolve()
+
+     def _is_git_repo_dir(p: Path) -> bool:
+         try:
+             g = (p / ".git").resolve()
+         except Exception:
+             g = p / ".git"
+         return g.exists()
+
+     repos: List[Path] = []
+     try:
+         for child in sorted(rr.iterdir(), key=lambda p: p.name):
+             if not child.is_dir():
+                 continue
+             if child.name.startswith("."):
+                 continue
+             if not _is_git_repo_dir(child):
+                 continue
+             repos.append(child)
+     except Exception:
+         return None
+
+     required = {"abstractgateway", "abstractobserver", "abstractcode", "abstractflow", "docs"}
+     required_present = {p.name for p in repos if p.name in required}
+
+     # Create required repos first (we need them to render a usable UAT stack).
+     def _worktree_add(repo: Path) -> bool:
+         dst = (ws / repo.name).resolve()
+         if dst.exists():
+             return True
+         try:
+             subprocess.run(
+                 ["git", "-C", str(repo), "worktree", "add", "--detach", str(dst), "HEAD"],
+                 check=True,
+                 stdout=subprocess.DEVNULL,
+                 stderr=subprocess.DEVNULL,
+                 timeout=90.0,
+             )
+             return True
+         except Exception:
+             return False
+
+     for repo in repos:
+         if repo.name in required_present:
+             if not _worktree_add(repo):
+                 return None
+
+     # Best-effort: remaining repos (non-fatal).
+     for repo in repos:
+         if repo.name in required_present:
+             continue
+         _worktree_add(repo)
+
+     # Best-effort: reuse repo-local caches to make UAT worktrees runnable without reinstall.
+     _try_symlink(src=(rr / ".venv").resolve(), dst=(ws / ".venv").resolve())
+     _try_symlink(src=(rr / "abstractobserver" / "node_modules").resolve(), dst=(ws / "abstractobserver" / "node_modules").resolve())
+     _try_symlink(src=(rr / "abstractcode" / "web" / "node_modules").resolve(), dst=(ws / "abstractcode" / "web" / "node_modules").resolve())
+     _try_symlink(src=(rr / "abstractflow" / "web" / "frontend" / "node_modules").resolve(), dst=(ws / "abstractflow" / "web" / "frontend" / "node_modules").resolve())
+
+     return ws
+
+
+ _VALID_EXEC_MODE_RE = re.compile(r"^(uat|candidate|inplace)$", re.IGNORECASE)
+
+
+ def _default_exec_mode() -> str:
+     raw = str(os.getenv("ABSTRACTGATEWAY_BACKLOG_EXEC_MODE") or os.getenv("ABSTRACT_BACKLOG_EXEC_MODE") or "").strip().lower()
+     if raw and _VALID_EXEC_MODE_RE.match(raw):
+         return "uat" if raw in {"uat", "candidate"} else "inplace"
+     # Safer default: do not mutate prod repo unless explicitly requested.
+     return "uat"
+
+
+ def _is_uat_mode(mode: str) -> bool:
+     m = str(mode or "").strip().lower()
+     return m in {"uat", "candidate"}
+
+
+ def _is_safe_repo_relpath(path: str) -> bool:
+     p = str(path or "").replace("\\", "/").strip()
+     if not p:
+         return False
+     if p.startswith("/") or p.startswith("\\"):
+         return False
+     # Windows drive/path style.
+     if re.match(r"^[a-zA-Z]:[/\\]", p):
+         return False
+     parts = [seg for seg in p.split("/") if seg]
+     if not parts:
+         return False
+     for seg in parts:
+         if seg in {".", ".."}:
+             return False
+     return True
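
This predicate is the shared gate for everything that later copies or deletes files during promotion, so its rejects matter more than its accepts. A few concrete cases (filenames illustrative):

assert _is_safe_repo_relpath("docs/backlog/planned/123-example.md") is True
assert _is_safe_repo_relpath("/etc/passwd") is False       # absolute path
assert _is_safe_repo_relpath("C:\\temp\\x") is False       # Windows drive path
assert _is_safe_repo_relpath("../outside") is False        # traversal segment
assert _is_safe_repo_relpath("") is False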
+
+
+ def _write_candidate_git_diff_patch(*, candidate_root: Path, out_path: Path) -> bool:
+     """Write a best-effort git patch for a candidate workspace.
+
+     - If candidate_root is a git repo, we `git diff` directly.
+     - If candidate_root is a multi-repo workspace, we concatenate per-repo diffs and
+       prefix paths with the repo folder (so the patch is readable at the workspace root).
+     """
+     try:
+         out_path.parent.mkdir(parents=True, exist_ok=True)
+     except Exception:
+         return False
+     try:
+         root = Path(candidate_root).resolve()
+         if (root / ".git").exists():
+             with open(out_path, "wb") as f:
+                 subprocess.run(
+                     ["git", "-C", str(root), "diff", "--binary", "--no-color"],
+                     check=False,
+                     stdout=f,
+                     stderr=subprocess.DEVNULL,
+                     timeout=30.0,
+                 )
+             return out_path.exists()
+
+         repos: List[Path] = []
+         for child in sorted(root.iterdir(), key=lambda p: p.name):
+             if not child.is_dir():
+                 continue
+             if child.name.startswith("."):
+                 continue
+             if not (child / ".git").exists():
+                 continue
+             repos.append(child)
+
+         wrote_any = False
+         with open(out_path, "wb") as f:
+             for repo in repos:
+                 # Prefix the diff paths so the patch is meaningful at the workspace root.
+                 src_prefix = f"a/{repo.name}/"
+                 dst_prefix = f"b/{repo.name}/"
+                 proc = subprocess.run(
+                     [
+                         "git",
+                         "-C",
+                         str(repo),
+                         "diff",
+                         "--binary",
+                         "--no-color",
+                         f"--src-prefix={src_prefix}",
+                         f"--dst-prefix={dst_prefix}",
+                     ],
+                     check=False,
+                     stdout=subprocess.PIPE,
+                     stderr=subprocess.DEVNULL,
+                     timeout=30.0,
+                 )
+                 if proc.stdout:
+                     f.write(proc.stdout)
+                     if not proc.stdout.endswith(b"\n"):
+                         f.write(b"\n")
+                     wrote_any = True
+         return out_path.exists() and wrote_any
+     except Exception:
+         return False
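
Because each per-repo diff is rewritten with --src-prefix=a/<repo>/ and --dst-prefix=b/<repo>/, the concatenated patch addresses files relative to the workspace root. Consumption is therefore plain `git apply` from that root; a hedged sketch relying on standard git behavior (not part of this diff):

import subprocess

# git strips one leading path component by default (-p1), so a/<repo>/... and
# b/<repo>/... resolve when the patch is checked from the workspace root.
subprocess.run(["git", "apply", "--check", "candidate.patch"], cwd="/ws", check=True)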
+
+
+ _BACKLOG_ID_FROM_FILENAME_RE = re.compile(r"^(?P<id>\d+)-")
+
+
+ def _maybe_fix_backlog_move_in_candidate(*, candidate_root: Path, req: Dict[str, Any]) -> Dict[str, Any]:
+     """Best-effort enforce the 'planned -> completed' move inside a candidate workspace.
+
+     Some agent runs create the completed backlog file but forget to delete the planned one.
+     That results in duplicated backlog items (both Planned and Completed) after promotion.
+
+     Safety constraints:
+     - Only touches a planned backlog item under the `docs/` repo.
+     - Only deletes the planned file when a completed file for the same numeric id exists.
+     """
+     out: Dict[str, Any] = {"ok": False, "action": "none"}
+     backlog = req.get("backlog") if isinstance(req.get("backlog"), dict) else {}
+     relpath = str(backlog.get("relpath") or "").replace("\\", "/").strip()
+     if not relpath:
+         out["reason"] = "missing_relpath"
+         return out
+     if not relpath.startswith("docs/backlog/planned/"):
+         out["reason"] = "not_planned_docs_backlog"
+         return out
+
+     planned_name = relpath.split("/")[-1]
+     m = _BACKLOG_ID_FROM_FILENAME_RE.match(planned_name)
+     if not m:
+         out["reason"] = "unparseable_id"
+         return out
+     item_id = str(m.group("id") or "").strip()
+     if not item_id:
+         out["reason"] = "missing_id"
+         return out
+
+     root = Path(candidate_root).resolve()
+     planned_path = (root / relpath).resolve()
+     try:
+         planned_path.relative_to(root)
+     except Exception:
+         out["reason"] = "unsafe_planned_path"
+         return out
+
+     completed_dir = (root / "docs" / "backlog" / "completed").resolve()
+     try:
+         completed_dir.relative_to(root)
+     except Exception:
+         out["reason"] = "unsafe_completed_dir"
+         return out
+
+     matches: List[Path] = []
+     try:
+         if completed_dir.exists():
+             matches = sorted(completed_dir.glob(f"{item_id}-*.md"))
+     except Exception:
+         matches = []
+
+     if not matches:
+         out["reason"] = "no_completed_file"
+         return out
+
+     if not planned_path.exists():
+         out["ok"] = True
+         out["reason"] = "planned_missing"
+         out["completed_relpath"] = str(matches[0].relative_to(root)).replace("\\", "/")
+         return out
+
+     try:
+         planned_path.unlink()
+     except Exception as e:
+         out["reason"] = f"unlink_failed: {e}"
+         return out
+
+     out.update(
+         {
+             "ok": True,
+             "action": "deleted_planned_duplicate",
+             "planned_relpath": relpath,
+             "completed_relpath": str(matches[0].relative_to(root)).replace("\\", "/"),
+         }
+     )
+     return out
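
The cleanup leans entirely on the numeric-prefix naming convention captured in _BACKLOG_ID_FROM_FILENAME_RE. A runnable scratch-directory sketch, assuming the function above is in scope (item id and filenames illustrative):

import tempfile
from pathlib import Path

# Reproduce the duplicate-file situation in a scratch candidate root.
root = Path(tempfile.mkdtemp())
(root / "docs" / "backlog" / "planned").mkdir(parents=True)
(root / "docs" / "backlog" / "completed").mkdir(parents=True)
(root / "docs" / "backlog" / "planned" / "123-add-feature.md").write_text("planned")
(root / "docs" / "backlog" / "completed" / "123-add-feature.md").write_text("completed")

req = {"backlog": {"relpath": "docs/backlog/planned/123-add-feature.md"}}
out = _maybe_fix_backlog_move_in_candidate(candidate_root=root, req=req)
# out["action"] == "deleted_planned_duplicate" and the planned copy is now unlinked.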
+
+
+ def _candidate_manifest_path(run_dir: Path) -> Path:
+     return (Path(run_dir).resolve() / "candidate_manifest.json").resolve()
+
+
+ _SKIP_UNTRACKED_DIRS = {
+     ".git",
+     ".hg",
+     ".svn",
+     ".venv",
+     "venv",
+     "node_modules",
+     "__pycache__",
+     ".pytest_cache",
+     ".mypy_cache",
+     ".ruff_cache",
+     ".cache",
+     "dist",
+     "build",
+     ".next",
+     ".turbo",
+     "runtime",
+     "logs",
+     "untracked",
+ }
+
+
+ def _git_stdout(*, cwd: Path, args: List[str], timeout_s: float) -> str:
+     try:
+         proc = subprocess.run(
+             ["git", *args],
+             cwd=str(Path(cwd).resolve()),
+             stdout=subprocess.PIPE,
+             stderr=subprocess.DEVNULL,
+             text=True,
+             timeout=float(timeout_s),
+             check=False,
+         )
+     except Exception:
+         return ""
+     return str(proc.stdout or "")
+
+
+ def _collect_repo_candidate_manifest(repo_dir: Path) -> Dict[str, Any]:
+     repo = Path(repo_dir).resolve()
+     name = repo.name
+
+     copy_paths: set[str] = set()
+     delete_paths: set[str] = set()
+     skipped: List[Dict[str, str]] = []
+
+     diff = _git_stdout(cwd=repo, args=["diff", "--name-status"], timeout_s=30.0)
+     for raw in diff.splitlines():
+         line = raw.strip("\n").rstrip("\r")
+         if not line:
+             continue
+         parts = line.split("\t")
+         if not parts:
+             continue
+         st = str(parts[0] or "").strip()
+         if not st:
+             continue
+
+         def _add_copy(p: str) -> None:
+             ps = str(p or "").strip()
+             if not _is_safe_repo_relpath(ps):
+                 skipped.append({"path": ps, "reason": "unsafe_path"})
+                 return
+             copy_paths.add(ps.replace("\\", "/"))
+
+         def _add_delete(p: str) -> None:
+             ps = str(p or "").strip()
+             if not _is_safe_repo_relpath(ps):
+                 skipped.append({"path": ps, "reason": "unsafe_path"})
+                 return
+             delete_paths.add(ps.replace("\\", "/"))
+
+         if st.startswith("R") and len(parts) >= 3:
+             _add_delete(parts[1])
+             _add_copy(parts[2])
+             continue
+         if st.startswith("C") and len(parts) >= 3:
+             _add_copy(parts[2])
+             continue
+
+         code = st[0]
+         if code in {"M", "A"} and len(parts) >= 2:
+             _add_copy(parts[1])
+         elif code == "D" and len(parts) >= 2:
+             _add_delete(parts[1])
+         else:
+             # Unknown / ignored.
+             continue
+
+     untracked = _git_stdout(cwd=repo, args=["ls-files", "--others", "--exclude-standard"], timeout_s=30.0)
+     for raw in untracked.splitlines():
+         p = str(raw or "").strip()
+         if not p:
+             continue
+         p = p.replace("\\", "/")
+         if not _is_safe_repo_relpath(p):
+             skipped.append({"path": p, "reason": "unsafe_path"})
+             continue
+         segs = [s for s in p.split("/") if s]
+         if any(s in _SKIP_UNTRACKED_DIRS for s in segs):
+             skipped.append({"path": p, "reason": "skipped_untracked_dir"})
+             continue
+         copy_paths.add(p)
+
+     # If a path is deleted, don't also try to copy it.
+     copy_paths.difference_update(delete_paths)
+
+     return {
+         "repo": name,
+         "repo_relpath": name,
+         "copy": sorted(copy_paths),
+         "delete": sorted(delete_paths),
+         "skipped": skipped[:500],
+     }
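
Each repo entry keeps copy and delete disjoint: renames feed both sets, and deletions win over copies. The resulting JSON shape, with illustrative repo-relative paths:

{
  "repo": "abstractgateway",
  "repo_relpath": "abstractgateway",
  "copy": ["src/abstractgateway/app.py", "src/abstractgateway/new_module.py"],
  "delete": ["src/abstractgateway/old_module.py"],
  "skipped": [{"path": "node_modules/x.js", "reason": "skipped_untracked_dir"}]
}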
+
+
+ def _write_candidate_manifest(*, candidate_root: Path, run_dir: Path, request_id: str, candidate_relpath: str) -> bool:
+     try:
+         run_dir.mkdir(parents=True, exist_ok=True)
+     except Exception:
+         return False
+
+     root = Path(candidate_root).resolve()
+     repos: List[Path] = []
+     try:
+         for child in sorted(root.iterdir(), key=lambda p: p.name):
+             if not child.is_dir():
+                 continue
+             if child.name.startswith("."):
+                 continue
+             if not (child / ".git").exists():
+                 continue
+             repos.append(child)
+     except Exception:
+         return False
+
+     repoman: List[Dict[str, Any]] = []
+     for repo in repos:
+         try:
+             repoman.append(_collect_repo_candidate_manifest(repo))
+         except Exception:
+             continue
+
+     out_path = _candidate_manifest_path(run_dir)
+     obj = {
+         "version": 1,
+         "created_at": _now_iso(),
+         "request_id": str(request_id),
+         "candidate_relpath": str(candidate_relpath or ""),
+         "repos": repoman,
+     }
+     try:
+         out_path.write_text(json.dumps(obj, ensure_ascii=False, indent=2, sort_keys=True) + "\n", encoding="utf-8")
+         return out_path.exists()
+     except Exception:
+         return False
+
+
+ def _latest_feedback_text(req: Dict[str, Any]) -> str:
+     fb = req.get("feedback")
+     if not isinstance(fb, list) or not fb:
+         return ""
+     last = fb[-1]
+     if isinstance(last, dict):
+         return str(last.get("text") or "")
+     return str(last or "")
+
+
+ def _prompt_with_feedback(prompt: str, *, req: Dict[str, Any]) -> str:
+     base = str(prompt or "").strip()
+     fb = _latest_feedback_text(req).strip()
+     if not fb:
+         return base
+     if len(fb) > 20_000:
+         fb = fb[:20_000].rstrip() + "\n…(truncated)…"
+     attempt_raw = req.get("attempt")
+     try:
+         attempt = int(attempt_raw) if attempt_raw is not None else None
+     except Exception:
+         attempt = None
+     suffix = "\n\n---\n"
+     suffix += "Operator QA feedback"
+     if attempt is not None:
+         suffix += f" (attempt {attempt})"
+     suffix += ":\n"
+     suffix += fb
+     suffix += "\n---\n"
+     return base + suffix
+
+
+ def _build_gateway_stores(*, gateway_data_dir: Path):
+     # Lazy import: keep this module usable in minimal contexts.
+     from ..config import GatewayHostConfig  # type: ignore
+     from ..stores import build_file_stores, build_sqlite_stores  # type: ignore
+
+     base = Path(gateway_data_dir).expanduser().resolve()
+     cfg = GatewayHostConfig.from_env()
+     backend = str(getattr(cfg, "store_backend", "file") or "file").strip().lower() or "file"
+     if backend == "sqlite":
+         return build_sqlite_stores(base_dir=base, db_path=getattr(cfg, "db_path", None))
+     return build_file_stores(base_dir=base)
+
+
+ def _store_backlog_exec_logs_to_ledger(
+     *,
+     gateway_data_dir: Path,
+     request_id: str,
+     req: Dict[str, Any],
+     prompt: str,
+     run_dir: Path,
+     result: Dict[str, Any],
+ ) -> Dict[str, Any]:
+     """Persist backlog execution logs as a durable run+ledger entry.
+
+     Contract:
+     - Never raise (best-effort). Caller should still record completion in request JSON.
+     - Store the full logs as artifacts and reference them from both RunState.vars and a ledger StepRecord.
+     """
+
+     try:
+         stores = _build_gateway_stores(gateway_data_dir=gateway_data_dir)
+     except Exception:
+         return {}
+
+     try:
+         from abstractruntime.core.models import RunState, RunStatus, StepRecord, StepStatus  # type: ignore
+         from abstractruntime.storage.artifacts import artifact_ref  # type: ignore
+     except Exception:
+         # Honor the "never raise" contract when abstractruntime is unavailable.
+         return {}
+
+     rid = _backlog_exec_run_id(request_id)
+     now = _now_iso()
+
+     backlog = req.get("backlog") if isinstance(req.get("backlog"), dict) else {}
+     backlog_queue = req.get("backlog_queue") if isinstance(req.get("backlog_queue"), dict) else {}
+     backlog_kind = str(backlog.get("kind") or "").strip()
+     backlog_filename = str(backlog.get("filename") or "").strip()
+     backlog_relpath = str(backlog.get("relpath") or "").strip()
+     item_id = None
+     try:
+         item_id = int(str(backlog_filename).split("-", 1)[0])
+     except Exception:
+         item_id = None
+
+     base_tags = {
+         "kind": "backlog_exec_log",
+         "request_id": str(request_id),
+         "backlog_kind": backlog_kind,
+         "backlog_filename": backlog_filename,
+         "backlog_relpath": backlog_relpath,
+         "backlog_item_id": str(item_id) if item_id is not None else "",
+     }
+     base_tags = {k: v for k, v in base_tags.items() if isinstance(v, str) and v.strip()}
+
+     def _store_file(*, path: Path, name: str, content_type: str) -> Optional[str]:
+         try:
+             if not path.exists():
+                 return None
+             data = path.read_bytes()
+             meta = stores.artifact_store.store(data, content_type=content_type, run_id=rid, tags={**base_tags, "name": name})
+             return str(getattr(meta, "artifact_id", "") or "").strip() or None
+         except Exception:
+             return None
+
+     events_id = _store_file(path=(run_dir / "codex_events.jsonl").resolve(), name="events", content_type="application/jsonl")
+     stderr_id = _store_file(path=(run_dir / "codex_stderr.log").resolve(), name="stderr", content_type="text/plain")
+     last_id = _store_file(path=(run_dir / "codex_last_message.txt").resolve(), name="last_message", content_type="text/plain")
+     patch_id = _store_file(path=(run_dir / "candidate.patch").resolve(), name="candidate_patch", content_type="text/x-diff")
+     manifest_id = _store_file(path=(run_dir / "candidate_manifest.json").resolve(), name="candidate_manifest", content_type="application/json")
+
+     log_artifacts = {
+         "events": artifact_ref(events_id) if events_id else None,
+         "stderr": artifact_ref(stderr_id) if stderr_id else None,
+         "last_message": artifact_ref(last_id) if last_id else None,
+         "candidate_patch": artifact_ref(patch_id) if patch_id else None,
+         "candidate_manifest": artifact_ref(manifest_id) if manifest_id else None,
+     }
+
+     ok = bool(result.get("ok") is True)
+     status = RunStatus.COMPLETED if ok else RunStatus.FAILED
+     started_at = str(req.get("started_at") or result.get("started_at") or now)
+     finished_at = str(req.get("finished_at") or result.get("finished_at") or now)
+     err = str(result.get("error") or "").strip() or None
+
+     run_vars = {
+         "kind": "backlog_exec",
+         "request_id": str(request_id),
+         "backlog": backlog,
+         "backlog_queue": backlog_queue if backlog_queue else {},
+         "executor": req.get("executor") if isinstance(req.get("executor"), dict) else {},
+         "execution_mode": str(req.get("execution_mode") or ""),
+         "candidate_relpath": str(req.get("candidate_relpath") or ""),
+         "candidate_patch_relpath": str(req.get("candidate_patch_relpath") or ""),
+         "candidate_manifest_relpath": str(req.get("candidate_manifest_relpath") or ""),
+         "run_dir_relpath": str(req.get("run_dir_relpath") or ""),
+         "prompt": str(prompt or ""),
+         "result": {
+             "ok": ok,
+             "exit_code": result.get("exit_code"),
+             "error": err,
+             "log_artifacts": log_artifacts,
+         },
+     }
+
+     try:
+         run = RunState(
+             run_id=rid,
+             workflow_id="backlog_exec",
+             status=status,
+             current_node="backlog_exec",
+             vars=run_vars,
+             output=None,
+             error=err,
+             created_at=started_at,
+             updated_at=finished_at,
+             actor_id="backlog_exec_runner",
+             session_id=None,
+             parent_run_id=None,
+         )
+         stores.run_store.save(run)
+     except Exception:
+         pass
+
+     try:
+         rec = StepRecord(
+             run_id=rid,
+             step_id=str(request_id),
+             node_id="backlog_exec",
+             status=StepStatus.COMPLETED if ok else StepStatus.FAILED,
+             effect={"type": "backlog_exec", "payload": {"request_id": str(request_id), "backlog": backlog, "backlog_queue": backlog_queue}},
+             result={
+                 "ok": ok,
+                 "exit_code": result.get("exit_code"),
+                 "error": err,
+                 "log_artifacts": log_artifacts,
+             },
+             error=err,
+             started_at=started_at,
+             ended_at=finished_at,
+             actor_id="backlog_exec_runner",
+             session_id=None,
+             attempt=1,
+             idempotency_key=f"backlog_exec:{request_id}",
+         )
+         stores.ledger_store.append(rec)
+     except Exception:
+         pass
+
+     return {"ledger_run_id": rid, "log_artifacts": log_artifacts}
+
+
+ @dataclass(frozen=True)
+ class BacklogExecRunnerConfig:
+     enabled: bool = False
+     poll_interval_s: float = 2.0
+     workers: int = 2
+     executor: str = "none"  # none|codex_cli|workflow_bundle
+     notify: bool = False
+
+     # Codex executor defaults.
+     codex_bin: str = "codex"
+     codex_model: str = "gpt-5.2"
+     codex_reasoning_effort: str = ""  # low|medium|high|xhigh (optional)
+     codex_sandbox: str = "workspace-write"
+     codex_approvals: str = "never"
+
+     # Execution safety (v0)
+     exec_mode_default: str = "uat"  # uat|inplace
+
+     @staticmethod
+     def from_env() -> "BacklogExecRunnerConfig":
+         enabled = _as_bool(os.getenv("ABSTRACTGATEWAY_BACKLOG_EXEC_RUNNER"), False) or _as_bool(
+             os.getenv("ABSTRACT_BACKLOG_EXEC_RUNNER"), False
+         )
+         poll_s_raw = os.getenv("ABSTRACTGATEWAY_BACKLOG_EXEC_POLL_S") or os.getenv("ABSTRACT_BACKLOG_EXEC_POLL_S") or ""
+         try:
+             poll_s = float(str(poll_s_raw).strip()) if str(poll_s_raw).strip() else 2.0
+         except Exception:
+             poll_s = 2.0
+         workers_raw = (
+             os.getenv("ABSTRACTGATEWAY_BACKLOG_EXEC_WORKERS")
+             or os.getenv("ABSTRACT_BACKLOG_EXEC_WORKERS")
+             or os.getenv("ABSTRACTGATEWAY_BACKLOG_EXEC_THREADS")
+             or os.getenv("ABSTRACT_BACKLOG_EXEC_THREADS")
+             or ""
+         )
+         workers = _as_int(workers_raw, 2)
+
+         executor = str(os.getenv("ABSTRACTGATEWAY_BACKLOG_EXECUTOR") or os.getenv("ABSTRACT_BACKLOG_EXECUTOR") or "none").strip().lower()
+         notify = _as_bool(os.getenv("ABSTRACTGATEWAY_BACKLOG_EXEC_NOTIFY"), False) or _as_bool(
+             os.getenv("ABSTRACT_BACKLOG_EXEC_NOTIFY"), False
+         )
+
+         codex_bin = str(os.getenv("ABSTRACTGATEWAY_BACKLOG_CODEX_BIN") or os.getenv("ABSTRACT_BACKLOG_CODEX_BIN") or "codex").strip() or "codex"
+         raw_model = str(os.getenv("ABSTRACTGATEWAY_BACKLOG_CODEX_MODEL") or os.getenv("ABSTRACT_BACKLOG_CODEX_MODEL") or "gpt-5.2").strip() or "gpt-5.2"
+         raw_effort = str(
+             os.getenv("ABSTRACTGATEWAY_BACKLOG_CODEX_REASONING_EFFORT")
+             or os.getenv("ABSTRACT_BACKLOG_CODEX_REASONING_EFFORT")
+             or ""
+         ).strip()
+         codex_model, inferred_effort = parse_codex_model_spec(raw_model)
+         codex_effort = normalize_codex_reasoning_effort(raw_effort) or inferred_effort or None
+         codex_sandbox = (
+             str(os.getenv("ABSTRACTGATEWAY_BACKLOG_CODEX_SANDBOX") or os.getenv("ABSTRACT_BACKLOG_CODEX_SANDBOX") or "workspace-write").strip()
+             or "workspace-write"
+         )
+         codex_approvals = (
+             str(os.getenv("ABSTRACTGATEWAY_BACKLOG_CODEX_APPROVALS") or os.getenv("ABSTRACT_BACKLOG_CODEX_APPROVALS") or "never").strip()
+             or "never"
+         )
+
+         return BacklogExecRunnerConfig(
+             enabled=bool(enabled),
+             poll_interval_s=max(0.25, float(poll_s)),
+             workers=max(1, min(32, int(workers))),
+             executor=executor,
+             notify=bool(notify),
+             codex_bin=codex_bin,
+             codex_model=codex_model,
+             codex_reasoning_effort=str(codex_effort or "").strip(),
+             codex_sandbox=codex_sandbox,
+             codex_approvals=codex_approvals,
+             exec_mode_default=_default_exec_mode(),
+         )
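
from_env reads each knob from an ABSTRACTGATEWAY_-prefixed variable with an ABSTRACT_-prefixed fallback. A minimal enabling configuration, with illustrative values and variable names taken from the code above:

import os

os.environ["ABSTRACTGATEWAY_BACKLOG_EXEC_RUNNER"] = "1"              # _as_bool truthy
os.environ["ABSTRACTGATEWAY_BACKLOG_EXECUTOR"] = "codex_cli"
os.environ["ABSTRACTGATEWAY_BACKLOG_CODEX_MODEL"] = "gpt-5.2-xhigh"  # split by parse_codex_model_spec
os.environ["ABSTRACTGATEWAY_BACKLOG_EXEC_WORKERS"] = "4"

cfg = BacklogExecRunnerConfig.from_env()
assert cfg.enabled and cfg.workers == 4
assert (cfg.codex_model, cfg.codex_reasoning_effort) == ("gpt-5.2", "xhigh")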
+
+
+ def exec_queue_dir(gateway_data_dir: Path) -> Path:
+     return (Path(gateway_data_dir).expanduser().resolve() / "backlog_exec_queue").resolve()
+
+
+ def exec_runs_dir(gateway_data_dir: Path) -> Path:
+     return (Path(gateway_data_dir).expanduser().resolve() / "backlog_exec_runs").resolve()
+
+
+ def uat_deploy_lock_path(gateway_data_dir: Path) -> Path:
+     return (Path(gateway_data_dir).expanduser().resolve() / "uat_deploy_lock.json").resolve()
+
+
+ def _load_json(path: Path) -> Dict[str, Any]:
+     try:
+         obj = json.loads(path.read_text(encoding="utf-8", errors="replace"))
+     except Exception:
+         return {}
+     return obj if isinstance(obj, dict) else {}
+
+
+ def _atomic_write_json(path: Path, obj: Dict[str, Any]) -> None:
+     tmp = path.with_suffix(path.suffix + ".tmp")
+     data = json.dumps(obj, ensure_ascii=False, indent=2, sort_keys=True) + "\n"
+     tmp.write_text(data, encoding="utf-8")
+     tmp.replace(path)
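
The queue relies on the usual write-to-temp-then-rename trick: Path.replace is an atomic rename on POSIX, so pollers reading a request file see either the old document or the new one, never a torn write. Usage sketch with an illustrative path:

from pathlib import Path

_atomic_write_json(Path("/tmp/req_123.json"), {"status": "queued"})
# Writes /tmp/req_123.json.tmp first, then renames it over /tmp/req_123.json.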
+
+
+ def _read_uat_deploy_lock(gateway_data_dir: Path) -> Dict[str, Any]:
+     path = uat_deploy_lock_path(gateway_data_dir)
+     if not path.exists():
+         return {}
+     obj = _load_json(path)
+     return obj if isinstance(obj, dict) else {}
+
+
+ def _write_uat_deploy_lock(*, gateway_data_dir: Path, request_id: str, candidate_relpath: str) -> Tuple[bool, str]:
+     """Force-set the shared UAT deploy lock (operator-controlled).
+
+     Returns: (ok, previous_owner_request_id)
+     """
+     rid = str(request_id or "").strip()
+     if not rid:
+         return False, ""
+
+     path = uat_deploy_lock_path(gateway_data_dir)
+     cur = _read_uat_deploy_lock(gateway_data_dir)
+     prev_owner = str(cur.get("owner_request_id") or "").strip()
+
+     now = _now_iso()
+     payload = {
+         "version": 2,
+         "owner_request_id": rid,
+         "candidate_relpath": str(candidate_relpath or "").strip(),
+         "updated_at": now,
+         "previous_owner_request_id": prev_owner,
+         "previous_updated_at": str(cur.get("updated_at") or "").strip(),
+     }
+     try:
+         path.parent.mkdir(parents=True, exist_ok=True)
+         _atomic_write_json(path, payload)
+         return True, prev_owner
+     except Exception:
+         return False, prev_owner
+
+
+ def deploy_uat_for_request(*, gateway_data_dir: Path, repo_root: Path, request_id: str) -> Dict[str, Any]:
+     """Deploy/restart the shared UAT stack for a specific completed request (operator-controlled).
+
+     Safety:
+     - requires `status=awaiting_qa` and `execution_mode=uat`
+     - only points UAT `current` symlink at a repo-root-contained candidate workspace
+     - requires the process manager to be enabled (so we can restart services deterministically)
+     """
+     rid = str(request_id or "").strip()
+     if not rid:
+         return {"ok": False, "error": "missing_request_id"}
+
+     # UAT deploy should make the UAT URLs usable, which requires starting/restarting services.
+     # We keep the process manager opt-in for security, so refuse the deploy when disabled.
+     if not _as_bool(os.getenv("ABSTRACTGATEWAY_ENABLE_PROCESS_MANAGER"), False):
+         try:
+             base = Path(gateway_data_dir).expanduser().resolve()
+             qdir = exec_queue_dir(base)
+             path = (qdir / f"{rid}.json").resolve()
+             if path.exists():
+                 req = _load_json(path)
+                 if isinstance(req, dict):
+                     req["uat_deploy"] = {"at": _now_iso(), "processes": {"status": "skipped", "reason": "process_manager_disabled"}}
+                     req["uat_deploy_error"] = "process_manager_disabled"
+                     try:
+                         _atomic_write_json(path, req)
+                     except Exception:
+                         pass
+         except Exception:
+             pass
+         return {"ok": False, "error": "process_manager_disabled"}
+
+     rr = Path(repo_root).resolve()
+     base = Path(gateway_data_dir).expanduser().resolve()
+     qdir = exec_queue_dir(base)
+     path = (qdir / f"{rid}.json").resolve()
+     try:
+         path.relative_to(qdir)
+     except Exception:
+         return {"ok": False, "error": "invalid_request_path"}
+     if not path.exists():
+         return {"ok": False, "error": "request_not_found"}
+
+     req = _load_json(path)
+     status = str(req.get("status") or "").strip().lower()
+     if status != "awaiting_qa":
+         return {"ok": False, "error": "invalid_status", "status": status}
+
+     exec_mode = str(req.get("execution_mode") or "").strip().lower() or "uat"
+     if exec_mode == "candidate":
+         exec_mode = "uat"
+     if not _is_uat_mode(exec_mode):
+         return {"ok": False, "error": "invalid_execution_mode", "execution_mode": exec_mode}
+
+     candidate_relpath = str(req.get("candidate_relpath") or "").strip()
+     if not candidate_relpath:
+         return {"ok": False, "error": "missing_candidate_relpath"}
+     candidate_root = (rr / candidate_relpath).resolve()
+     try:
+         candidate_root.relative_to(rr)
+     except Exception:
+         return {"ok": False, "error": "unsafe_candidate_path"}
+     if not candidate_root.exists():
+         return {"ok": False, "error": "candidate_missing"}
+
+     ok, prev_owner = _write_uat_deploy_lock(gateway_data_dir=base, request_id=rid, candidate_relpath=candidate_relpath)
+     req["uat_lock_owner_request_id"] = rid
+     req["uat_lock_acquired"] = bool(ok)
+     if prev_owner and prev_owner != rid:
+         req["uat_lock_previous_owner_request_id"] = prev_owner
+     if not ok:
+         try:
+             _atomic_write_json(path, req)
+         except Exception:
+             pass
+         return {"ok": False, "error": "uat_lock_write_failed", "previous_owner_request_id": prev_owner}
+
+     # Operator-controlled deploy/restart: point the stable UAT pointer to this request's candidate.
+     lock_path = uat_deploy_lock_path(base)
+
+     def _release_lock_best_effort() -> None:
+         try:
+             obj = _read_uat_deploy_lock(base)
+             cur_owner = str(obj.get("owner_request_id") or "").strip()
+             if cur_owner and cur_owner != rid:
+                 return
+             if lock_path.exists():
+                 lock_path.unlink()
+         except Exception:
+             pass
+
+     # IMPORTANT: Do NOT call `.resolve()` on the `current` path, because it would resolve the symlink
+     # target (the previous candidate workspace) and break unlink/symlink updates.
+     uat_root = _uat_root_from_env(repo_root=rr).resolve()
+     cur = (uat_root / "current")
+     try:
+         cur.absolute().relative_to(uat_root)
+     except Exception:
+         _release_lock_best_effort()
+         return {"ok": False, "error": "unsafe_uat_current_path"}
+
+     if cur.exists() and not cur.is_symlink():
+         # Safety: we never want to overwrite an on-disk directory/file here.
+         _release_lock_best_effort()
+         return {"ok": False, "error": "uat_current_not_symlink"}
+
+     try:
+         if cur.is_symlink() or cur.exists():
+             cur.unlink()
+     except Exception:
+         _release_lock_best_effort()
+         return {"ok": False, "error": "uat_current_unlink_failed"}
+
+     try:
+         cur.parent.mkdir(parents=True, exist_ok=True)
+         cur.symlink_to(candidate_root)
+         req["uat_current_relpath"] = _safe_relpath_from_repo_root(repo_root=rr, path=cur) or ""
+     except Exception:
+         req["uat_current_relpath"] = ""
+         _release_lock_best_effort()
+         return {"ok": False, "error": "uat_current_symlink_failed"}
+
+     # Mark as no longer pending once we own the lock.
+     req.pop("uat_pending", None)
+
+     deployed: Dict[str, Any] = {}
+     try:
+         from .process_manager import get_process_manager  # type: ignore
+
+         mgr = get_process_manager(base_dir=base, repo_root=rr)
+         for pid in (
+             "gateway_uat",
+             "abstractflow_backend_uat",
+             "abstractflow_frontend_uat",
+             "abstractobserver_uat",
+             "abstractcode_web_uat",
+         ):
+             try:
+                 deployed[pid] = mgr.restart(pid)
+             except Exception as e:
+                 deployed[pid] = {"status": "error", "error": str(e)}
+
+         # Best-effort: liveness probes so the operator can immediately see whether the UAT URLs
+         # are actually reachable (process running != port listening).
+         try:
+             # Give dev servers a moment to bind their ports before probing.
+             time.sleep(0.85)
+             info_by_id = {p.get("id"): p for p in (mgr.list_processes() or []) if isinstance(p, dict)}
+             for pid, st in list(deployed.items()):
+                 if not isinstance(st, dict):
+                     continue
+                 info = info_by_id.get(pid) if isinstance(info_by_id.get(pid), dict) else {}
+                 url = str((info or {}).get("url") or "").strip()
+                 if url:
+                     st["url"] = url
+                     st["probe"] = _probe_service_url(url)
+                     if not bool(st.get("probe", {}).get("ok") is True):
+                         rel = str(st.get("log_relpath") or "").strip()
+                         if rel:
+                             log_path = (base / rel).resolve()
+                             try:
+                                 log_path.relative_to(base)
+                                 st["log_tail"] = _tail_text(log_path, max_bytes=8000)
+                             except Exception:
+                                 pass
+                 deployed[pid] = st
+         except Exception:
+             pass
+
+         req["uat_deploy"] = {"at": _now_iso(), "processes": deployed}
+     except Exception as e:
+         req["uat_deploy_error"] = str(e)
+         deployed = {"status": "error", "error": str(e)}
+
+     try:
+         _atomic_write_json(path, req)
+     except Exception:
+         pass
+
+     return {"ok": True, "request_id": rid, "deployed": deployed}
+
+
+ def _claim_lock(queue_dir: Path, request_id: str) -> Optional[Path]:
+     lock_path = (queue_dir / f"{request_id}.lock").resolve()
+     try:
+         lock_path.relative_to(queue_dir.resolve())
+     except Exception:
+         return None
+     try:
+         with open(lock_path, "x", encoding="utf-8") as f:
+             f.write(_now_iso() + "\n")
+             f.write(f"pid={os.getpid()}\n")
+         return lock_path
+     except FileExistsError:
+         return None
+     except Exception:
+         return None
+
+
+ def _release_lock(lock_path: Optional[Path]) -> None:
+     if lock_path is None:
+         return
+     try:
+         lock_path.unlink()
+     except Exception:
+         pass
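
Worker mutual exclusion rides on open(path, "x"), i.e. exclusive creation: exactly one worker can create <request_id>.lock, and the losers just move on to the next queue file. A sketch with an illustrative request id:

lock = _claim_lock(queue_dir, "req_123")   # queue_dir assumed from exec_queue_dir(...)
if lock is not None:
    try:
        ...  # this worker owns req_123.json for the duration
    finally:
        _release_lock(lock)  # best-effort unlink; note a stale lock left by a crash blocks re-claims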
+
+
+ class BacklogExecutor:
+     name: str = "executor"
+
+     def execute(self, *, prompt: str, repo_root: Path, run_dir: Path, env: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
+         raise NotImplementedError
+
+
+ class CodexCliExecutor(BacklogExecutor):
+     name = "codex_cli"
+
+     def __init__(self, *, bin_path: str, model: str, reasoning_effort: str, sandbox: str, approvals: str):
+         self.bin_path = str(bin_path or "codex").strip() or "codex"
+         self.model = str(model or "").strip() or "gpt-5.2"
+         self.reasoning_effort = str(reasoning_effort or "").strip()
+         self.sandbox = str(sandbox or "").strip() or "workspace-write"
+         self.approvals = str(approvals or "").strip() or "never"
+
+     def execute(self, *, prompt: str, repo_root: Path, run_dir: Path, env: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
+         run_dir.mkdir(parents=True, exist_ok=True)
+         events_path = (run_dir / "codex_events.jsonl").resolve()
+         last_msg_path = (run_dir / "codex_last_message.txt").resolve()
+         stderr_path = (run_dir / "codex_stderr.log").resolve()
+         rel_base = Path("backlog_exec_runs") / str(run_dir.name)
+         model_id, inferred_effort = parse_codex_model_spec(self.model)
+         effort = normalize_codex_reasoning_effort(self.reasoning_effort) or inferred_effort or None
+
+         # Preserve previous attempt logs (best-effort).
+         try:
+             for p in (events_path, last_msg_path, stderr_path):
+                 if not p.exists():
+                     continue
+                 ts = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%dT%H%M%SZ")
+                 p.rename(p.with_name(f"{p.name}.{ts}.bak"))
+         except Exception:
+             pass
+
+         # Keep this invocation compatible with the installed Codex CLI.
+         # As of 2026-01, `codex exec --help` shows no `--ask-for-approval` flag; passing it causes hard failure.
+         # We also force `--` before the prompt to prevent prompt text from being parsed as flags.
+         cmd: List[str] = [
+             self.bin_path,
+             "exec",
+             "--json",
+             "--color",
+             "never",
+             "--output-last-message",
+             str(last_msg_path),
+             "--skip-git-repo-check",
+             "--cd",
+             str(repo_root),
+             "--model",
+             model_id,
+         ]
+         if effort:
+             cmd.extend(["-c", f'model_reasoning_effort="{effort}"'])
+         cmd.extend(["--sandbox", self.sandbox, "--", str(prompt or "")])
+
+         started_at = _now_iso()
+         exit_code = -1
+         err: Optional[str] = None
+         try:
+             run_env = dict(os.environ)
+             if isinstance(env, dict):
+                 for k, v in env.items():
+                     ks = str(k or "").strip()
+                     if not ks:
+                         continue
+                     run_env[ks] = str(v if v is not None else "")
+             with open(events_path, "wb") as out, open(stderr_path, "wb") as errf:
+                 proc = subprocess.run(
+                     cmd,
+                     stdout=out,
+                     stderr=errf,
+                     cwd=str(repo_root),
+                     env=run_env,
+                     check=False,
+                     timeout=None,
+                     stdin=subprocess.DEVNULL,
+                 )
+             exit_code = int(proc.returncode)
+         except FileNotFoundError:
+             err = f"codex binary not found: {self.bin_path}"
+         except Exception as e:
+             err = str(e)
+
+         finished_at = _now_iso()
+         last_msg = ""
+         try:
+             if last_msg_path.exists():
+                 last_msg = last_msg_path.read_text(encoding="utf-8", errors="replace").strip()
+         except Exception:
+             last_msg = ""
+
+         ok = err is None and exit_code == 0
+         return {
+             "ok": bool(ok),
+             "executor": self.name,
+             "model": model_id,
+             "reasoning_effort": effort,
+             "started_at": started_at,
+             "finished_at": finished_at,
+             "exit_code": exit_code,
+             "error": err,
+             "logs": {
+                 "events_relpath": str(rel_base / "codex_events.jsonl").replace("\\", "/"),
+                 "stderr_relpath": str(rel_base / "codex_stderr.log").replace("\\", "/"),
+                 "last_message_relpath": str(rel_base / "codex_last_message.txt").replace("\\", "/"),
+             },
+             "last_message": last_msg[:20000] if last_msg else "",
+         }
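
With the defaults above plus an xhigh effort, the executor builds an argv like the following (read directly from the cmd construction above; angle-bracket values are placeholders):

["codex", "exec", "--json", "--color", "never",
 "--output-last-message", "<run_dir>/codex_last_message.txt",
 "--skip-git-repo-check", "--cd", "<repo_root>",
 "--model", "gpt-5.2",
 "-c", 'model_reasoning_effort="xhigh"',
 "--sandbox", "workspace-write",
 "--", "<prompt>"]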
+
+
+ def _resolve_executor(cfg: BacklogExecRunnerConfig) -> Optional[BacklogExecutor]:
+     ex = str(cfg.executor or "").strip().lower()
+     if not ex or ex == "none":
+         return None
+     if ex in {"codex", "codex_cli", "codex-cli"}:
+         return CodexCliExecutor(
+             bin_path=cfg.codex_bin,
+             model=cfg.codex_model,
+             reasoning_effort=cfg.codex_reasoning_effort,
+             sandbox=cfg.codex_sandbox,
+             approvals=cfg.codex_approvals,
+         )
+     # Future: execute via a bundle workflow run (durable).
+     if ex in {"workflow", "workflow_bundle", "workflow-bundle"}:
+         return None
+     return None
+
+
+ def process_next_backlog_exec_request(
+     *,
+     gateway_data_dir: Path,
+     repo_root: Path,
+     cfg: BacklogExecRunnerConfig,
+ ) -> Tuple[bool, Optional[str]]:
+     """Process a single queued request (best-effort).
+
+     Returns: (processed, request_id).
+     """
+     queue_dir = exec_queue_dir(gateway_data_dir)
+     if not queue_dir.exists():
+         return False, None
+
+     executor = _resolve_executor(cfg)
+     if executor is None:
+         return False, None
+
+     items = sorted([p for p in queue_dir.glob("*.json")], key=lambda p: p.name)
+     for p in items:
+         req = _load_json(p)
+         request_id = str(req.get("request_id") or p.stem).strip()
+         status = str(req.get("status") or "").strip().lower()
+         if not request_id or status != "queued":
+             continue
+
+         lock = _claim_lock(queue_dir, request_id)
+         if lock is None:
+             continue
+
+         try:
+             run_root = exec_runs_dir(gateway_data_dir)
+             run_dir = (run_root / request_id).resolve()
+             run_root.mkdir(parents=True, exist_ok=True)
+
+             exec_mode = str(req.get("execution_mode") or "").strip().lower() or str(getattr(cfg, "exec_mode_default", "") or "").strip().lower()
+             if not exec_mode:
+                 exec_mode = "uat"
+             if exec_mode == "candidate":
+                 exec_mode = "uat"
+             req["execution_mode"] = exec_mode
+
+             candidate_root = repo_root
+             candidate_relpath = ""
+             if _is_uat_mode(exec_mode):
+                 ws = _ensure_uat_workspace(repo_root=repo_root, request_id=request_id)
+                 if ws is None:
+                     req["status"] = "failed"
+                     req["finished_at"] = _now_iso()
+                     req["result"] = {"ok": False, "error": "Failed to create UAT workspace"}
+                     _atomic_write_json(p, req)
+                     return True, request_id
+                 candidate_root = ws
+                 rel = _safe_relpath_from_repo_root(repo_root=repo_root, path=ws)
+                 candidate_relpath = rel or ""
+                 if candidate_relpath:
+                     req["candidate_relpath"] = candidate_relpath
+
+             # Update to running.
+             req["status"] = "running"
+             req["started_at"] = _now_iso()
+             req.setdefault("executor", {})["type"] = executor.name
+             req.setdefault("executor", {})["version"] = "v0"
+             if isinstance(executor, CodexCliExecutor):
+                 try:
+                     model_id, inferred_effort = parse_codex_model_spec(executor.model)
+                 except Exception:
+                     model_id, inferred_effort = ("gpt-5.2", None)
+                 effort = normalize_codex_reasoning_effort(getattr(executor, "reasoning_effort", "")) or inferred_effort or None
+                 req.setdefault("executor", {})["model"] = model_id
+                 if effort:
+                     req.setdefault("executor", {})["reasoning_effort"] = effort
+                 req.setdefault("executor", {})["sandbox"] = str(getattr(executor, "sandbox", "") or "").strip()
+                 # Older queued requests won't have these keys; fill for observability.
+                 if "target_model" not in req:
+                     req["target_model"] = model_id
+                 if effort and "target_reasoning_effort" not in req:
+                     req["target_reasoning_effort"] = effort
+             req.setdefault("run_dir_relpath", str(Path("backlog_exec_runs") / request_id).replace("\\", "/"))
+             _atomic_write_json(p, req)
+
+             prompt = _prompt_with_feedback(str(req.get("prompt") or ""), req=req)
+             if not prompt:
+                 req["status"] = "failed"
+                 req["finished_at"] = _now_iso()
+                 req["result"] = {"ok": False, "error": "Missing prompt"}
+                 _atomic_write_json(p, req)
+                 return True, request_id
+
+             exec_env: Dict[str, str] = {}
+             if _is_uat_mode(exec_mode):
+                 exec_env["PYTHONPATH"] = _build_pythonpath_for_repo(repo_root=candidate_root)
+
+             result = executor.execute(prompt=prompt, repo_root=candidate_root, run_dir=run_dir, env=exec_env)
+
+             # Backlog hygiene (UAT only): some runs create the completed file but forget to delete the planned one.
+             # Do this *before* producing patch/manifest so the deletion is visible and gets promoted.
+             if _is_uat_mode(exec_mode) and isinstance(result, dict) and bool(result.get("ok") is True):
+                 try:
+                     cleanup = _maybe_fix_backlog_move_in_candidate(candidate_root=candidate_root, req=req)
+                     if isinstance(cleanup, dict) and cleanup:
+                         req["candidate_backlog_cleanup"] = cleanup
+                 except Exception as e:
+                     req["candidate_backlog_cleanup"] = {"ok": False, "action": "error", "error": str(e)}
+
+             # Candidate review artifacts (best-effort): patch + manifest.
+             patch_relpath = ""
+             manifest_relpath = ""
+             if _is_uat_mode(exec_mode):
+                 patch_path = (run_dir / "candidate.patch").resolve()
+                 if _write_candidate_git_diff_patch(candidate_root=candidate_root, out_path=patch_path):
+                     patch_relpath = str(Path("backlog_exec_runs") / request_id / "candidate.patch").replace("\\", "/")
+                     req["candidate_patch_relpath"] = patch_relpath
+                 manifest_path = _candidate_manifest_path(run_dir)
+                 if _write_candidate_manifest(
+                     candidate_root=candidate_root,
+                     run_dir=run_dir,
+                     request_id=request_id,
+                     candidate_relpath=candidate_relpath,
+                 ):
+                     manifest_relpath = str(Path("backlog_exec_runs") / request_id / manifest_path.name).replace("\\", "/")
+                     req["candidate_manifest_relpath"] = manifest_relpath
+
+             # Persist a durable copy of the execution log into the gateway ledger (best-effort).
+             persisted = _store_backlog_exec_logs_to_ledger(
+                 gateway_data_dir=gateway_data_dir,
+                 request_id=request_id,
+                 req=req,
+                 prompt=prompt,
+                 run_dir=run_dir,
+                 result=result if isinstance(result, dict) else {},
+             )
+             if isinstance(result, dict) and isinstance(persisted, dict) and persisted:
+                 result.setdefault("ledger", {}).update(persisted)
+
+             req["result"] = result
+             req["finished_at"] = str(result.get("finished_at") or _now_iso())
+             if bool(result.get("ok") is True):
+                 # Success always requires an explicit human decision (UAT approve or inplace approve).
+                 req["status"] = "awaiting_qa"
+                 if _is_uat_mode(exec_mode):
+                     req["uat_deploy"] = {"at": _now_iso(), "processes": {"status": "not_deployed", "reason": "manual_operator_action_required"}}
+                 else:
+                     req["inplace_warning"] = "Execution mode was inplace (prod may already be mutated)."
+             else:
+                 req["status"] = "failed"
+             _atomic_write_json(p, req)
+
+             if cfg.notify:
+                 _notify_backlog_exec_done(req=req)
+
+             return True, request_id
+         finally:
+             _release_lock(lock)
+
+     return False, None
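
Seen from the queue directory, a request moves through a small state machine recorded in its own JSON file; every field below appears in the code above:

# <gateway_data_dir>/backlog_exec_queue/<request_id>.json
#   "queued"       -> written by the enqueue side
#   "running"      -> a worker created <request_id>.lock and started the executor
#   "awaiting_qa"  -> result.ok was True; an operator must approve or deploy to UAT
#   "failed"       -> workspace creation, missing prompt, or executor failure
# Run artifacts live under backlog_exec_runs/<request_id>/: codex_events.jsonl,
# codex_stderr.log, codex_last_message.txt, candidate.patch, candidate_manifest.json.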
+
+
+ def _notify_backlog_exec_done(*, req: Dict[str, Any]) -> None:
+     status = str(req.get("status") or "").strip() or "unknown"
+     request_id = str(req.get("request_id") or "").strip() or "unknown"
+     backlog = req.get("backlog") if isinstance(req.get("backlog"), dict) else {}
+     rel = str(backlog.get("relpath") or backlog.get("filename") or "").strip()
+     result = req.get("result") if isinstance(req.get("result"), dict) else {}
+     exit_code = result.get("exit_code")
+     last_msg = str(result.get("last_message") or "").strip()
+     run_dir = str(req.get("run_dir_relpath") or "").strip()
+
+     subject = f"[AbstractFramework] Backlog exec {status}: {request_id}"
+     body_lines = [
+         f"status: {status}",
+         f"request_id: {request_id}",
+         f"backlog: {rel}" if rel else "backlog: (unknown)",
+         f"run_dir: {run_dir}" if run_dir else None,
+         f"exit_code: {exit_code}" if exit_code is not None else None,
+         "",
+     ]
+     if last_msg:
+         body_lines.append("last_message:")
+         body_lines.append(last_msg[:3500])
+     body = "\n".join([l for l in body_lines if l is not None])
+
+     try:
+         send_telegram_notification(text=body[:3500])
+     except Exception:
+         pass
+     try:
+         send_email_notification(subject=subject, body_text=body)
+     except Exception:
+         pass
+
+
+ class BacklogExecRunner:
+     def __init__(self, *, gateway_data_dir: Path, cfg: BacklogExecRunnerConfig):
+         self.gateway_data_dir = Path(gateway_data_dir).expanduser().resolve()
+         self.cfg = cfg
+         self._stop = threading.Event()
+         self._threads: list[threading.Thread] = []
+         self._last_error: Optional[str] = None
+
+     def last_error(self) -> Optional[str]:
+         return self._last_error
+
+     def start(self) -> None:
+         if not self.cfg.enabled:
+             return
+         if self.is_running():
+             return
+         self._stop.clear()
+         self._threads = []
+         workers = max(1, min(32, int(getattr(self.cfg, "workers", 1) or 1)))
+         for i in range(workers):
+             name = f"BacklogExecWorker-{i + 1}"
+             t = threading.Thread(target=self._run, name=name, daemon=True)
+             self._threads.append(t)
+             t.start()
+
+     def stop(self) -> None:
+         self._stop.set()
+         threads = list(self._threads)
+         self._threads = []
+         for t in threads:
+             try:
+                 t.join(timeout=5.0)
+             except Exception:
+                 pass
+
+     def is_running(self) -> bool:
+         if self._stop.is_set():
+             return False
+         return any(t.is_alive() for t in self._threads)
+
+     def _run(self) -> None:
+         repo_root = _triage_repo_root_from_env()
+         if repo_root is None:
+             # Can't run without repo root; stay idle.
+             while not self._stop.wait(self.cfg.poll_interval_s):
+                 continue
+             return
+
+         while not self._stop.is_set():
+             try:
+                 processed, _rid = process_next_backlog_exec_request(
+                     gateway_data_dir=self.gateway_data_dir, repo_root=repo_root, cfg=self.cfg
+                 )
+                 if processed:
+                     # Drain quickly when there is work.
+                     continue
+             except Exception as e:
+                 # Best-effort; do not crash the gateway.
+                 try:
+                     self._last_error = str(e)
+                 except Exception:
+                     pass
+             self._stop.wait(self.cfg.poll_interval_s)