AbstractRuntime 0.2.0-py3-none-any.whl → 0.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. abstractruntime/__init__.py +83 -3
  2. abstractruntime/core/config.py +82 -2
  3. abstractruntime/core/event_keys.py +62 -0
  4. abstractruntime/core/models.py +17 -1
  5. abstractruntime/core/policy.py +74 -3
  6. abstractruntime/core/runtime.py +3334 -28
  7. abstractruntime/core/vars.py +103 -2
  8. abstractruntime/evidence/__init__.py +10 -0
  9. abstractruntime/evidence/recorder.py +325 -0
  10. abstractruntime/history_bundle.py +772 -0
  11. abstractruntime/integrations/abstractcore/__init__.py +6 -0
  12. abstractruntime/integrations/abstractcore/constants.py +19 -0
  13. abstractruntime/integrations/abstractcore/default_tools.py +258 -0
  14. abstractruntime/integrations/abstractcore/effect_handlers.py +2622 -32
  15. abstractruntime/integrations/abstractcore/embeddings_client.py +69 -0
  16. abstractruntime/integrations/abstractcore/factory.py +149 -16
  17. abstractruntime/integrations/abstractcore/llm_client.py +891 -55
  18. abstractruntime/integrations/abstractcore/mcp_worker.py +587 -0
  19. abstractruntime/integrations/abstractcore/observability.py +80 -0
  20. abstractruntime/integrations/abstractcore/session_attachments.py +946 -0
  21. abstractruntime/integrations/abstractcore/summarizer.py +154 -0
  22. abstractruntime/integrations/abstractcore/tool_executor.py +509 -31
  23. abstractruntime/integrations/abstractcore/workspace_scoped_tools.py +561 -0
  24. abstractruntime/integrations/abstractmemory/__init__.py +3 -0
  25. abstractruntime/integrations/abstractmemory/effect_handlers.py +946 -0
  26. abstractruntime/memory/__init__.py +21 -0
  27. abstractruntime/memory/active_context.py +751 -0
  28. abstractruntime/memory/active_memory.py +452 -0
  29. abstractruntime/memory/compaction.py +105 -0
  30. abstractruntime/memory/kg_packets.py +164 -0
  31. abstractruntime/memory/memact_composer.py +175 -0
  32. abstractruntime/memory/recall_levels.py +163 -0
  33. abstractruntime/memory/token_budget.py +86 -0
  34. abstractruntime/rendering/__init__.py +17 -0
  35. abstractruntime/rendering/agent_trace_report.py +256 -0
  36. abstractruntime/rendering/json_stringify.py +136 -0
  37. abstractruntime/scheduler/scheduler.py +93 -2
  38. abstractruntime/storage/__init__.py +7 -2
  39. abstractruntime/storage/artifacts.py +175 -32
  40. abstractruntime/storage/base.py +17 -1
  41. abstractruntime/storage/commands.py +339 -0
  42. abstractruntime/storage/in_memory.py +41 -1
  43. abstractruntime/storage/json_files.py +210 -14
  44. abstractruntime/storage/observable.py +136 -0
  45. abstractruntime/storage/offloading.py +433 -0
  46. abstractruntime/storage/sqlite.py +836 -0
  47. abstractruntime/visualflow_compiler/__init__.py +29 -0
  48. abstractruntime/visualflow_compiler/adapters/__init__.py +11 -0
  49. abstractruntime/visualflow_compiler/adapters/agent_adapter.py +126 -0
  50. abstractruntime/visualflow_compiler/adapters/context_adapter.py +109 -0
  51. abstractruntime/visualflow_compiler/adapters/control_adapter.py +615 -0
  52. abstractruntime/visualflow_compiler/adapters/effect_adapter.py +1051 -0
  53. abstractruntime/visualflow_compiler/adapters/event_adapter.py +307 -0
  54. abstractruntime/visualflow_compiler/adapters/function_adapter.py +97 -0
  55. abstractruntime/visualflow_compiler/adapters/memact_adapter.py +114 -0
  56. abstractruntime/visualflow_compiler/adapters/subflow_adapter.py +74 -0
  57. abstractruntime/visualflow_compiler/adapters/variable_adapter.py +316 -0
  58. abstractruntime/visualflow_compiler/compiler.py +3832 -0
  59. abstractruntime/visualflow_compiler/flow.py +247 -0
  60. abstractruntime/visualflow_compiler/visual/__init__.py +13 -0
  61. abstractruntime/visualflow_compiler/visual/agent_ids.py +29 -0
  62. abstractruntime/visualflow_compiler/visual/builtins.py +1376 -0
  63. abstractruntime/visualflow_compiler/visual/code_executor.py +214 -0
  64. abstractruntime/visualflow_compiler/visual/executor.py +2804 -0
  65. abstractruntime/visualflow_compiler/visual/models.py +211 -0
  66. abstractruntime/workflow_bundle/__init__.py +52 -0
  67. abstractruntime/workflow_bundle/models.py +236 -0
  68. abstractruntime/workflow_bundle/packer.py +317 -0
  69. abstractruntime/workflow_bundle/reader.py +87 -0
  70. abstractruntime/workflow_bundle/registry.py +587 -0
  71. abstractruntime-0.4.1.dist-info/METADATA +177 -0
  72. abstractruntime-0.4.1.dist-info/RECORD +86 -0
  73. abstractruntime-0.4.1.dist-info/entry_points.txt +2 -0
  74. abstractruntime-0.2.0.dist-info/METADATA +0 -163
  75. abstractruntime-0.2.0.dist-info/RECORD +0 -32
  76. {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.1.dist-info}/WHEEL +0 -0
  77. {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,836 @@
+"""abstractruntime.storage.sqlite
+
+SQLite-backed durability stores for production-oriented single-host deployments.
+
+Design goals:
+- Keep the durable execution substrate dependency-light (stdlib `sqlite3`).
+- Provide restart-safe storage with real indexing (avoid directory scans + JSON parsing loops).
+- Preserve the existing store interfaces (RunStore/LedgerStore/CommandStore) so hosts can
+  switch backends without rewriting runtime logic.
+
+Scope (backlog 446):
+- RunStore (checkpointed RunState JSON)
+- LedgerStore (append-only StepRecord JSON with per-run seq)
+- CommandStore + CommandCursorStore (durable inbox + consumer cursor)
+- WAIT_UNTIL index (wait_index table) so runners can query due runs efficiently.
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+import sqlite3
+import threading
+import uuid
+from dataclasses import asdict
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple
+
+from ..core.models import RunState, RunStatus, StepRecord, WaitReason, WaitState
+from .base import LedgerStore, RunStore
+from .commands import CommandAppendResult, CommandCursorStore, CommandRecord, CommandStore
+
+logger = logging.getLogger(__name__)
+
+
+def _utc_now_iso() -> str:
+    return datetime.now(timezone.utc).isoformat()
+
+
+def _is_json_value(value: Any) -> bool:
+    if value is None or isinstance(value, (str, int, float, bool)):
+        return True
+    if isinstance(value, list):
+        return all(_is_json_value(v) for v in value)
+    if isinstance(value, dict):
+        return all(isinstance(k, str) and _is_json_value(v) for k, v in value.items())
+    return False
+
+
+def _runstate_from_dict(data: Dict[str, Any]) -> RunState:
+    raw_status = data.get("status")
+    status = raw_status if isinstance(raw_status, RunStatus) else RunStatus(str(raw_status))
+
+    waiting: Optional[WaitState] = None
+    raw_waiting = data.get("waiting")
+    if isinstance(raw_waiting, dict):
+        raw_reason = raw_waiting.get("reason")
+        if raw_reason is None:
+            raise ValueError("Persisted waiting state missing 'reason'")
+        reason = raw_reason if isinstance(raw_reason, WaitReason) else WaitReason(str(raw_reason))
+        waiting = WaitState(
+            reason=reason,
+            wait_key=raw_waiting.get("wait_key"),
+            until=raw_waiting.get("until"),
+            resume_to_node=raw_waiting.get("resume_to_node"),
+            result_key=raw_waiting.get("result_key"),
+            prompt=raw_waiting.get("prompt"),
+            choices=raw_waiting.get("choices"),
+            allow_free_text=bool(raw_waiting.get("allow_free_text", True)),
+            details=raw_waiting.get("details"),
+        )
+
+    return RunState(
+        run_id=str(data.get("run_id") or ""),
+        workflow_id=str(data.get("workflow_id") or ""),
+        status=status,
+        current_node=str(data.get("current_node") or ""),
+        vars=dict(data.get("vars") or {}),
+        waiting=waiting,
+        output=data.get("output"),
+        error=data.get("error"),
+        created_at=str(data.get("created_at") or ""),
+        updated_at=str(data.get("updated_at") or ""),
+        actor_id=data.get("actor_id"),
+        session_id=data.get("session_id"),
+        parent_run_id=data.get("parent_run_id"),
+    )
+
+
+class SqliteDatabase:
+    """Small helper around a SQLite file with per-thread connections."""
+
+    def __init__(self, path: str | Path) -> None:
+        self._path = Path(path).expanduser().resolve()
+        self._path.parent.mkdir(parents=True, exist_ok=True)
+        self._local = threading.local()
+        self._init_lock = threading.Lock()
+        self._initialized = False
+        self._ensure_schema()
+
+    @property
+    def path(self) -> Path:
+        return self._path
+
+    def connection(self) -> sqlite3.Connection:
+        conn = getattr(self._local, "conn", None)
+        if conn is None:
+            conn = sqlite3.connect(str(self._path), timeout=30.0)
+            conn.row_factory = sqlite3.Row
+            self._apply_pragmas(conn)
+            self._local.conn = conn
+        return conn
+
+    def _apply_pragmas(self, conn: sqlite3.Connection) -> None:
+        # WAL improves writer/reader concurrency for the API+runner split.
+        try:
+            conn.execute("PRAGMA journal_mode=WAL;")
+        except Exception:
+            pass
+        try:
+            conn.execute("PRAGMA synchronous=NORMAL;")
+        except Exception:
+            pass
+        try:
+            conn.execute("PRAGMA foreign_keys=ON;")
+        except Exception:
+            pass
+        try:
+            conn.execute("PRAGMA busy_timeout=5000;")
+        except Exception:
+            pass
+
+    def _ensure_schema(self) -> None:
+        with self._init_lock:
+            if self._initialized:
+                return
+            conn = sqlite3.connect(str(self._path), timeout=30.0)
+            try:
+                conn.row_factory = sqlite3.Row
+                self._apply_pragmas(conn)
+
+                # --- Runs ---
+                conn.execute(
+                    """
+                    CREATE TABLE IF NOT EXISTS runs (
+                        run_id TEXT PRIMARY KEY,
+                        workflow_id TEXT NOT NULL,
+                        status TEXT NOT NULL,
+                        wait_reason TEXT,
+                        wait_until TEXT,
+                        parent_run_id TEXT,
+                        actor_id TEXT,
+                        session_id TEXT,
+                        created_at TEXT,
+                        updated_at TEXT,
+                        run_json TEXT NOT NULL
+                    );
+                    """
+                )
+                conn.execute("CREATE INDEX IF NOT EXISTS idx_runs_status_updated ON runs(status, updated_at DESC);")
+                conn.execute("CREATE INDEX IF NOT EXISTS idx_runs_workflow_updated ON runs(workflow_id, updated_at DESC);")
+                conn.execute("CREATE INDEX IF NOT EXISTS idx_runs_parent ON runs(parent_run_id);")
+                conn.execute("CREATE INDEX IF NOT EXISTS idx_runs_waiting ON runs(status, wait_reason, wait_until);")
+
+                # --- WAIT_UNTIL index (scheduler) ---
+                conn.execute(
+                    """
+                    CREATE TABLE IF NOT EXISTS wait_index (
+                        run_id TEXT PRIMARY KEY,
+                        next_due_iso TEXT NOT NULL,
+                        updated_at_iso TEXT NOT NULL,
+                        status TEXT
+                    );
+                    """
+                )
+                conn.execute("CREATE INDEX IF NOT EXISTS idx_wait_index_next_due ON wait_index(next_due_iso);")
+
+                # --- Ledger ---
+                conn.execute(
+                    """
+                    CREATE TABLE IF NOT EXISTS ledger (
+                        run_id TEXT NOT NULL,
+                        seq INTEGER NOT NULL,
+                        record_json TEXT NOT NULL,
+                        PRIMARY KEY (run_id, seq)
+                    );
+                    """
+                )
+                conn.execute("CREATE INDEX IF NOT EXISTS idx_ledger_run_seq ON ledger(run_id, seq);")
+                conn.execute(
+                    """
+                    CREATE TABLE IF NOT EXISTS ledger_heads (
+                        run_id TEXT PRIMARY KEY,
+                        last_seq INTEGER NOT NULL
+                    );
+                    """
+                )
+                # Backfill ledger_heads for upgraded DBs.
+                #
+                # When `ledger_heads` is introduced after `ledger` already has rows, future appends must
+                # continue from the existing MAX(seq) for each run_id (otherwise we would re-allocate
+                # seq starting at 1 and hit `UNIQUE constraint failed: ledger.run_id, ledger.seq`).
+                #
+                # This is idempotent and safe to run on every startup.
+                conn.execute(
+                    """
+                    INSERT INTO ledger_heads (run_id, last_seq)
+                    SELECT run_id, MAX(seq) AS last_seq
+                    FROM ledger
+                    GROUP BY run_id
+                    ON CONFLICT(run_id) DO UPDATE SET last_seq =
+                        CASE
+                            WHEN excluded.last_seq > ledger_heads.last_seq THEN excluded.last_seq
+                            ELSE ledger_heads.last_seq
+                        END;
+                    """
+                )
+
+                # --- Commands (durable inbox) ---
+                conn.execute(
+                    """
+                    CREATE TABLE IF NOT EXISTS commands (
+                        seq INTEGER PRIMARY KEY AUTOINCREMENT,
+                        command_id TEXT NOT NULL UNIQUE,
+                        run_id TEXT NOT NULL,
+                        type TEXT NOT NULL,
+                        payload_json TEXT NOT NULL,
+                        ts TEXT NOT NULL,
+                        client_id TEXT
+                    );
+                    """
+                )
+                conn.execute("CREATE INDEX IF NOT EXISTS idx_commands_run_id ON commands(run_id);")
+
+                # --- Command consumer cursors ---
+                conn.execute(
+                    """
+                    CREATE TABLE IF NOT EXISTS command_cursors (
+                        consumer_id TEXT PRIMARY KEY,
+                        cursor INTEGER NOT NULL,
+                        updated_at TEXT NOT NULL
+                    );
+                    """
+                )
+
+                conn.commit()
+                self._initialized = True
+            finally:
+                try:
+                    conn.close()
+                except Exception:
+                    pass
+
+
+class SqliteRunStore(RunStore):
+    """SQLite-backed RunStore with QueryableRunStore methods and a WAIT_UNTIL index."""
+
+    def __init__(self, db: SqliteDatabase) -> None:
+        self._db = db
+
+    def save(self, run: RunState) -> None:
+        wait_reason: Optional[str] = None
+        wait_until: Optional[str] = None
+        if run.waiting is not None:
+            try:
+                wait_reason = str(getattr(run.waiting.reason, "value", run.waiting.reason))
+            except Exception:
+                wait_reason = None
+            if run.waiting.reason == WaitReason.UNTIL:
+                wait_until = str(run.waiting.until) if run.waiting.until else None
+
+        payload = json.dumps(asdict(run), ensure_ascii=False)
+
+        conn = self._db.connection()
+        with conn:
+            conn.execute(
+                """
+                INSERT INTO runs (
+                    run_id, workflow_id, status, wait_reason, wait_until,
+                    parent_run_id, actor_id, session_id,
+                    created_at, updated_at,
+                    run_json
+                )
+                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+                ON CONFLICT(run_id) DO UPDATE SET
+                    workflow_id=excluded.workflow_id,
+                    status=excluded.status,
+                    wait_reason=excluded.wait_reason,
+                    wait_until=excluded.wait_until,
+                    parent_run_id=excluded.parent_run_id,
+                    actor_id=excluded.actor_id,
+                    session_id=excluded.session_id,
+                    updated_at=excluded.updated_at,
+                    run_json=excluded.run_json;
+                """,
+                (
+                    str(run.run_id),
+                    str(run.workflow_id),
+                    str(getattr(run.status, "value", run.status)),
+                    wait_reason,
+                    wait_until,
+                    str(run.parent_run_id) if run.parent_run_id else None,
+                    str(run.actor_id) if run.actor_id else None,
+                    str(run.session_id) if run.session_id else None,
+                    str(run.created_at),
+                    str(run.updated_at),
+                    payload,
+                ),
+            )
+
+            # Maintain WAIT_UNTIL index (only applies to WAITING runs).
+            if run.status == RunStatus.WAITING and wait_reason == WaitReason.UNTIL.value and wait_until:
+                conn.execute(
+                    """
+                    INSERT INTO wait_index (run_id, next_due_iso, updated_at_iso, status)
+                    VALUES (?, ?, ?, ?)
+                    ON CONFLICT(run_id) DO UPDATE SET
+                        next_due_iso=excluded.next_due_iso,
+                        updated_at_iso=excluded.updated_at_iso,
+                        status=excluded.status;
+                    """,
+                    (str(run.run_id), str(wait_until), str(run.updated_at), "waiting_until"),
+                )
+            else:
+                conn.execute("DELETE FROM wait_index WHERE run_id = ?;", (str(run.run_id),))
+
+    def load(self, run_id: str) -> Optional[RunState]:
+        rid = str(run_id or "").strip()
+        if not rid:
+            return None
+        conn = self._db.connection()
+        row = conn.execute("SELECT run_json FROM runs WHERE run_id = ?;", (rid,)).fetchone()
+        if row is None:
+            return None
+        try:
+            data = json.loads(str(row["run_json"] or "{}"))
+        except Exception:
+            return None
+        if not isinstance(data, dict):
+            return None
+        try:
+            return _runstate_from_dict(data)
+        except Exception:
+            return None
+
+    # --- QueryableRunStore methods ---
+
+    def list_runs(
+        self,
+        *,
+        status: Optional[RunStatus] = None,
+        wait_reason: Optional[WaitReason] = None,
+        workflow_id: Optional[str] = None,
+        limit: int = 100,
+    ) -> List[RunState]:
+        clauses: list[str] = []
+        params: list[Any] = []
+
+        if status is not None:
+            clauses.append("status = ?")
+            params.append(str(getattr(status, "value", status)))
+        if workflow_id is not None:
+            clauses.append("workflow_id = ?")
+            params.append(str(workflow_id))
+        if wait_reason is not None:
+            clauses.append("wait_reason = ?")
+            params.append(str(getattr(wait_reason, "value", wait_reason)))
+
+        where = "WHERE " + " AND ".join(clauses) if clauses else ""
+        lim = max(1, int(limit or 100))
+
+        conn = self._db.connection()
+        rows = conn.execute(
+            f"SELECT run_json FROM runs {where} ORDER BY updated_at DESC LIMIT ?;",
+            (*params, lim),
+        ).fetchall()
+
+        out: List[RunState] = []
+        for row in rows or []:
+            try:
+                data = json.loads(str(row["run_json"] or "{}"))
+            except Exception:
+                continue
+            if not isinstance(data, dict):
+                continue
+            try:
+                out.append(_runstate_from_dict(data))
+            except Exception:
+                continue
+        return out
+
+    def list_run_index(
+        self,
+        *,
+        status: Optional[RunStatus] = None,
+        workflow_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        root_only: bool = False,
+        limit: int = 100,
+    ) -> List[Dict[str, Any]]:
+        clauses: list[str] = []
+        params: list[Any] = []
+
+        if status is not None:
+            clauses.append("status = ?")
+            params.append(str(getattr(status, "value", status)))
+        if workflow_id is not None:
+            clauses.append("workflow_id = ?")
+            params.append(str(workflow_id))
+        if session_id is not None:
+            clauses.append("session_id = ?")
+            params.append(str(session_id))
+        if bool(root_only):
+            clauses.append("(parent_run_id IS NULL OR parent_run_id = '')")
+
+        where = "WHERE " + " AND ".join(clauses) if clauses else ""
+        lim = max(1, int(limit or 100))
+
+        conn = self._db.connection()
+        rows = conn.execute(
+            f"""
+            SELECT
+                run_id, workflow_id, status,
+                wait_reason, wait_until,
+                parent_run_id, actor_id, session_id,
+                created_at, updated_at
+            FROM runs
+            {where}
+            ORDER BY updated_at DESC
+            LIMIT ?;
+            """,
+            (*params, lim),
+        ).fetchall()
+
+        out: List[Dict[str, Any]] = []
+        for row in rows or []:
+            out.append(
+                {
+                    "run_id": str(row["run_id"] or ""),
+                    "workflow_id": str(row["workflow_id"] or ""),
+                    "status": str(row["status"] or ""),
+                    "wait_reason": str(row["wait_reason"] or "") or None,
+                    "wait_until": str(row["wait_until"] or "") or None,
+                    "parent_run_id": str(row["parent_run_id"] or "") or None,
+                    "actor_id": str(row["actor_id"] or "") or None,
+                    "session_id": str(row["session_id"] or "") or None,
+                    "created_at": str(row["created_at"] or "") or None,
+                    "updated_at": str(row["updated_at"] or "") or None,
+                }
+            )
+        return out
+
+    def list_due_wait_until(
+        self,
+        *,
+        now_iso: str,
+        limit: int = 100,
+    ) -> List[RunState]:
+        now = str(now_iso or "").strip()
+        lim = max(1, int(limit or 100))
+        conn = self._db.connection()
+        rows = conn.execute(
+            """
+            SELECT r.run_json
+            FROM wait_index w
+            JOIN runs r ON r.run_id = w.run_id
+            WHERE w.next_due_iso <= ?
+            ORDER BY w.next_due_iso ASC
+            LIMIT ?;
+            """,
+            (now, lim),
+        ).fetchall()
+
+        out: List[RunState] = []
+        for row in rows or []:
+            try:
+                data = json.loads(str(row["run_json"] or "{}"))
+            except Exception:
+                continue
+            if not isinstance(data, dict):
+                continue
+            try:
+                out.append(_runstate_from_dict(data))
+            except Exception:
+                continue
+        return out
+
+    def list_children(
+        self,
+        *,
+        parent_run_id: str,
+        status: Optional[RunStatus] = None,
+    ) -> List[RunState]:
+        parent = str(parent_run_id or "").strip()
+        if not parent:
+            return []
+        clauses = ["parent_run_id = ?"]
+        params: list[Any] = [parent]
+
+        if status is not None:
+            clauses.append("status = ?")
+            params.append(str(getattr(status, "value", status)))
+
+        where = "WHERE " + " AND ".join(clauses)
+        conn = self._db.connection()
+        rows = conn.execute(
+            f"SELECT run_json FROM runs {where} ORDER BY created_at ASC, run_id ASC;",
+            tuple(params),
+        ).fetchall()
+
+        out: List[RunState] = []
+        for row in rows or []:
+            try:
+                data = json.loads(str(row["run_json"] or "{}"))
+            except Exception:
+                continue
+            if not isinstance(data, dict):
+                continue
+            try:
+                out.append(_runstate_from_dict(data))
+            except Exception:
+                continue
+        return out
+
+
+class SqliteLedgerStore(LedgerStore):
+    """SQLite-backed append-only ledger store with per-run seq."""
+
+    def __init__(self, db: SqliteDatabase) -> None:
+        self._db = db
+
+    def append(self, record: StepRecord) -> None:
+        run_id = str(record.run_id or "").strip()
+        if not run_id:
+            raise ValueError("StepRecord.run_id must be non-empty")
+
+        payload = json.dumps(asdict(record), ensure_ascii=False)
+        conn = self._db.connection()
+        with conn:
+            conn.execute(
+                """
+                INSERT INTO ledger_heads (run_id, last_seq)
+                VALUES (?, 0)
+                ON CONFLICT(run_id) DO NOTHING;
+                """,
+                (run_id,),
+            )
+            conn.execute("UPDATE ledger_heads SET last_seq = last_seq + 1 WHERE run_id = ?;", (run_id,))
+            row = conn.execute("SELECT last_seq FROM ledger_heads WHERE run_id = ?;", (run_id,)).fetchone()
+            if row is None:
+                raise RuntimeError("Failed to allocate ledger seq")
+            seq = int(row["last_seq"] or 0)
+            try:
+                conn.execute(
+                    "INSERT INTO ledger (run_id, seq, record_json) VALUES (?, ?, ?);",
+                    (run_id, int(seq), payload),
+                )
+            except sqlite3.IntegrityError as e:
+                msg = str(e)
+                if "UNIQUE constraint failed: ledger.run_id, ledger.seq" in msg:
+                    logger.error(
+                        "SqliteLedgerStore.append failed due to duplicate seq allocation "
+                        "(run_id=%s seq=%s db=%s). This usually indicates concurrent writers running "
+                        "a non-atomic seq allocator or a corrupted ledger_heads table.",
+                        run_id,
+                        seq,
+                        self._db.path,
+                    )
+                raise sqlite3.IntegrityError(f"{msg} (run_id={run_id!r}, seq={seq}, db={self._db.path})") from e
+
+    def list(self, run_id: str) -> List[Dict[str, Any]]:
+        rid = str(run_id or "").strip()
+        if not rid:
+            return []
+        conn = self._db.connection()
+        rows = conn.execute(
+            "SELECT record_json FROM ledger WHERE run_id = ? ORDER BY seq ASC;",
+            (rid,),
+        ).fetchall()
+        out: List[Dict[str, Any]] = []
+        for row in rows or []:
+            try:
+                obj = json.loads(str(row["record_json"] or "{}"))
+            except Exception:
+                continue
+            if isinstance(obj, dict):
+                out.append(obj)
+        return out
+
+    def count(self, run_id: str) -> int:
+        rid = str(run_id or "").strip()
+        if not rid:
+            return 0
+        conn = self._db.connection()
+        row = conn.execute("SELECT last_seq FROM ledger_heads WHERE run_id = ?;", (rid,)).fetchone()
+        if row is None:
+            return 0
+        try:
+            return int(row["last_seq"] or 0)
+        except Exception:
+            return 0
+
+    def count_many(self, run_ids: List[str]) -> Dict[str, int]:
+        ids = [str(r or "").strip() for r in (run_ids or []) if str(r or "").strip()]
+        if not ids:
+            return {}
+        # SQLite parameter limit is high enough for typical UI pages; chunk defensively.
+        out: Dict[str, int] = {}
+        conn = self._db.connection()
+        for i in range(0, len(ids), 900):
+            chunk = ids[i : i + 900]
+            q = ",".join(["?"] * len(chunk))
+            rows = conn.execute(f"SELECT run_id, last_seq FROM ledger_heads WHERE run_id IN ({q});", tuple(chunk)).fetchall()
+            for row in rows or []:
+                rid = str(row["run_id"] or "").strip()
+                if not rid:
+                    continue
+                try:
+                    out[rid] = int(row["last_seq"] or 0)
+                except Exception:
+                    out[rid] = 0
+        return out
+
+    def metrics_many(self, run_ids: List[str]) -> Dict[str, Dict[str, int]]:
+        """Return best-effort per-run metrics derived from completed ledger records."""
+        ids = [str(r or "").strip() for r in (run_ids or []) if str(r or "").strip()]
+        if not ids:
+            return {}
+        out: Dict[str, Dict[str, int]] = {}
+        conn = self._db.connection()
+        for i in range(0, len(ids), 300):
+            chunk = ids[i : i + 300]
+            q = ",".join(["?"] * len(chunk))
+            rows = conn.execute(
+                f"""
+                WITH completed AS (
+                    SELECT run_id, record_json
+                    FROM ledger
+                    WHERE run_id IN ({q})
+                      AND json_extract(record_json, '$.status') = 'completed'
+                )
+                SELECT
+                    run_id AS run_id,
+                    COUNT(*) AS steps,
+                    SUM(CASE WHEN json_extract(record_json, '$.effect.type') = 'llm_call' THEN 1 ELSE 0 END) AS llm_calls,
+                    SUM(
+                        CASE
+                            WHEN json_extract(record_json, '$.effect.type') = 'tool_calls'
+                            THEN COALESCE(json_array_length(json_extract(record_json, '$.effect.payload.tool_calls')), 0)
+                            ELSE 0
+                        END
+                    ) AS tool_calls,
+                    SUM(
+                        CASE
+                            WHEN json_extract(record_json, '$.effect.type') = 'llm_call'
+                            THEN COALESCE(json_extract(record_json, '$.result.usage.total_tokens'), 0)
+                            ELSE 0
+                        END
+                    ) AS tokens_total
+                FROM completed
+                GROUP BY run_id;
+                """,
+                tuple(chunk),
+            ).fetchall()
+            for row in rows or []:
+                rid = str(row["run_id"] or "").strip()
+                if not rid:
+                    continue
+
+                def _i(v: Any) -> int:
+                    try:
+                        return int(v or 0)
+                    except Exception:
+                        return 0
+
+                out[rid] = {
+                    "steps": _i(row["steps"]),
+                    "llm_calls": _i(row["llm_calls"]),
+                    "tool_calls": _i(row["tool_calls"]),
+                    "tokens_total": _i(row["tokens_total"]),
+                }
+        return out
+
+    def list_after(self, *, run_id: str, after: int, limit: int = 1000) -> Tuple[List[Dict[str, Any]], int]:
+        """Optional cursor API (not part of LedgerStore ABC).
+
+        Cursor semantics match the existing gateway API: `after` is the last consumed seq.
+        """
+        rid = str(run_id or "").strip()
+        a = int(after or 0)
+        lim = max(1, int(limit or 1000))
+        if not rid:
+            return ([], a)
+        conn = self._db.connection()
+        rows = conn.execute(
+            "SELECT seq, record_json FROM ledger WHERE run_id = ? AND seq > ? ORDER BY seq ASC LIMIT ?;",
+            (rid, a, lim),
+        ).fetchall()
+        out: List[Dict[str, Any]] = []
+        next_cursor = a
+        for row in rows or []:
+            try:
+                obj = json.loads(str(row["record_json"] or "{}"))
+            except Exception:
+                continue
+            if not isinstance(obj, dict):
+                continue
+            out.append(obj)
+            try:
+                next_cursor = max(next_cursor, int(row["seq"] or next_cursor))
+            except Exception:
+                pass
+        return (out, next_cursor)
+
+
+class SqliteCommandStore(CommandStore):
+    """SQLite-backed CommandStore (append-only, idempotent by command_id)."""
+
+    def __init__(self, db: SqliteDatabase) -> None:
+        self._db = db
+
+    def append(self, record: CommandRecord) -> CommandAppendResult:
+        cid = str(record.command_id or "").strip() or uuid.uuid4().hex
+        run_id = str(record.run_id or "").strip()
+        typ = str(record.type or "").strip()
+        payload = dict(record.payload or {})
+        ts = str(record.ts or "").strip() or _utc_now_iso()
+        client_id = str(record.client_id).strip() if isinstance(record.client_id, str) and record.client_id else None
+
+        if not run_id:
+            raise ValueError("CommandRecord.run_id must be non-empty")
+        if not typ:
+            raise ValueError("CommandRecord.type must be non-empty")
+        if not isinstance(payload, dict) or not _is_json_value(payload):
+            raise ValueError("CommandRecord.payload must be a JSON-serializable dict")
+
+        conn = self._db.connection()
+        with conn:
+            cur = conn.execute(
+                """
+                INSERT OR IGNORE INTO commands (command_id, run_id, type, payload_json, ts, client_id)
+                VALUES (?, ?, ?, ?, ?, ?);
+                """,
+                (cid, run_id, typ, json.dumps(payload, ensure_ascii=False), ts, client_id),
+            )
+            if int(cur.rowcount or 0) == 1:
+                seq = int(cur.lastrowid or 0)
+                return CommandAppendResult(accepted=True, duplicate=False, seq=seq)
+
+            row = conn.execute("SELECT seq FROM commands WHERE command_id = ?;", (cid,)).fetchone()
+            seq2 = int(row["seq"]) if row is not None else 0
+            return CommandAppendResult(accepted=False, duplicate=True, seq=seq2)
+
+    def list_after(self, *, after: int, limit: int = 1000) -> Tuple[List[CommandRecord], int]:
+        a = int(after or 0)
+        lim = max(1, int(limit or 1000))
+        conn = self._db.connection()
+        rows = conn.execute(
+            """
+            SELECT seq, command_id, run_id, type, payload_json, ts, client_id
+            FROM commands
+            WHERE seq > ?
+            ORDER BY seq ASC
+            LIMIT ?;
+            """,
+            (a, lim),
+        ).fetchall()
+
+        out: List[CommandRecord] = []
+        next_cursor = a
+        for row in rows or []:
+            try:
+                payload = json.loads(str(row["payload_json"] or "{}"))
+            except Exception:
+                payload = {}
+            if not isinstance(payload, dict):
+                payload = {}
+            seq = int(row["seq"] or 0)
+            out.append(
+                CommandRecord(
+                    command_id=str(row["command_id"] or ""),
+                    run_id=str(row["run_id"] or ""),
+                    type=str(row["type"] or ""),
+                    payload=payload,
+                    ts=str(row["ts"] or ""),
+                    client_id=str(row["client_id"] or "") or None,
+                    seq=seq,
+                )
+            )
+            next_cursor = max(next_cursor, seq)
+        return (out, next_cursor)
+
+    def get_last_seq(self) -> int:
+        conn = self._db.connection()
+        row = conn.execute("SELECT COALESCE(MAX(seq), 0) AS max_seq FROM commands;").fetchone()
+        if row is None:
+            return 0
+        try:
+            return int(row["max_seq"] or 0)
+        except Exception:
+            return 0
+
+
+class SqliteCommandCursorStore(CommandCursorStore):
+    """SQLite-backed durable cursor store for CommandStore replay."""
+
+    def __init__(self, db: SqliteDatabase, *, consumer_id: str = "gateway_runner") -> None:
+        self._db = db
+        self._consumer_id = str(consumer_id or "gateway_runner").strip() or "gateway_runner"
+
+    def load(self) -> int:
+        conn = self._db.connection()
+        row = conn.execute(
+            "SELECT cursor FROM command_cursors WHERE consumer_id = ?;",
+            (self._consumer_id,),
+        ).fetchone()
+        if row is None:
+            return 0
+        try:
+            return int(row["cursor"] or 0)
+        except Exception:
+            return 0
+
+    def save(self, cursor: int) -> None:
+        cur = int(cursor or 0)
+        conn = self._db.connection()
+        with conn:
+            conn.execute(
+                """
+                INSERT INTO command_cursors (consumer_id, cursor, updated_at)
+                VALUES (?, ?, ?)
+                ON CONFLICT(consumer_id) DO UPDATE SET
+                    cursor=excluded.cursor,
+                    updated_at=excluded.updated_at;
+                """,
+                (self._consumer_id, cur, _utc_now_iso()),
+            )