stabilize-0.9.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. stabilize/__init__.py +29 -0
  2. stabilize/cli.py +1193 -0
  3. stabilize/context/__init__.py +7 -0
  4. stabilize/context/stage_context.py +170 -0
  5. stabilize/dag/__init__.py +15 -0
  6. stabilize/dag/graph.py +215 -0
  7. stabilize/dag/topological.py +199 -0
  8. stabilize/examples/__init__.py +1 -0
  9. stabilize/examples/docker-example.py +759 -0
  10. stabilize/examples/golden-standard-expected-result.txt +1 -0
  11. stabilize/examples/golden-standard.py +488 -0
  12. stabilize/examples/http-example.py +606 -0
  13. stabilize/examples/llama-example.py +662 -0
  14. stabilize/examples/python-example.py +731 -0
  15. stabilize/examples/shell-example.py +399 -0
  16. stabilize/examples/ssh-example.py +603 -0
  17. stabilize/handlers/__init__.py +53 -0
  18. stabilize/handlers/base.py +226 -0
  19. stabilize/handlers/complete_stage.py +209 -0
  20. stabilize/handlers/complete_task.py +75 -0
  21. stabilize/handlers/complete_workflow.py +150 -0
  22. stabilize/handlers/run_task.py +369 -0
  23. stabilize/handlers/start_stage.py +262 -0
  24. stabilize/handlers/start_task.py +74 -0
  25. stabilize/handlers/start_workflow.py +136 -0
  26. stabilize/launcher.py +307 -0
  27. stabilize/migrations/01KDQ4N9QPJ6Q4MCV3V9GHWPV4_initial_schema.sql +97 -0
  28. stabilize/migrations/01KDRK3TXW4R2GERC1WBCQYJGG_rag_embeddings.sql +25 -0
  29. stabilize/migrations/__init__.py +1 -0
  30. stabilize/models/__init__.py +15 -0
  31. stabilize/models/stage.py +389 -0
  32. stabilize/models/status.py +146 -0
  33. stabilize/models/task.py +125 -0
  34. stabilize/models/workflow.py +317 -0
  35. stabilize/orchestrator.py +113 -0
  36. stabilize/persistence/__init__.py +28 -0
  37. stabilize/persistence/connection.py +185 -0
  38. stabilize/persistence/factory.py +136 -0
  39. stabilize/persistence/memory.py +214 -0
  40. stabilize/persistence/postgres.py +655 -0
  41. stabilize/persistence/sqlite.py +674 -0
  42. stabilize/persistence/store.py +235 -0
  43. stabilize/queue/__init__.py +59 -0
  44. stabilize/queue/messages.py +377 -0
  45. stabilize/queue/processor.py +312 -0
  46. stabilize/queue/queue.py +526 -0
  47. stabilize/queue/sqlite_queue.py +354 -0
  48. stabilize/rag/__init__.py +19 -0
  49. stabilize/rag/assistant.py +459 -0
  50. stabilize/rag/cache.py +294 -0
  51. stabilize/stages/__init__.py +11 -0
  52. stabilize/stages/builder.py +253 -0
  53. stabilize/tasks/__init__.py +19 -0
  54. stabilize/tasks/interface.py +335 -0
  55. stabilize/tasks/registry.py +255 -0
  56. stabilize/tasks/result.py +283 -0
  57. stabilize-0.9.2.dist-info/METADATA +301 -0
  58. stabilize-0.9.2.dist-info/RECORD +61 -0
  59. stabilize-0.9.2.dist-info/WHEEL +4 -0
  60. stabilize-0.9.2.dist-info/entry_points.txt +2 -0
  61. stabilize-0.9.2.dist-info/licenses/LICENSE +201 -0
stabilize/persistence/postgres.py
@@ -0,0 +1,655 @@
+"""
+PostgreSQL execution repository.
+
+Production-grade persistence using native psycopg3 with connection pooling.
+Uses singleton ConnectionManager for efficient connection pool sharing.
+"""
+
+from __future__ import annotations
+
+import json
+import time
+from collections.abc import Iterator
+from typing import Any, cast
+
+from stabilize.models.stage import StageExecution, SyntheticStageOwner
+from stabilize.models.status import WorkflowStatus
+from stabilize.models.task import TaskExecution
+from stabilize.models.workflow import (
+    PausedDetails,
+    Trigger,
+    Workflow,
+    WorkflowType,
+)
+from stabilize.persistence.store import (
+    WorkflowCriteria,
+    WorkflowNotFoundError,
+    WorkflowStore,
+)
+
+
+class PostgresWorkflowStore(WorkflowStore):
+    """
+    PostgreSQL implementation of WorkflowStore.
+
+    Uses native psycopg3 with connection pooling for database operations.
+    Supports concurrent access and provides efficient queries for pipeline
+    execution tracking.
+
+    Connection pools are managed by singleton ConnectionManager for
+    efficient resource sharing across all repository instances.
+    """
+
+    def __init__(
+        self,
+        connection_string: str,
+        create_tables: bool = False,
+    ) -> None:
+        """
+        Initialize the repository.
+
+        Args:
+            connection_string: PostgreSQL connection string
+            create_tables: Whether to create tables if they don't exist
+        """
+        from stabilize.persistence.connection import get_connection_manager
+
+        self.connection_string = connection_string
+        self._manager = get_connection_manager()
+        self._pool = self._manager.get_postgres_pool(connection_string)
+
+        if create_tables:
+            self._create_tables()
+
+    def _create_tables(self) -> None:
+        """Create database tables if they don't exist."""
+        schema = """
+        CREATE TABLE IF NOT EXISTS pipeline_executions (
+            id VARCHAR(26) PRIMARY KEY,
+            type VARCHAR(50) NOT NULL,
+            application VARCHAR(255) NOT NULL,
+            name VARCHAR(255),
+            status VARCHAR(50) NOT NULL,
+            start_time BIGINT,
+            end_time BIGINT,
+            start_time_expiry BIGINT,
+            trigger JSONB,
+            is_canceled BOOLEAN DEFAULT FALSE,
+            canceled_by VARCHAR(255),
+            cancellation_reason TEXT,
+            paused JSONB,
+            pipeline_config_id VARCHAR(255),
+            is_limit_concurrent BOOLEAN DEFAULT FALSE,
+            max_concurrent_executions INT DEFAULT 0,
+            keep_waiting_pipelines BOOLEAN DEFAULT FALSE,
+            origin VARCHAR(255),
+            created_at TIMESTAMP DEFAULT NOW()
+        );
+
+        CREATE TABLE IF NOT EXISTS stage_executions (
+            id VARCHAR(26) PRIMARY KEY,
+            execution_id VARCHAR(26) REFERENCES pipeline_executions(id) ON DELETE CASCADE,
+            ref_id VARCHAR(50) NOT NULL,
+            type VARCHAR(100) NOT NULL,
+            name VARCHAR(255),
+            status VARCHAR(50) NOT NULL,
+            context JSONB DEFAULT '{}',
+            outputs JSONB DEFAULT '{}',
+            requisite_stage_ref_ids TEXT[],
+            parent_stage_id VARCHAR(26),
+            synthetic_stage_owner VARCHAR(20),
+            start_time BIGINT,
+            end_time BIGINT,
+            start_time_expiry BIGINT,
+            scheduled_time BIGINT,
+            UNIQUE(execution_id, ref_id)
+        );
+
+        CREATE TABLE IF NOT EXISTS task_executions (
+            id VARCHAR(26) PRIMARY KEY,
+            stage_id VARCHAR(26) REFERENCES stage_executions(id) ON DELETE CASCADE,
+            name VARCHAR(255) NOT NULL,
+            implementing_class VARCHAR(255) NOT NULL,
+            status VARCHAR(50) NOT NULL,
+            start_time BIGINT,
+            end_time BIGINT,
+            stage_start BOOLEAN DEFAULT FALSE,
+            stage_end BOOLEAN DEFAULT FALSE,
+            loop_start BOOLEAN DEFAULT FALSE,
+            loop_end BOOLEAN DEFAULT FALSE,
+            task_exception_details JSONB DEFAULT '{}'
+        );
+
+        CREATE INDEX IF NOT EXISTS idx_execution_application
+            ON pipeline_executions(application);
+        CREATE INDEX IF NOT EXISTS idx_execution_config
+            ON pipeline_executions(pipeline_config_id);
+        CREATE INDEX IF NOT EXISTS idx_execution_status
+            ON pipeline_executions(status);
+        CREATE INDEX IF NOT EXISTS idx_stage_execution
+            ON stage_executions(execution_id);
+        CREATE INDEX IF NOT EXISTS idx_task_stage
+            ON task_executions(stage_id);
+        """
+
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                for statement in schema.split(";"):
+                    statement = statement.strip()
+                    if statement:
+                        cur.execute(statement)
+            conn.commit()
+
+    def close(self) -> None:
+        """Close the connection pool via connection manager."""
+        self._manager.close_postgres_pool(self.connection_string)
+
+    def store(self, execution: Workflow) -> None:
+        """Store a complete execution."""
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                # Insert execution
+                cur.execute(
+                    """
+                    INSERT INTO pipeline_executions (
+                        id, type, application, name, status, start_time, end_time,
+                        start_time_expiry, trigger, is_canceled, canceled_by,
+                        cancellation_reason, paused, pipeline_config_id,
+                        is_limit_concurrent, max_concurrent_executions,
+                        keep_waiting_pipelines, origin
+                    ) VALUES (
+                        %(id)s, %(type)s, %(application)s, %(name)s, %(status)s,
+                        %(start_time)s, %(end_time)s, %(start_time_expiry)s,
+                        %(trigger)s::jsonb, %(is_canceled)s, %(canceled_by)s,
+                        %(cancellation_reason)s, %(paused)s::jsonb, %(pipeline_config_id)s,
+                        %(is_limit_concurrent)s, %(max_concurrent_executions)s,
+                        %(keep_waiting_pipelines)s, %(origin)s
+                    )
+                    """,
+                    self._execution_to_dict(execution),
+                )
+
+                # Insert stages
+                for stage in execution.stages:
+                    self._insert_stage(cur, stage, execution.id)
+
+            conn.commit()
+
+    def retrieve(self, execution_id: str) -> Workflow:
+        """Retrieve an execution by ID."""
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                # Get execution
+                cur.execute(
+                    "SELECT * FROM pipeline_executions WHERE id = %(id)s",
+                    {"id": execution_id},
+                )
+                row = cur.fetchone()
+                if not row:
+                    raise WorkflowNotFoundError(execution_id)
+
+                execution = self._row_to_execution(cast(dict[str, Any], row))
+
+                # Get stages
+                cur.execute(
+                    """
+                    SELECT * FROM stage_executions
+                    WHERE execution_id = %(execution_id)s
+                    """,
+                    {"execution_id": execution_id},
+                )
+                stages = []
+                for stage_row in cur.fetchall():
+                    stage = self._row_to_stage(cast(dict[str, Any], stage_row))
+                    stage._execution = execution
+
+                    # Get tasks for stage
+                    cur.execute(
+                        """
+                        SELECT * FROM task_executions
+                        WHERE stage_id = %(stage_id)s
+                        """,
+                        {"stage_id": stage.id},
+                    )
+                    for task_row in cur.fetchall():
+                        task = self._row_to_task(cast(dict[str, Any], task_row))
+                        task._stage = stage
+                        stage.tasks.append(task)
+
+                    stages.append(stage)
+
+                execution.stages = stages
+                return execution
+
+    def update_status(self, execution: Workflow) -> None:
+        """Update execution status."""
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                cur.execute(
+                    """
+                    UPDATE pipeline_executions SET
+                        status = %(status)s,
+                        start_time = %(start_time)s,
+                        end_time = %(end_time)s,
+                        is_canceled = %(is_canceled)s,
+                        canceled_by = %(canceled_by)s,
+                        cancellation_reason = %(cancellation_reason)s,
+                        paused = %(paused)s::jsonb
+                    WHERE id = %(id)s
+                    """,
+                    {
+                        "id": execution.id,
+                        "status": execution.status.name,
+                        "start_time": execution.start_time,
+                        "end_time": execution.end_time,
+                        "is_canceled": execution.is_canceled,
+                        "canceled_by": execution.canceled_by,
+                        "cancellation_reason": execution.cancellation_reason,
+                        "paused": (json.dumps(self._paused_to_dict(execution.paused)) if execution.paused else None),
+                    },
+                )
+            conn.commit()
+
+    def delete(self, execution_id: str) -> None:
+        """Delete an execution."""
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                cur.execute(
+                    "DELETE FROM pipeline_executions WHERE id = %(id)s",
+                    {"id": execution_id},
+                )
+            conn.commit()
+
+    def store_stage(self, stage: StageExecution) -> None:
+        """Store or update a stage."""
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                # Check if stage exists
+                cur.execute(
+                    "SELECT id FROM stage_executions WHERE id = %(id)s",
+                    {"id": stage.id},
+                )
+                exists = cur.fetchone() is not None
+
+                if exists:
+                    # Update
+                    cur.execute(
+                        """
+                        UPDATE stage_executions SET
+                            status = %(status)s,
+                            context = %(context)s::jsonb,
+                            outputs = %(outputs)s::jsonb,
+                            start_time = %(start_time)s,
+                            end_time = %(end_time)s
+                        WHERE id = %(id)s
+                        """,
+                        {
+                            "id": stage.id,
+                            "status": stage.status.name,
+                            "context": json.dumps(stage.context),
+                            "outputs": json.dumps(stage.outputs),
+                            "start_time": stage.start_time,
+                            "end_time": stage.end_time,
+                        },
+                    )
+
+                    # Update tasks
+                    for task in stage.tasks:
+                        self._upsert_task(cur, task, stage.id)
+                else:
+                    self._insert_stage(cur, stage, stage.execution.id)
+
+            conn.commit()
+
+    def add_stage(self, stage: StageExecution) -> None:
+        """Add a new stage."""
+        self.store_stage(stage)
+
+    def remove_stage(
+        self,
+        execution: Workflow,
+        stage_id: str,
+    ) -> None:
+        """Remove a stage."""
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                cur.execute(
+                    "DELETE FROM stage_executions WHERE id = %(id)s",
+                    {"id": stage_id},
+                )
+            conn.commit()
+
+    def retrieve_by_pipeline_config_id(
+        self,
+        pipeline_config_id: str,
+        criteria: WorkflowCriteria | None = None,
+    ) -> Iterator[Workflow]:
+        """Retrieve executions by pipeline config ID."""
+        query = """
+            SELECT id FROM pipeline_executions
+            WHERE pipeline_config_id = %(config_id)s
+        """
+        params: dict[str, Any] = {"config_id": pipeline_config_id}
+
+        if criteria:
+            if criteria.statuses:
+                status_names = [s.name for s in criteria.statuses]
+                query += " AND status = ANY(%(statuses)s)"
+                params["statuses"] = status_names
+
+        query += " ORDER BY start_time DESC"
+
+        if criteria and criteria.page_size:
+            query += f" LIMIT {criteria.page_size}"
+
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                cur.execute(query, params)
+                for row in cur.fetchall():
+                    yield self.retrieve(cast(dict[str, Any], row)["id"])
+
+    def retrieve_by_application(
+        self,
+        application: str,
+        criteria: WorkflowCriteria | None = None,
+    ) -> Iterator[Workflow]:
+        """Retrieve executions by application."""
+        query = """
+            SELECT id FROM pipeline_executions
+            WHERE application = %(application)s
+        """
+        params: dict[str, Any] = {"application": application}
+
+        if criteria:
+            if criteria.statuses:
+                status_names = [s.name for s in criteria.statuses]
+                query += " AND status = ANY(%(statuses)s)"
+                params["statuses"] = status_names
+
+        query += " ORDER BY start_time DESC"
+
+        if criteria and criteria.page_size:
+            query += f" LIMIT {criteria.page_size}"
+
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                cur.execute(query, params)
+                for row in cur.fetchall():
+                    yield self.retrieve(cast(dict[str, Any], row)["id"])
+
+    def pause(self, execution_id: str, paused_by: str) -> None:
+        """Pause an execution."""
+        paused = PausedDetails(
+            paused_by=paused_by,
+            pause_time=int(time.time() * 1000),
+        )
+
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                cur.execute(
+                    """
+                    UPDATE pipeline_executions SET
+                        status = %(status)s,
+                        paused = %(paused)s::jsonb
+                    WHERE id = %(id)s
+                    """,
+                    {
+                        "id": execution_id,
+                        "status": WorkflowStatus.PAUSED.name,
+                        "paused": json.dumps(self._paused_to_dict(paused)),
+                    },
+                )
+            conn.commit()
+
+    def resume(self, execution_id: str) -> None:
+        """Resume a paused execution."""
+        # First get current paused details
+        execution = self.retrieve(execution_id)
+        if execution.paused and execution.paused.pause_time:
+            current_time = int(time.time() * 1000)
+            execution.paused.resume_time = current_time
+            execution.paused.paused_ms = current_time - execution.paused.pause_time
+
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                cur.execute(
+                    """
+                    UPDATE pipeline_executions SET
+                        status = %(status)s,
+                        paused = %(paused)s::jsonb
+                    WHERE id = %(id)s
+                    """,
+                    {
+                        "id": execution_id,
+                        "status": WorkflowStatus.RUNNING.name,
+                        "paused": (json.dumps(self._paused_to_dict(execution.paused)) if execution.paused else None),
+                    },
+                )
+            conn.commit()
+
+    def cancel(
+        self,
+        execution_id: str,
+        canceled_by: str,
+        reason: str,
+    ) -> None:
+        """Cancel an execution."""
+        with self._pool.connection() as conn:
+            with conn.cursor() as cur:
+                cur.execute(
+                    """
+                    UPDATE pipeline_executions SET
+                        is_canceled = TRUE,
+                        canceled_by = %(canceled_by)s,
+                        cancellation_reason = %(reason)s
+                    WHERE id = %(id)s
+                    """,
+                    {
+                        "id": execution_id,
+                        "canceled_by": canceled_by,
+                        "reason": reason,
+                    },
+                )
+            conn.commit()
+
+    # ========== Helper Methods ==========
+
+    def _insert_stage(self, cur: Any, stage: StageExecution, execution_id: str) -> None:
+        """Insert a stage."""
+        cur.execute(
+            """
+            INSERT INTO stage_executions (
+                id, execution_id, ref_id, type, name, status, context, outputs,
+                requisite_stage_ref_ids, parent_stage_id, synthetic_stage_owner,
+                start_time, end_time, start_time_expiry, scheduled_time
+            ) VALUES (
+                %(id)s, %(execution_id)s, %(ref_id)s, %(type)s, %(name)s, %(status)s,
+                %(context)s::jsonb, %(outputs)s::jsonb, %(requisite_stage_ref_ids)s,
+                %(parent_stage_id)s, %(synthetic_stage_owner)s, %(start_time)s,
+                %(end_time)s, %(start_time_expiry)s, %(scheduled_time)s
+            )
+            """,
+            {
+                "id": stage.id,
+                "execution_id": execution_id,
+                "ref_id": stage.ref_id,
+                "type": stage.type,
+                "name": stage.name,
+                "status": stage.status.name,
+                "context": json.dumps(stage.context),
+                "outputs": json.dumps(stage.outputs),
+                "requisite_stage_ref_ids": list(stage.requisite_stage_ref_ids),
+                "parent_stage_id": stage.parent_stage_id,
+                "synthetic_stage_owner": (stage.synthetic_stage_owner.value if stage.synthetic_stage_owner else None),
+                "start_time": stage.start_time,
+                "end_time": stage.end_time,
+                "start_time_expiry": stage.start_time_expiry,
+                "scheduled_time": stage.scheduled_time,
+            },
+        )
+
+        # Insert tasks
+        for task in stage.tasks:
+            self._upsert_task(cur, task, stage.id)
+
+    def _upsert_task(self, cur: Any, task: TaskExecution, stage_id: str) -> None:
+        """Insert or update a task."""
+        cur.execute(
+            """
+            INSERT INTO task_executions (
+                id, stage_id, name, implementing_class, status,
+                start_time, end_time, stage_start, stage_end,
+                loop_start, loop_end, task_exception_details
+            ) VALUES (
+                %(id)s, %(stage_id)s, %(name)s, %(implementing_class)s, %(status)s,
+                %(start_time)s, %(end_time)s, %(stage_start)s, %(stage_end)s,
+                %(loop_start)s, %(loop_end)s, %(task_exception_details)s::jsonb
+            )
+            ON CONFLICT (id) DO UPDATE SET
+                status = EXCLUDED.status,
+                start_time = EXCLUDED.start_time,
+                end_time = EXCLUDED.end_time,
+                task_exception_details = EXCLUDED.task_exception_details
+            """,
+            {
+                "id": task.id,
+                "stage_id": stage_id,
+                "name": task.name,
+                "implementing_class": task.implementing_class,
+                "status": task.status.name,
+                "start_time": task.start_time,
+                "end_time": task.end_time,
+                "stage_start": task.stage_start,
+                "stage_end": task.stage_end,
+                "loop_start": task.loop_start,
+                "loop_end": task.loop_end,
+                "task_exception_details": json.dumps(task.task_exception_details),
+            },
+        )
+
+    def _execution_to_dict(self, execution: Workflow) -> dict[str, Any]:
+        """Convert execution to dictionary for storage."""
+        return {
+            "id": execution.id,
+            "type": execution.type.value,
+            "application": execution.application,
+            "name": execution.name,
+            "status": execution.status.name,
+            "start_time": execution.start_time,
+            "end_time": execution.end_time,
+            "start_time_expiry": execution.start_time_expiry,
+            "trigger": json.dumps(execution.trigger.to_dict()),
+            "is_canceled": execution.is_canceled,
+            "canceled_by": execution.canceled_by,
+            "cancellation_reason": execution.cancellation_reason,
+            "paused": (json.dumps(self._paused_to_dict(execution.paused)) if execution.paused else None),
+            "pipeline_config_id": execution.pipeline_config_id,
+            "is_limit_concurrent": execution.is_limit_concurrent,
+            "max_concurrent_executions": execution.max_concurrent_executions,
+            "keep_waiting_pipelines": execution.keep_waiting_pipelines,
+            "origin": execution.origin,
+        }
+
+    def _paused_to_dict(self, paused: PausedDetails | None) -> dict[str, Any] | None:
+        """Convert PausedDetails to dict."""
+        if paused is None:
+            return None
+        return {
+            "paused_by": paused.paused_by,
+            "pause_time": paused.pause_time,
+            "resume_time": paused.resume_time,
+            "paused_ms": paused.paused_ms,
+        }
+
+    def _row_to_execution(self, row: dict[str, Any]) -> Workflow:
+        """Convert database row to Workflow."""
+        trigger_data = row["trigger"] if isinstance(row["trigger"], dict) else json.loads(row["trigger"] or "{}")
+        paused_data = (
+            row["paused"] if isinstance(row["paused"], dict) else json.loads(row["paused"]) if row["paused"] else None
+        )
+
+        paused = None
+        if paused_data:
+            paused = PausedDetails(
+                paused_by=paused_data.get("paused_by", ""),
+                pause_time=paused_data.get("pause_time"),
+                resume_time=paused_data.get("resume_time"),
+                paused_ms=paused_data.get("paused_ms", 0),
+            )
+
+        return Workflow(
+            id=row["id"],
+            type=WorkflowType(row["type"]),
+            application=row["application"],
+            name=row["name"] or "",
+            status=WorkflowStatus[row["status"]],
+            start_time=row["start_time"],
+            end_time=row["end_time"],
+            start_time_expiry=row["start_time_expiry"],
+            trigger=Trigger.from_dict(trigger_data),
+            is_canceled=row["is_canceled"] or False,
+            canceled_by=row["canceled_by"],
+            cancellation_reason=row["cancellation_reason"],
+            paused=paused,
+            pipeline_config_id=row["pipeline_config_id"],
+            is_limit_concurrent=row["is_limit_concurrent"] or False,
+            max_concurrent_executions=row["max_concurrent_executions"] or 0,
+            keep_waiting_pipelines=row["keep_waiting_pipelines"] or False,
+            origin=row["origin"] or "unknown",
+        )
+
+    def _row_to_stage(self, row: dict[str, Any]) -> StageExecution:
+        """Convert database row to StageExecution."""
+        context = row["context"] if isinstance(row["context"], dict) else json.loads(row["context"] or "{}")
+        outputs = row["outputs"] if isinstance(row["outputs"], dict) else json.loads(row["outputs"] or "{}")
+
+        synthetic_owner = None
+        if row["synthetic_stage_owner"]:
+            synthetic_owner = SyntheticStageOwner(row["synthetic_stage_owner"])
+
+        return StageExecution(
+            id=row["id"],
+            ref_id=row["ref_id"],
+            type=row["type"],
+            name=row["name"] or "",
+            status=WorkflowStatus[row["status"]],
+            context=context,
+            outputs=outputs,
+            requisite_stage_ref_ids=set(row["requisite_stage_ref_ids"] or []),
+            parent_stage_id=row["parent_stage_id"],
+            synthetic_stage_owner=synthetic_owner,
+            start_time=row["start_time"],
+            end_time=row["end_time"],
+            start_time_expiry=row["start_time_expiry"],
+            scheduled_time=row["scheduled_time"],
+        )
+
+    def _row_to_task(self, row: dict[str, Any]) -> TaskExecution:
+        """Convert database row to TaskExecution."""
+        exception_details = row["task_exception_details"]
+        if isinstance(exception_details, str):
+            exception_details = json.loads(exception_details or "{}")
+
+        return TaskExecution(
+            id=row["id"],
+            name=row["name"],
+            implementing_class=row["implementing_class"],
+            status=WorkflowStatus[row["status"]],
+            start_time=row["start_time"],
+            end_time=row["end_time"],
+            stage_start=row["stage_start"] or False,
+            stage_end=row["stage_end"] or False,
+            loop_start=row["loop_start"] or False,
+            loop_end=row["loop_end"] or False,
+            task_exception_details=exception_details or {},
+        )
+
+    def is_healthy(self) -> bool:
+        """Check if the database connection is healthy."""
+        try:
+            with self._pool.connection() as conn:
+                with conn.cursor() as cur:
+                    cur.execute("SELECT 1")
+            return True
+        except Exception:
+            return False
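
For orientation, a minimal usage sketch of the store added above, using only the constructor and methods visible in this diff. The connection string, application name, and execution ID are placeholders, and a reachable PostgreSQL server plus stabilize's ConnectionManager defaults are assumed rather than shown.

from stabilize.persistence.postgres import PostgresWorkflowStore

# Placeholder DSN; point this at a real PostgreSQL instance.
store = PostgresWorkflowStore(
    "postgresql://stabilize:stabilize@localhost:5432/stabilize",
    create_tables=True,  # create the pipeline/stage/task tables and indexes on first use
)

if not store.is_healthy():
    raise RuntimeError("PostgreSQL is not reachable")

# Iterate recent executions for an application (hypothetical application name).
for workflow in store.retrieve_by_application("demo-app"):
    print(workflow.id, workflow.status.name)

# Lifecycle operations take an execution ID (placeholder ULID shown here).
execution_id = "01ARZ3NDEKTSV4RRFFQ69G5FAV"
store.pause(execution_id, paused_by="operator")
store.resume(execution_id)
store.cancel(execution_id, canceled_by="operator", reason="superseded run")

store.close()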