stabilize 0.9.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. stabilize/__init__.py +29 -0
  2. stabilize/cli.py +1193 -0
  3. stabilize/context/__init__.py +7 -0
  4. stabilize/context/stage_context.py +170 -0
  5. stabilize/dag/__init__.py +15 -0
  6. stabilize/dag/graph.py +215 -0
  7. stabilize/dag/topological.py +199 -0
  8. stabilize/examples/__init__.py +1 -0
  9. stabilize/examples/docker-example.py +759 -0
  10. stabilize/examples/golden-standard-expected-result.txt +1 -0
  11. stabilize/examples/golden-standard.py +488 -0
  12. stabilize/examples/http-example.py +606 -0
  13. stabilize/examples/llama-example.py +662 -0
  14. stabilize/examples/python-example.py +731 -0
  15. stabilize/examples/shell-example.py +399 -0
  16. stabilize/examples/ssh-example.py +603 -0
  17. stabilize/handlers/__init__.py +53 -0
  18. stabilize/handlers/base.py +226 -0
  19. stabilize/handlers/complete_stage.py +209 -0
  20. stabilize/handlers/complete_task.py +75 -0
  21. stabilize/handlers/complete_workflow.py +150 -0
  22. stabilize/handlers/run_task.py +369 -0
  23. stabilize/handlers/start_stage.py +262 -0
  24. stabilize/handlers/start_task.py +74 -0
  25. stabilize/handlers/start_workflow.py +136 -0
  26. stabilize/launcher.py +307 -0
  27. stabilize/migrations/01KDQ4N9QPJ6Q4MCV3V9GHWPV4_initial_schema.sql +97 -0
  28. stabilize/migrations/01KDRK3TXW4R2GERC1WBCQYJGG_rag_embeddings.sql +25 -0
  29. stabilize/migrations/__init__.py +1 -0
  30. stabilize/models/__init__.py +15 -0
  31. stabilize/models/stage.py +389 -0
  32. stabilize/models/status.py +146 -0
  33. stabilize/models/task.py +125 -0
  34. stabilize/models/workflow.py +317 -0
  35. stabilize/orchestrator.py +113 -0
  36. stabilize/persistence/__init__.py +28 -0
  37. stabilize/persistence/connection.py +185 -0
  38. stabilize/persistence/factory.py +136 -0
  39. stabilize/persistence/memory.py +214 -0
  40. stabilize/persistence/postgres.py +655 -0
  41. stabilize/persistence/sqlite.py +674 -0
  42. stabilize/persistence/store.py +235 -0
  43. stabilize/queue/__init__.py +59 -0
  44. stabilize/queue/messages.py +377 -0
  45. stabilize/queue/processor.py +312 -0
  46. stabilize/queue/queue.py +526 -0
  47. stabilize/queue/sqlite_queue.py +354 -0
  48. stabilize/rag/__init__.py +19 -0
  49. stabilize/rag/assistant.py +459 -0
  50. stabilize/rag/cache.py +294 -0
  51. stabilize/stages/__init__.py +11 -0
  52. stabilize/stages/builder.py +253 -0
  53. stabilize/tasks/__init__.py +19 -0
  54. stabilize/tasks/interface.py +335 -0
  55. stabilize/tasks/registry.py +255 -0
  56. stabilize/tasks/result.py +283 -0
  57. stabilize-0.9.2.dist-info/METADATA +301 -0
  58. stabilize-0.9.2.dist-info/RECORD +61 -0
  59. stabilize-0.9.2.dist-info/WHEEL +4 -0
  60. stabilize-0.9.2.dist-info/entry_points.txt +2 -0
  61. stabilize-0.9.2.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,759 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Docker Example - Demonstrates running Docker containers with Stabilize.
4
+
5
+ This example shows how to:
6
+ 1. Create a custom Task that runs Docker commands
7
+ 2. Run containers, build images, and manage docker-compose
8
+ 3. Build CI/CD-style container workflows
9
+
10
+ Requirements:
11
+ Docker CLI installed and running
12
+ User must have permissions to run docker commands
13
+
14
+ Run with:
15
+ python examples/docker-example.py
16
+ """
17
+
18
+ import logging
19
+ import subprocess
20
+ from typing import Any
21
+
22
+ logging.basicConfig(level=logging.ERROR)
23
+
24
+ from stabilize import StageExecution, TaskExecution, Workflow, WorkflowStatus
25
+ from stabilize.handlers.complete_stage import CompleteStageHandler
26
+ from stabilize.handlers.complete_task import CompleteTaskHandler
27
+ from stabilize.handlers.complete_workflow import CompleteWorkflowHandler
28
+ from stabilize.handlers.run_task import RunTaskHandler
29
+ from stabilize.handlers.start_stage import StartStageHandler
30
+ from stabilize.handlers.start_task import StartTaskHandler
31
+ from stabilize.handlers.start_workflow import StartWorkflowHandler
32
+ from stabilize.orchestrator import Orchestrator
33
+ from stabilize.persistence.sqlite import SqliteWorkflowStore
34
+ from stabilize.persistence.store import WorkflowStore
35
+ from stabilize.queue.processor import QueueProcessor
36
+ from stabilize.queue.queue import Queue
37
+ from stabilize.queue.sqlite_queue import SqliteQueue
38
+ from stabilize.tasks.interface import Task
39
+ from stabilize.tasks.registry import TaskRegistry
40
+ from stabilize.tasks.result import TaskResult
41
+
42
+ # =============================================================================
43
+ # Custom Task: DockerTask
44
+ # =============================================================================
45
+
46
+
47
class DockerTask(Task):
    """
    Execute Docker commands.

    Context Parameters:
        action: Action to perform - run, exec, build, pull, ps, images, logs, stop, rm
        image: Docker image name (for run, pull, build)
        command: Command to run in container (optional; str or list of args)
        name: Container name (optional)
        tag: Image tag for build (optional)
        dockerfile: Dockerfile path for build (default: Dockerfile)
        context: Build context path (default: .)
        volumes: Volume mounts as list of "host:container" strings (optional)
        ports: Port mappings as list of "host:container" strings (optional)
        environment: Environment variables as dict (optional)
        workdir: Working directory in container (optional)
        network: Docker network to connect (optional)
        remove: Remove container after run (default: True)
        detach: Run in detached mode (default: False)
        timeout: Command timeout in seconds (default: 300)

    Outputs:
        stdout: Command standard output
        stderr: Command standard error
        exit_code: Command exit code
        container_id: Container ID (for run with detach)
        image_id: Image ID (for build)
    """

    SUPPORTED_ACTIONS = {
        "run",
        "exec",
        "build",
        "pull",
        "ps",
        "images",
        "logs",
        "stop",
        "rm",
    }

    def execute(self, stage: StageExecution) -> TaskResult:
        """Run the configured docker action and map the result to a TaskResult.

        Returns TaskResult.success on exit code 0; on failure, returns
        failed_continue when the stage sets ``continue_on_failure``, else
        terminal. Never raises for expected docker failures.
        """
        action = stage.context.get("action", "run")
        timeout = stage.context.get("timeout", 300)

        if action not in self.SUPPORTED_ACTIONS:
            return TaskResult.terminal(error=f"Unsupported action '{action}'. Supported: {self.SUPPORTED_ACTIONS}")

        # Fail fast with a clear message if the docker CLI/daemon is missing.
        try:
            subprocess.run(
                ["docker", "version"],
                capture_output=True,
                timeout=10,
                check=True,
            )
        except (
            subprocess.CalledProcessError,
            FileNotFoundError,
            subprocess.TimeoutExpired,
        ):
            return TaskResult.terminal(error="Docker is not available. Ensure Docker is installed and running.")

        # Build the argv for this action; ValueError signals a missing
        # required context key.
        try:
            cmd = self._build_command(action, stage.context)
        except ValueError as e:
            return TaskResult.terminal(error=str(e))

        print(f" [DockerTask] {' '.join(cmd)}")

        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
            )
        except subprocess.TimeoutExpired:
            return TaskResult.terminal(error=f"Docker command timed out after {timeout}s")

        outputs: dict[str, Any] = {
            "stdout": result.stdout.strip(),
            "stderr": result.stderr.strip(),
            "exit_code": result.returncode,
        }

        # Extract container/image ID for relevant actions.
        if action == "run" and stage.context.get("detach"):
            # `docker run -d` prints the full container ID; keep the 12-char
            # short form conventionally shown by `docker ps`.
            outputs["container_id"] = result.stdout.strip()[:12]
        elif action == "build" and result.returncode == 0:
            # NOTE(review): "Successfully built" appears only in legacy-builder
            # output; under BuildKit this marker is absent, so image_id may be
            # missing — confirm if BuildKit support is required.
            for line in result.stdout.split("\n"):
                if "Successfully built" in line:
                    outputs["image_id"] = line.split()[-1]
                    break

        if result.returncode == 0:
            print(" [DockerTask] Success")
            return TaskResult.success(outputs=outputs)

        print(f" [DockerTask] Failed with exit code {result.returncode}")
        if stage.context.get("continue_on_failure"):
            return TaskResult.failed_continue(
                error=f"Docker command failed with exit code {result.returncode}",
                outputs=outputs,
            )
        return TaskResult.terminal(
            error=f"Docker command failed with exit code {result.returncode}",
            context=outputs,
        )

    @staticmethod
    def _split_command(command: Any) -> list[str]:
        """Tokenize a container command into argv items.

        BUGFIX: the original used str.split(), which breaks shell-quoted
        arguments (e.g. the example context "sh -c 'echo ...'" was split
        mid-quote). shlex.split honors quoting and is identical for plain
        whitespace-separated strings. List inputs are passed through.
        """
        import shlex

        if isinstance(command, str):
            return shlex.split(command)
        return list(command)

    def _build_command(self, action: str, context: dict[str, Any]) -> list[str]:
        """Build the Docker argv for *action* from the stage context.

        Raises:
            ValueError: if a required context key for the action is missing,
                or the action is unknown.
        """
        if action == "run":
            return self._build_run_command(context)
        elif action == "exec":
            return self._build_exec_command(context)
        elif action == "build":
            return self._build_build_command(context)
        elif action == "pull":
            image = context.get("image")
            if not image:
                raise ValueError("'image' is required for pull action")
            return ["docker", "pull", image]
        elif action == "ps":
            cmd = ["docker", "ps"]
            if context.get("all"):
                cmd.append("-a")
            return cmd
        elif action == "images":
            return ["docker", "images"]
        elif action == "logs":
            name = context.get("name")
            if not name:
                raise ValueError("'name' is required for logs action")
            cmd = ["docker", "logs"]
            if context.get("follow"):
                cmd.append("-f")
            if context.get("tail"):
                cmd.extend(["--tail", str(context["tail"])])
            cmd.append(name)
            return cmd
        elif action == "stop":
            name = context.get("name")
            if not name:
                raise ValueError("'name' is required for stop action")
            return ["docker", "stop", name]
        elif action == "rm":
            name = context.get("name")
            if not name:
                raise ValueError("'name' is required for rm action")
            cmd = ["docker", "rm"]
            if context.get("force"):
                cmd.append("-f")
            cmd.append(name)
            return cmd
        else:
            raise ValueError(f"Unknown action: {action}")

    def _build_run_command(self, context: dict[str, Any]) -> list[str]:
        """Build a `docker run` argv from the stage context."""
        image = context.get("image")
        if not image:
            raise ValueError("'image' is required for run action")

        cmd = ["docker", "run"]

        # Container name
        if context.get("name"):
            cmd.extend(["--name", context["name"]])

        # Remove after exit (defaults to True to avoid container litter)
        if context.get("remove", True):
            cmd.append("--rm")

        # Detach mode
        if context.get("detach"):
            cmd.append("-d")

        # Volumes
        for vol in context.get("volumes", []):
            cmd.extend(["-v", vol])

        # Ports
        for port in context.get("ports", []):
            cmd.extend(["-p", port])

        # Environment variables
        for key, value in context.get("environment", {}).items():
            cmd.extend(["-e", f"{key}={value}"])

        # Working directory
        if context.get("workdir"):
            cmd.extend(["-w", context["workdir"]])

        # Network
        if context.get("network"):
            cmd.extend(["--network", context["network"]])

        # Image must precede the container command
        cmd.append(image)

        # Optional command to run inside the container
        container_cmd = context.get("command")
        if container_cmd:
            cmd.extend(self._split_command(container_cmd))

        return cmd

    def _build_exec_command(self, context: dict[str, Any]) -> list[str]:
        """Build a `docker exec` argv from the stage context."""
        name = context.get("name")
        command = context.get("command")

        if not name:
            raise ValueError("'name' is required for exec action")
        if not command:
            raise ValueError("'command' is required for exec action")

        cmd = ["docker", "exec"]

        # Interactive/TTY
        if context.get("interactive"):
            cmd.append("-i")
        if context.get("tty"):
            cmd.append("-t")

        # Working directory
        if context.get("workdir"):
            cmd.extend(["-w", context["workdir"]])

        # Environment variables
        for key, value in context.get("environment", {}).items():
            cmd.extend(["-e", f"{key}={value}"])

        cmd.append(name)
        cmd.extend(self._split_command(command))

        return cmd

    def _build_build_command(self, context: dict[str, Any]) -> list[str]:
        """Build a `docker build` argv from the stage context."""
        cmd = ["docker", "build"]

        # Tag falls back to the image name when no explicit tag is given
        tag = context.get("tag") or context.get("image")
        if tag:
            cmd.extend(["-t", tag])

        # Dockerfile
        if context.get("dockerfile"):
            cmd.extend(["-f", context["dockerfile"]])

        # Build args
        for key, value in context.get("build_args", {}).items():
            cmd.extend(["--build-arg", f"{key}={value}"])

        # No cache
        if context.get("no_cache"):
            cmd.append("--no-cache")

        # Context path comes last
        build_context = context.get("context", ".")
        cmd.append(build_context)

        return cmd
322
+
323
+
324
+ # =============================================================================
325
+ # Helper: Setup pipeline infrastructure
326
+ # =============================================================================
327
+
328
+
329
def setup_pipeline_runner(store: WorkflowStore, queue: Queue) -> tuple[QueueProcessor, Orchestrator]:
    """Wire up a QueueProcessor and Orchestrator with DockerTask registered."""
    registry = TaskRegistry()
    registry.register("docker", DockerTask)

    processor = QueueProcessor(queue)

    # One handler per lifecycle message, registered in pipeline order.
    # RunTaskHandler is the only one that also needs the task registry.
    handler_types = (
        StartWorkflowHandler,
        StartStageHandler,
        StartTaskHandler,
        RunTaskHandler,
        CompleteTaskHandler,
        CompleteStageHandler,
        CompleteWorkflowHandler,
    )
    for handler_type in handler_types:
        if handler_type is RunTaskHandler:
            handler: Any = handler_type(queue, store, registry)
        else:
            handler = handler_type(queue, store)
        processor.register_handler(handler)

    return processor, Orchestrator(queue)
351
+
352
+
353
+ # =============================================================================
354
+ # Example 1: Simple Container Run
355
+ # =============================================================================
356
+
357
+
358
def example_simple_run() -> None:
    """Run a single `docker run` stage that echoes a message from Alpine."""
    print("\n" + "=" * 60)
    print("Example 1: Simple Container Run")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()  # private helper; sets up the backing queue table
    runner, orchestrator = setup_pipeline_runner(store, queue)

    echo_stage = StageExecution(
        ref_id="1",
        type="docker",
        name="Run Alpine",
        context={
            "action": "run",
            "image": "alpine:latest",
            "command": "echo Hello from Docker",
        },
        tasks=[
            TaskExecution.create(
                name="Docker Run",
                implementing_class="docker",
                stage_start=True,
                stage_end=True,
            )
        ],
    )
    workflow = Workflow.create(
        application="docker-example",
        name="Simple Run",
        stages=[echo_stage],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    runner.process_all(timeout=60.0)

    finished = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {finished.status}")
    print(f"Output: {finished.stages[0].outputs.get('stdout')}")
401
+
402
+
403
+ # =============================================================================
404
+ # Example 2: Pull and Run
405
+ # =============================================================================
406
+
407
+
408
def example_pull_and_run() -> None:
    """Pull an image in one stage, then run it in a dependent stage."""
    print("\n" + "=" * 60)
    print("Example 2: Pull and Run")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    runner, orchestrator = setup_pipeline_runner(store, queue)

    pull_stage = StageExecution(
        ref_id="1",
        type="docker",
        name="Pull Image",
        context={
            "action": "pull",
            "image": "busybox:latest",
        },
        tasks=[
            TaskExecution.create(
                name="Docker Pull",
                implementing_class="docker",
                stage_start=True,
                stage_end=True,
            )
        ],
    )
    # Depends on the pull stage via requisite_stage_ref_ids.
    run_stage = StageExecution(
        ref_id="2",
        type="docker",
        name="Run Container",
        requisite_stage_ref_ids={"1"},
        context={
            "action": "run",
            "image": "busybox:latest",
            "command": "uname -a",
        },
        tasks=[
            TaskExecution.create(
                name="Docker Run",
                implementing_class="docker",
                stage_start=True,
                stage_end=True,
            )
        ],
    )
    workflow = Workflow.create(
        application="docker-example",
        name="Pull and Run",
        stages=[pull_stage, run_stage],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    runner.process_all(timeout=120.0)

    finished = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {finished.status}")
    for stage in finished.stages:
        stdout = stage.outputs.get("stdout", "")
        print(f" {stage.name}: {stdout[:100]}")
471
+
472
+
473
+ # =============================================================================
474
+ # Example 3: Container with Environment Variables
475
+ # =============================================================================
476
+
477
+
478
def example_with_environment() -> None:
    """Run a container whose command reads injected environment variables."""
    print("\n" + "=" * 60)
    print("Example 3: Container with Environment")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    runner, orchestrator = setup_pipeline_runner(store, queue)

    env_vars = {
        "APP_NAME": "Stabilize",
        "APP_VERSION": "0.9.0",
        "DEBUG": "true",
    }
    env_stage = StageExecution(
        ref_id="1",
        type="docker",
        name="Run with Env",
        context={
            "action": "run",
            "image": "alpine:latest",
            "environment": env_vars,
            # Shell indirection so the variables expand inside the container.
            "command": "sh -c 'echo App: $APP_NAME v$APP_VERSION, Debug: $DEBUG'",
        },
        tasks=[
            TaskExecution.create(
                name="Docker Run",
                implementing_class="docker",
                stage_start=True,
                stage_end=True,
            )
        ],
    )
    workflow = Workflow.create(
        application="docker-example",
        name="Environment Variables",
        stages=[env_stage],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    runner.process_all(timeout=60.0)

    finished = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {finished.status}")
    print(f"Output: {finished.stages[0].outputs.get('stdout')}")
526
+
527
+
528
+ # =============================================================================
529
+ # Example 4: Parallel Container Operations
530
+ # =============================================================================
531
+
532
+
533
def example_parallel_containers() -> None:
    """Run multiple containers in parallel (diamond-shaped DAG)."""
    print("\n" + "=" * 60)
    print("Example 4: Parallel Containers")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    runner, orchestrator = setup_pipeline_runner(store, queue)

    #        Start
    #       /  |  \
    #   Task1 Task2 Task3
    #       \  |  /
    #        Done
    def alpine_stage(ref_id, stage_name, task_name, command, deps=None):
        """Build one `docker run alpine` stage; deps wires DAG edges."""
        extra = {"requisite_stage_ref_ids": deps} if deps else {}
        return StageExecution(
            ref_id=ref_id,
            type="docker",
            name=stage_name,
            context={
                "action": "run",
                "image": "alpine:latest",
                "command": command,
            },
            tasks=[
                TaskExecution.create(
                    name=task_name,
                    implementing_class="docker",
                    stage_start=True,
                    stage_end=True,
                )
            ],
            **extra,
        )

    workflow = Workflow.create(
        application="docker-example",
        name="Parallel Containers",
        stages=[
            alpine_stage("start", "Start", "Start", "echo Starting parallel tasks"),
            alpine_stage("task1", "Task 1: CPU Info", "CPU Info", "cat /proc/cpuinfo | head -10", {"start"}),
            alpine_stage("task2", "Task 2: Memory Info", "Memory Info", "cat /proc/meminfo | head -5", {"start"}),
            alpine_stage("task3", "Task 3: Disk Info", "Disk Info", "df -h", {"start"}),
            alpine_stage("done", "Done", "Done", "echo All parallel tasks completed", {"task1", "task2", "task3"}),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    runner.process_all(timeout=120.0)

    finished = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {finished.status}")
    for stage in finished.stages:
        status_mark = "[OK]" if stage.status == WorkflowStatus.SUCCEEDED else "[FAIL]"
        stdout = stage.outputs.get("stdout", "")
        first_line = stdout.split("\n")[0][:50] if stdout else ""
        print(f" {status_mark} {stage.name}: {first_line}")
664
+
665
+
666
+ # =============================================================================
667
+ # Example 5: List Running Containers
668
+ # =============================================================================
669
+
670
+
671
def example_list_containers() -> None:
    """List Docker containers and images.

    Chains the read-only `ps` and `images` actions in sequence, then prints a
    truncated preview (first 5 lines) of each stage's stdout.
    """
    print("\n" + "=" * 60)
    print("Example 5: List Containers and Images")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    workflow = Workflow.create(
        application="docker-example",
        name="List Resources",
        stages=[
            StageExecution(
                ref_id="1",
                type="docker",
                name="List Containers",
                context={
                    "action": "ps",
                    "all": True,
                },
                tasks=[
                    TaskExecution.create(
                        name="Docker PS",
                        implementing_class="docker",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            StageExecution(
                ref_id="2",
                type="docker",
                name="List Images",
                requisite_stage_ref_ids={"1"},
                context={
                    "action": "images",
                },
                tasks=[
                    TaskExecution.create(
                        name="Docker Images",
                        implementing_class="docker",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=30.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")

    for stage in result.stages:
        print(f"\n{stage.name}:")
        stdout = stage.outputs.get("stdout", "")
        # Split once (the original split the same string three times and used
        # a chr(10) workaround for the f-string backslash restriction).
        lines = stdout.split("\n")
        for line in lines[:5]:
            print(f" {line[:80]}")
        if len(lines) > 5:
            print(f" ... ({len(lines) - 5} more lines)")
739
+
740
+
741
+ # =============================================================================
742
+ # Main
743
+ # =============================================================================
744
+
745
+
746
if __name__ == "__main__":
    # Script entry point: run every example in order. Each one spins up its
    # own in-memory store/queue, so they are independent of one another.
    banner = "=" * 60
    print("Stabilize Docker Examples")
    print(banner)
    print("Requires: Docker installed and running")

    for demo in (
        example_simple_run,
        example_pull_and_run,
        example_with_environment,
        example_parallel_containers,
        example_list_containers,
    ):
        demo()

    print("\n" + banner)
    print("All examples completed!")
    print(banner)