stabilize-0.9.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. stabilize/__init__.py +29 -0
  2. stabilize/cli.py +1193 -0
  3. stabilize/context/__init__.py +7 -0
  4. stabilize/context/stage_context.py +170 -0
  5. stabilize/dag/__init__.py +15 -0
  6. stabilize/dag/graph.py +215 -0
  7. stabilize/dag/topological.py +199 -0
  8. stabilize/examples/__init__.py +1 -0
  9. stabilize/examples/docker-example.py +759 -0
  10. stabilize/examples/golden-standard-expected-result.txt +1 -0
  11. stabilize/examples/golden-standard.py +488 -0
  12. stabilize/examples/http-example.py +606 -0
  13. stabilize/examples/llama-example.py +662 -0
  14. stabilize/examples/python-example.py +731 -0
  15. stabilize/examples/shell-example.py +399 -0
  16. stabilize/examples/ssh-example.py +603 -0
  17. stabilize/handlers/__init__.py +53 -0
  18. stabilize/handlers/base.py +226 -0
  19. stabilize/handlers/complete_stage.py +209 -0
  20. stabilize/handlers/complete_task.py +75 -0
  21. stabilize/handlers/complete_workflow.py +150 -0
  22. stabilize/handlers/run_task.py +369 -0
  23. stabilize/handlers/start_stage.py +262 -0
  24. stabilize/handlers/start_task.py +74 -0
  25. stabilize/handlers/start_workflow.py +136 -0
  26. stabilize/launcher.py +307 -0
  27. stabilize/migrations/01KDQ4N9QPJ6Q4MCV3V9GHWPV4_initial_schema.sql +97 -0
  28. stabilize/migrations/01KDRK3TXW4R2GERC1WBCQYJGG_rag_embeddings.sql +25 -0
  29. stabilize/migrations/__init__.py +1 -0
  30. stabilize/models/__init__.py +15 -0
  31. stabilize/models/stage.py +389 -0
  32. stabilize/models/status.py +146 -0
  33. stabilize/models/task.py +125 -0
  34. stabilize/models/workflow.py +317 -0
  35. stabilize/orchestrator.py +113 -0
  36. stabilize/persistence/__init__.py +28 -0
  37. stabilize/persistence/connection.py +185 -0
  38. stabilize/persistence/factory.py +136 -0
  39. stabilize/persistence/memory.py +214 -0
  40. stabilize/persistence/postgres.py +655 -0
  41. stabilize/persistence/sqlite.py +674 -0
  42. stabilize/persistence/store.py +235 -0
  43. stabilize/queue/__init__.py +59 -0
  44. stabilize/queue/messages.py +377 -0
  45. stabilize/queue/processor.py +312 -0
  46. stabilize/queue/queue.py +526 -0
  47. stabilize/queue/sqlite_queue.py +354 -0
  48. stabilize/rag/__init__.py +19 -0
  49. stabilize/rag/assistant.py +459 -0
  50. stabilize/rag/cache.py +294 -0
  51. stabilize/stages/__init__.py +11 -0
  52. stabilize/stages/builder.py +253 -0
  53. stabilize/tasks/__init__.py +19 -0
  54. stabilize/tasks/interface.py +335 -0
  55. stabilize/tasks/registry.py +255 -0
  56. stabilize/tasks/result.py +283 -0
  57. stabilize-0.9.2.dist-info/METADATA +301 -0
  58. stabilize-0.9.2.dist-info/RECORD +61 -0
  59. stabilize-0.9.2.dist-info/WHEEL +4 -0
  60. stabilize-0.9.2.dist-info/entry_points.txt +2 -0
  61. stabilize-0.9.2.dist-info/licenses/LICENSE +201 -0
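
The wheel is dominated by the workflow engine itself (handlers, queue, persistence, tasks) plus a set of runnable examples. The diff below shows stabilize/examples/shell-example.py; as a condensed orientation, the wiring pattern that example uses looks roughly like the following. This is a sketch assembled from the code in the diff, not a snippet shipped in the package:

# Sketch of the setup pattern used by shell-example.py below (illustrative only).
from stabilize.orchestrator import Orchestrator
from stabilize.persistence.sqlite import SqliteWorkflowStore
from stabilize.queue.processor import QueueProcessor
from stabilize.queue.sqlite_queue import SqliteQueue
from stabilize.tasks.registry import TaskRegistry

store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)   # persistence backend
queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")  # message queue
registry = TaskRegistry()                                               # maps type names to Task classes

processor = QueueProcessor(queue)   # drains queue messages and dispatches them to registered handlers
orchestrator = Orchestrator(queue)  # enqueues the message that starts a workflow

# Handlers and tasks are then registered, a Workflow is built and stored, and finally:
# store.store(workflow); orchestrator.start(workflow); processor.process_all(timeout=30.0)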
stabilize/examples/shell-example.py
@@ -0,0 +1,399 @@
+ #!/usr/bin/env python3
+ """
+ Shell Command Example - Demonstrates running shell commands with Stabilize.
+
+ This example shows how to:
+ 1. Create a custom Task that runs shell commands
+ 2. Build a workflow with multiple stages
+ 3. Execute the workflow and see the results
+
+ Run with:
+     python examples/shell-example.py
+ """
+
+ import logging
+ import subprocess
+ from typing import Any
+
+ # Configure logging before importing stabilize modules
+ logging.basicConfig(level=logging.ERROR)  # Suppress all but errors
+
+ from stabilize import StageExecution, TaskExecution, Workflow, WorkflowStatus
+ from stabilize.handlers.complete_stage import CompleteStageHandler
+ from stabilize.handlers.complete_task import CompleteTaskHandler
+ from stabilize.handlers.complete_workflow import CompleteWorkflowHandler
+ from stabilize.handlers.run_task import RunTaskHandler
+ from stabilize.handlers.start_stage import StartStageHandler
+ from stabilize.handlers.start_task import StartTaskHandler
+ from stabilize.handlers.start_workflow import StartWorkflowHandler
+ from stabilize.orchestrator import Orchestrator
+ from stabilize.persistence.sqlite import SqliteWorkflowStore
+ from stabilize.persistence.store import WorkflowStore
+ from stabilize.queue.processor import QueueProcessor
+ from stabilize.queue.queue import Queue
+ from stabilize.queue.sqlite_queue import SqliteQueue
+ from stabilize.tasks.interface import Task
+ from stabilize.tasks.registry import TaskRegistry
+ from stabilize.tasks.result import TaskResult
+
+ # =============================================================================
+ # Custom Task: ShellTask
+ # =============================================================================
+
+
+ class ShellTask(Task):
+     """
+     Execute shell commands.
+
+     Reads 'command' from stage.context and executes it.
+     Outputs: stdout, stderr, exit_code
+     """
+
+     def execute(self, stage: StageExecution) -> TaskResult:
+         command = stage.context.get("command")
+         timeout = stage.context.get("timeout", 60)
+
+         if not command:
+             return TaskResult.terminal(error="No 'command' specified in context")
+
+         print(f" [ShellTask] Running: {command}")
+
+         try:
+             result = subprocess.run(
+                 command,
+                 shell=True,
+                 capture_output=True,
+                 text=True,
+                 timeout=timeout,
+             )
+
+             outputs = {
+                 "stdout": result.stdout.strip(),
+                 "stderr": result.stderr.strip(),
+                 "exit_code": result.returncode,
+             }
+
+             if result.returncode == 0:
+                 print(f" [ShellTask] Success! Output: {result.stdout.strip()[:100]}")
+                 return TaskResult.success(outputs=outputs)
+             else:
+                 print(f" [ShellTask] Failed with exit code {result.returncode}")
+                 # Use failed_continue to allow workflow to proceed
+                 if stage.context.get("continue_on_failure"):
+                     return TaskResult.failed_continue(error=f"Exit code {result.returncode}", outputs=outputs)
+                 return TaskResult.terminal(
+                     error=f"Command failed with exit code {result.returncode}",
+                     context=outputs,
+                 )
+
+         except subprocess.TimeoutExpired:
+             print(f" [ShellTask] Timed out after {timeout}s")
+             return TaskResult.terminal(error=f"Command timed out after {timeout}s")
+
+
+ # =============================================================================
+ # Helper: Setup pipeline infrastructure
+ # =============================================================================
+
+
+ def setup_pipeline_runner(store: WorkflowStore, queue: Queue) -> tuple[QueueProcessor, Orchestrator]:
+     """Create processor and orchestrator with ShellTask registered."""
+     # Create task registry and register our ShellTask
+     task_registry = TaskRegistry()
+     task_registry.register("shell", ShellTask)
+
+     # Create message processor
+     processor = QueueProcessor(queue)
+
+     # Register all handlers (this is how the engine processes workflow messages)
+     handlers: list[Any] = [
+         StartWorkflowHandler(queue, store),
+         StartStageHandler(queue, store),
+         StartTaskHandler(queue, store),
+         RunTaskHandler(queue, store, task_registry),  # This executes our ShellTask
+         CompleteTaskHandler(queue, store),
+         CompleteStageHandler(queue, store),
+         CompleteWorkflowHandler(queue, store),
+     ]
+
+     for handler in handlers:
+         processor.register_handler(handler)
+
+     # Create orchestrator (starts workflows)
+     orchestrator = Orchestrator(queue)
+
+     return processor, orchestrator
+
+
+ # =============================================================================
+ # Example 1: Simple Single Command
+ # =============================================================================
+
+
+ def example_simple() -> None:
+     """Run a single shell command."""
+     print("\n" + "=" * 60)
+     print("Example 1: Simple Single Command")
+     print("=" * 60)
+
+     # Setup - use in-memory SQLite for simplicity
+     store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
+     queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
+     queue._create_table()
+     processor, orchestrator = setup_pipeline_runner(store, queue)
+
+     # Create workflow
+     workflow = Workflow.create(
+         application="shell-example",
+         name="Simple Command",
+         stages=[
+             StageExecution(
+                 ref_id="1",
+                 type="shell",
+                 name="List Current Directory",
+                 context={"command": "ls -la"},
+                 tasks=[
+                     TaskExecution.create(
+                         name="Run ls",
+                         implementing_class="shell",
+                         stage_start=True,
+                         stage_end=True,
+                     ),
+                 ],
+             ),
+         ],
+     )
+
+     # Run
+     store.store(workflow)
+     orchestrator.start(workflow)
+     processor.process_all(timeout=30.0)
+
+     # Check result
+     result = store.retrieve(workflow.id)
+     print(f"\nWorkflow Status: {result.status}")
+     print("Stage Output (first 200 chars):")
+     stdout = result.stages[0].outputs.get("stdout", "")
+     print(stdout[:200] + "..." if len(stdout) > 200 else stdout)
+
+
+ # =============================================================================
+ # Example 2: Sequential Commands (Pipeline)
+ # =============================================================================
+
+
+ def example_sequential() -> None:
+     """Run multiple commands in sequence."""
+     print("\n" + "=" * 60)
+     print("Example 2: Sequential Commands")
+     print("=" * 60)
+
+     # Setup
+     store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
+     queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
+     queue._create_table()
+     processor, orchestrator = setup_pipeline_runner(store, queue)
+
+     # Create workflow: create dir -> create file -> read file
+     workflow = Workflow.create(
+         application="shell-example",
+         name="Sequential Commands",
+         stages=[
+             StageExecution(
+                 ref_id="1",
+                 type="shell",
+                 name="Create Temp Directory",
+                 context={"command": "mkdir -p /tmp/stabilize_test"},
+                 tasks=[
+                     TaskExecution.create(
+                         name="mkdir",
+                         implementing_class="shell",
+                         stage_start=True,
+                         stage_end=True,
+                     ),
+                 ],
+             ),
+             StageExecution(
+                 ref_id="2",
+                 type="shell",
+                 name="Create File",
+                 requisite_stage_ref_ids={"1"},  # depends on stage 1
+                 context={"command": "echo 'Hello from Stabilize!' > /tmp/stabilize_test/hello.txt"},
+                 tasks=[
+                     TaskExecution.create(
+                         name="create file",
+                         implementing_class="shell",
+                         stage_start=True,
+                         stage_end=True,
+                     ),
+                 ],
+             ),
+             StageExecution(
+                 ref_id="3",
+                 type="shell",
+                 name="Read File",
+                 requisite_stage_ref_ids={"2"},  # depends on stage 2
+                 context={"command": "cat /tmp/stabilize_test/hello.txt"},
+                 tasks=[
+                     TaskExecution.create(
+                         name="read file",
+                         implementing_class="shell",
+                         stage_start=True,
+                         stage_end=True,
+                     ),
+                 ],
+             ),
+             StageExecution(
+                 ref_id="4",
+                 type="shell",
+                 name="Cleanup",
+                 requisite_stage_ref_ids={"3"},
+                 context={"command": "rm -rf /tmp/stabilize_test"},
+                 tasks=[
+                     TaskExecution.create(
+                         name="cleanup",
+                         implementing_class="shell",
+                         stage_start=True,
+                         stage_end=True,
+                     ),
+                 ],
+             ),
+         ],
+     )
+
+     # Run
+     store.store(workflow)
+     orchestrator.start(workflow)
+     processor.process_all(timeout=30.0)
+
+     # Check result
+     result = store.retrieve(workflow.id)
+     print(f"\nWorkflow Status: {result.status}")
+     for stage in result.stages:
+         status_icon = "✓" if stage.status == WorkflowStatus.SUCCEEDED else "✗"
+         print(f" {status_icon} {stage.name}: {stage.status}")
+         if stage.outputs.get("stdout"):
+             print(f" Output: {stage.outputs['stdout']}")
+
+
+ # =============================================================================
+ # Example 3: Parallel Commands
+ # =============================================================================
+
+
+ def example_parallel() -> None:
+     """Run commands in parallel branches."""
+     print("\n" + "=" * 60)
+     print("Example 3: Parallel Commands")
+     print("=" * 60)
+
+     # Setup
+     store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
+     queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
+     queue._create_table()
+     processor, orchestrator = setup_pipeline_runner(store, queue)
+
+     # Create workflow with parallel branches:
+     #        Setup
+     #       /      \
+     #   Check1    Check2
+     #       \      /
+     #        Report
+     workflow = Workflow.create(
+         application="shell-example",
+         name="Parallel Commands",
+         stages=[
+             StageExecution(
+                 ref_id="setup",
+                 type="shell",
+                 name="Setup",
+                 context={"command": "echo 'Starting parallel checks...'"},
+                 tasks=[
+                     TaskExecution.create(
+                         name="setup",
+                         implementing_class="shell",
+                         stage_start=True,
+                         stage_end=True,
+                     ),
+                 ],
+             ),
+             # These two run in parallel (both depend only on setup)
+             StageExecution(
+                 ref_id="check1",
+                 type="shell",
+                 name="Check Python Version",
+                 requisite_stage_ref_ids={"setup"},
+                 context={"command": "python3 --version"},
+                 tasks=[
+                     TaskExecution.create(
+                         name="check python",
+                         implementing_class="shell",
+                         stage_start=True,
+                         stage_end=True,
+                     ),
+                 ],
+             ),
+             StageExecution(
+                 ref_id="check2",
+                 type="shell",
+                 name="Check Git Version",
+                 requisite_stage_ref_ids={"setup"},
+                 context={"command": "git --version"},
+                 tasks=[
+                     TaskExecution.create(
+                         name="check git",
+                         implementing_class="shell",
+                         stage_start=True,
+                         stage_end=True,
+                     ),
+                 ],
+             ),
+             # This waits for both parallel branches
+             StageExecution(
+                 ref_id="report",
+                 type="shell",
+                 name="Report",
+                 requisite_stage_ref_ids={"check1", "check2"},
+                 context={"command": "echo 'All checks completed!'"},
+                 tasks=[
+                     TaskExecution.create(
+                         name="report",
+                         implementing_class="shell",
+                         stage_start=True,
+                         stage_end=True,
+                     ),
+                 ],
+             ),
+         ],
+     )
+
+     # Run
+     store.store(workflow)
+     orchestrator.start(workflow)
+     processor.process_all(timeout=30.0)
+
+     # Check result
+     result = store.retrieve(workflow.id)
+     print(f"\nWorkflow Status: {result.status}")
+     for stage in result.stages:
+         status_icon = "✓" if stage.status == WorkflowStatus.SUCCEEDED else "✗"
+         stdout = stage.outputs.get("stdout", "")
+         print(f" {status_icon} {stage.name}: {stdout}")
+
+
+ # =============================================================================
+ # Main
+ # =============================================================================
+
+
+ if __name__ == "__main__":
+     print("Stabilize Shell Command Examples")
+     print("=" * 60)
+
+     example_simple()
+     example_sequential()
+     example_parallel()
+
+     print("\n" + "=" * 60)
+     print("All examples completed!")
+     print("=" * 60)
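
A note on the ShellTask shown above: it hands command strings to subprocess.run with shell=True, which keeps the examples short but means whatever lands in stage context is interpreted by a shell. A variant that fits the same Task/TaskResult API shown in this diff is to accept an argument list and skip the shell entirely. The sketch below is illustrative only; the "argv" context key and the ArgvTask name are invented for this example and are not part of the published package:

# Hypothetical variant of ShellTask: reads an argv list from stage.context
# instead of a shell string, so no shell=True is needed. Sketch only.
import subprocess

from stabilize import StageExecution
from stabilize.tasks.interface import Task
from stabilize.tasks.result import TaskResult


class ArgvTask(Task):
    def execute(self, stage: StageExecution) -> TaskResult:
        argv = stage.context.get("argv")            # e.g. ["git", "--version"]
        timeout = stage.context.get("timeout", 60)
        if not argv:
            return TaskResult.terminal(error="No 'argv' specified in context")
        try:
            result = subprocess.run(argv, capture_output=True, text=True, timeout=timeout)
        except subprocess.TimeoutExpired:
            return TaskResult.terminal(error=f"Command timed out after {timeout}s")
        outputs = {
            "stdout": result.stdout.strip(),
            "stderr": result.stderr.strip(),
            "exit_code": result.returncode,
        }
        if result.returncode == 0:
            return TaskResult.success(outputs=outputs)
        return TaskResult.terminal(error=f"Exit code {result.returncode}", context=outputs)

Such a task would be registered the same way as the shell task, e.g. task_registry.register("argv", ArgvTask), and referenced from a stage via implementing_class="argv" with context={"argv": ["git", "--version"]}.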