stabilize-0.9.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- stabilize/__init__.py +29 -0
- stabilize/cli.py +1193 -0
- stabilize/context/__init__.py +7 -0
- stabilize/context/stage_context.py +170 -0
- stabilize/dag/__init__.py +15 -0
- stabilize/dag/graph.py +215 -0
- stabilize/dag/topological.py +199 -0
- stabilize/examples/__init__.py +1 -0
- stabilize/examples/docker-example.py +759 -0
- stabilize/examples/golden-standard-expected-result.txt +1 -0
- stabilize/examples/golden-standard.py +488 -0
- stabilize/examples/http-example.py +606 -0
- stabilize/examples/llama-example.py +662 -0
- stabilize/examples/python-example.py +731 -0
- stabilize/examples/shell-example.py +399 -0
- stabilize/examples/ssh-example.py +603 -0
- stabilize/handlers/__init__.py +53 -0
- stabilize/handlers/base.py +226 -0
- stabilize/handlers/complete_stage.py +209 -0
- stabilize/handlers/complete_task.py +75 -0
- stabilize/handlers/complete_workflow.py +150 -0
- stabilize/handlers/run_task.py +369 -0
- stabilize/handlers/start_stage.py +262 -0
- stabilize/handlers/start_task.py +74 -0
- stabilize/handlers/start_workflow.py +136 -0
- stabilize/launcher.py +307 -0
- stabilize/migrations/01KDQ4N9QPJ6Q4MCV3V9GHWPV4_initial_schema.sql +97 -0
- stabilize/migrations/01KDRK3TXW4R2GERC1WBCQYJGG_rag_embeddings.sql +25 -0
- stabilize/migrations/__init__.py +1 -0
- stabilize/models/__init__.py +15 -0
- stabilize/models/stage.py +389 -0
- stabilize/models/status.py +146 -0
- stabilize/models/task.py +125 -0
- stabilize/models/workflow.py +317 -0
- stabilize/orchestrator.py +113 -0
- stabilize/persistence/__init__.py +28 -0
- stabilize/persistence/connection.py +185 -0
- stabilize/persistence/factory.py +136 -0
- stabilize/persistence/memory.py +214 -0
- stabilize/persistence/postgres.py +655 -0
- stabilize/persistence/sqlite.py +674 -0
- stabilize/persistence/store.py +235 -0
- stabilize/queue/__init__.py +59 -0
- stabilize/queue/messages.py +377 -0
- stabilize/queue/processor.py +312 -0
- stabilize/queue/queue.py +526 -0
- stabilize/queue/sqlite_queue.py +354 -0
- stabilize/rag/__init__.py +19 -0
- stabilize/rag/assistant.py +459 -0
- stabilize/rag/cache.py +294 -0
- stabilize/stages/__init__.py +11 -0
- stabilize/stages/builder.py +253 -0
- stabilize/tasks/__init__.py +19 -0
- stabilize/tasks/interface.py +335 -0
- stabilize/tasks/registry.py +255 -0
- stabilize/tasks/result.py +283 -0
- stabilize-0.9.2.dist-info/METADATA +301 -0
- stabilize-0.9.2.dist-info/RECORD +61 -0
- stabilize-0.9.2.dist-info/WHEEL +4 -0
- stabilize-0.9.2.dist-info/entry_points.txt +2 -0
- stabilize-0.9.2.dist-info/licenses/LICENSE +201 -0
stabilize/examples/python-example.py
@@ -0,0 +1,731 @@
#!/usr/bin/env python3
"""
Python Script Example - Demonstrates executing Python code with Stabilize.

This example shows how to:
1. Create a custom Task that executes Python scripts
2. Run inline Python code or external script files
3. Pass inputs and capture outputs
4. Build data processing pipelines

Requirements:
    None (uses subprocess from the standard library)

Run with:
    python examples/python-example.py
"""

import json
import logging
import subprocess
import sys
import tempfile
from pathlib import Path
from typing import Any

logging.basicConfig(level=logging.ERROR)

from stabilize import StageExecution, TaskExecution, Workflow
from stabilize.handlers.complete_stage import CompleteStageHandler
from stabilize.handlers.complete_task import CompleteTaskHandler
from stabilize.handlers.complete_workflow import CompleteWorkflowHandler
from stabilize.handlers.run_task import RunTaskHandler
from stabilize.handlers.start_stage import StartStageHandler
from stabilize.handlers.start_task import StartTaskHandler
from stabilize.handlers.start_workflow import StartWorkflowHandler
from stabilize.orchestrator import Orchestrator
from stabilize.persistence.sqlite import SqliteWorkflowStore
from stabilize.persistence.store import WorkflowStore
from stabilize.queue.processor import QueueProcessor
from stabilize.queue.queue import Queue
from stabilize.queue.sqlite_queue import SqliteQueue
from stabilize.tasks.interface import Task
from stabilize.tasks.registry import TaskRegistry
from stabilize.tasks.result import TaskResult

# =============================================================================
# Custom Task: PythonTask
# =============================================================================


class PythonTask(Task):
    """
    Execute Python code.

    Context Parameters:
        script: Inline Python code to execute (string)
        script_file: Path to a Python script file (alternative to script)
        args: Command-line arguments as a list (optional)
        inputs: Input variables as a dict, available as INPUT in the script (optional)
        python_path: Python interpreter path (default: current interpreter)
        timeout: Execution timeout in seconds (default: 60)

    Outputs:
        stdout: Standard output
        stderr: Standard error
        exit_code: Process exit code
        result: Value of the RESULT variable if set in the script

    Notes:
        - Scripts can read their inputs from the INPUT dict
        - Scripts should set a RESULT variable for their return value
        - RESULT must be JSON-serializable
    """

    # Wrapper template that injects INPUT and captures RESULT
    WRAPPER_TEMPLATE = """
import json
import sys

# Input data
INPUT = {inputs}

# User script
{script}

# Output result if set
if 'RESULT' in dir():
    print("__RESULT_START__")
    print(json.dumps(RESULT))
    print("__RESULT_END__")
"""

    def execute(self, stage: StageExecution) -> TaskResult:
        script = stage.context.get("script")
        script_file = stage.context.get("script_file")
        args = stage.context.get("args", [])
        inputs = stage.context.get("inputs", {})
        python_path = stage.context.get("python_path", sys.executable)
        timeout = stage.context.get("timeout", 60)

        if not script and not script_file:
            return TaskResult.terminal(error="Either 'script' or 'script_file' must be specified")

        if script and script_file:
            return TaskResult.terminal(error="Cannot specify both 'script' and 'script_file'")

        # Handle script file
        if script_file:
            script_path = Path(script_file)
            if not script_path.exists():
                return TaskResult.terminal(error=f"Script file not found: {script_file}")
            script = script_path.read_text()

        # At this point, script is guaranteed to be a string (validated above)
        assert script is not None
        print(f"  [PythonTask] Executing script ({len(script)} chars)")

        # Create the wrapped script
        wrapped_script = self.WRAPPER_TEMPLATE.format(
            inputs=json.dumps(inputs),
            script=script,
        )

        # Write to a temp file and execute
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as tmp:
            tmp.write(wrapped_script)
            tmp.flush()
            tmp_path = tmp.name

        try:
            cmd = [python_path, tmp_path] + list(args)
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
            )

            stdout = result.stdout
            stderr = result.stderr
            exit_code = result.returncode

            # Extract RESULT if present
            script_result = None
            if "__RESULT_START__" in stdout:
                start = stdout.index("__RESULT_START__") + len("__RESULT_START__\n")
                end = stdout.index("__RESULT_END__")
                result_json = stdout[start:end].strip()
                try:
                    script_result = json.loads(result_json)
                except json.JSONDecodeError:
                    script_result = result_json

                # Strip the result markers from stdout
                stdout = (
                    stdout[: stdout.index("__RESULT_START__")]
                    + stdout[stdout.index("__RESULT_END__") + len("__RESULT_END__\n") :]
                ).strip()

            outputs = {
                "stdout": stdout,
                "stderr": stderr,
                "exit_code": exit_code,
                "result": script_result,
            }

            if exit_code == 0:
                print(f"  [PythonTask] Success, result: {str(script_result)[:100]}")
                return TaskResult.success(outputs=outputs)
            else:
                print(f"  [PythonTask] Failed with exit code {exit_code}")
                return TaskResult.terminal(
                    error=f"Script exited with code {exit_code}",
                    context=outputs,
                )

        except subprocess.TimeoutExpired:
            return TaskResult.terminal(error=f"Script timed out after {timeout}s")

        finally:
            Path(tmp_path).unlink(missing_ok=True)


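# The wrapper mechanics above are easiest to see by expanding the template by
# hand. A minimal sketch (the helper name is illustrative, not part of the
# package API): it renders WRAPPER_TEMPLATE exactly as execute() does, so the
# returned string is the source PythonTask would write to its temp file.
def sketch_wrapped_script() -> str:
    """Return the wrapped source for a tiny script, for inspection only."""
    return PythonTask.WRAPPER_TEMPLATE.format(
        inputs=json.dumps({"n": 3}),
        script="RESULT = INPUT['n'] * 2",
    )

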
# =============================================================================
# Helper: Set up pipeline infrastructure
# =============================================================================


def setup_pipeline_runner(store: WorkflowStore, queue: Queue) -> tuple[QueueProcessor, Orchestrator]:
    """Create a processor and orchestrator with PythonTask registered."""
    task_registry = TaskRegistry()
    task_registry.register("python", PythonTask)

    processor = QueueProcessor(queue)

    handlers: list[Any] = [
        StartWorkflowHandler(queue, store),
        StartStageHandler(queue, store),
        StartTaskHandler(queue, store),
        RunTaskHandler(queue, store, task_registry),
        CompleteTaskHandler(queue, store),
        CompleteStageHandler(queue, store),
        CompleteWorkflowHandler(queue, store),
    ]

    for handler in handlers:
        processor.register_handler(handler)

    orchestrator = Orchestrator(queue)
    return processor, orchestrator


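# The examples below all drive PythonTask through the queue machinery. For
# contrast, a task can also be exercised directly. A minimal sketch, assuming
# StageExecution accepts the same keyword arguments used throughout this file
# (the function is illustrative and is not called by main):
def sketch_direct_invocation() -> TaskResult:
    """Run PythonTask once, without a store, queue, or orchestrator."""
    stage = StageExecution(
        ref_id="demo",
        type="python",
        name="Inline Demo",
        context={
            "script": "RESULT = sum(INPUT['xs'])",
            "inputs": {"xs": [1, 2, 3]},
        },
        tasks=[],
    )
    return PythonTask().execute(stage)

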
# =============================================================================
# Example 1: Simple Calculation
# =============================================================================


def example_simple_calculation() -> None:
    """Run a simple inline Python calculation."""
    print("\n" + "=" * 60)
    print("Example 1: Simple Calculation")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    workflow = Workflow.create(
        application="python-example",
        name="Simple Calculation",
        stages=[
            StageExecution(
                ref_id="1",
                type="python",
                name="Calculate Fibonacci",
                context={
                    "script": """
def fib(n):
    if n <= 1:
        return n
    return fib(n-1) + fib(n-2)

n = INPUT.get('n', 10)
RESULT = {
    'n': n,
    'fibonacci': fib(n),
    'sequence': [fib(i) for i in range(n+1)]
}
print(f"Fibonacci({n}) = {fib(n)}")
""",
                    "inputs": {"n": 10},
                },
                tasks=[
                    TaskExecution.create(
                        name="Run Python",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=30.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")
    script_result = result.stages[0].outputs.get("result", {})
    print(f"Result: Fibonacci({script_result.get('n')}) = {script_result.get('fibonacci')}")
    print(f"Sequence: {script_result.get('sequence')}")


# =============================================================================
# Example 2: Data Processing Pipeline
# =============================================================================


def example_data_pipeline() -> None:
    """Sequential data processing: generate -> transform -> validate."""
    print("\n" + "=" * 60)
    print("Example 2: Data Processing Pipeline")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    workflow = Workflow.create(
        application="python-example",
        name="Data Pipeline",
        stages=[
            # Stage 1: Generate data
            StageExecution(
                ref_id="1",
                type="python",
                name="Generate Data",
                context={
                    "script": """
import random
random.seed(42)  # Reproducible

data = [
    {'id': i, 'value': random.randint(1, 100), 'name': f'item_{i}'}
    for i in range(10)
]
RESULT = data
print(f"Generated {len(data)} records")
""",
                },
                tasks=[
                    TaskExecution.create(
                        name="Generate",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Stage 2: Transform data
            StageExecution(
                ref_id="2",
                type="python",
                name="Transform Data",
                requisite_stage_ref_ids={"1"},
                context={
                    "script": """
data = INPUT['data']

# Transform: double values, uppercase names
transformed = [
    {
        'id': item['id'],
        'value': item['value'] * 2,
        'name': item['name'].upper(),
        'category': 'HIGH' if item['value'] > 50 else 'LOW'
    }
    for item in data
]
RESULT = transformed
print(f"Transformed {len(transformed)} records")
""",
                    "inputs": {"data": []},  # Will be populated from stage context
                },
                tasks=[
                    TaskExecution.create(
                        name="Transform",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Stage 3: Validate and summarize
            StageExecution(
                ref_id="3",
                type="python",
                name="Validate Data",
                requisite_stage_ref_ids={"2"},
                context={
                    "script": """
data = INPUT['data']

# Validation
errors = []
for item in data:
    if item['value'] < 0:
        errors.append(f"Negative value for {item['id']}")
    if not item['name']:
        errors.append(f"Empty name for {item['id']}")

# Summary
summary = {
    'total_records': len(data),
    'high_count': sum(1 for d in data if d['category'] == 'HIGH'),
    'low_count': sum(1 for d in data if d['category'] == 'LOW'),
    'total_value': sum(d['value'] for d in data),
    'avg_value': sum(d['value'] for d in data) / len(data) if data else 0,
    'errors': errors,
    'valid': len(errors) == 0
}
RESULT = summary
print(f"Validation: {'PASSED' if summary['valid'] else 'FAILED'}")
print(f"Total value: {summary['total_value']}")
""",
                    "inputs": {"data": []},
                },
                tasks=[
                    TaskExecution.create(
                        name="Validate",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=30.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")

    for stage in result.stages:
        print(f"\n{stage.name}:")
        script_result = stage.outputs.get("result")
        if isinstance(script_result, dict):
            for k, v in script_result.items():
                print(f"  {k}: {v}")
        elif isinstance(script_result, list):
            print(f"  {len(script_result)} items")
        else:
            print(f"  {script_result}")


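# Note on the "inputs": {"data": []} placeholders above: PythonTask itself
# only reads stage.context, so the empty lists presumably get overwritten
# from the upstream stage's outputs by the stage/task handlers before the
# task runs (the inline comment "Will be populated from stage context"
# points at that wiring). Roughly, sketched by hand, the substitution
# amounts to something like:
#
#     upstream = store.retrieve(workflow.id).stages[0].outputs["result"]
#     transform_stage.context["inputs"]["data"] = upstream
#
# The exact mechanism lives in the handler implementations, not in this file.

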
# =============================================================================
# Example 3: Parallel Processing
# =============================================================================


def example_parallel_processing() -> None:
    """Process data in parallel branches."""
    print("\n" + "=" * 60)
    print("Example 3: Parallel Processing")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    #      Generate
    #     /   |    \
    # Stats  Sort  Filter
    #     \   |    /
    #      Combine

    workflow = Workflow.create(
        application="python-example",
        name="Parallel Processing",
        stages=[
            # Generate
            StageExecution(
                ref_id="generate",
                type="python",
                name="Generate Numbers",
                context={
                    "script": """
import random
random.seed(123)
numbers = [random.randint(1, 1000) for _ in range(100)]
RESULT = numbers
print(f"Generated {len(numbers)} numbers")
""",
                },
                tasks=[
                    TaskExecution.create(
                        name="Generate",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Parallel: Statistics
            StageExecution(
                ref_id="stats",
                type="python",
                name="Calculate Statistics",
                requisite_stage_ref_ids={"generate"},
                context={
                    "script": """
numbers = INPUT['numbers']
RESULT = {
    'count': len(numbers),
    'sum': sum(numbers),
    'min': min(numbers),
    'max': max(numbers),
    'avg': sum(numbers) / len(numbers),
}
print(f"Stats: min={RESULT['min']}, max={RESULT['max']}, avg={RESULT['avg']:.2f}")
""",
                    "inputs": {"numbers": []},
                },
                tasks=[
                    TaskExecution.create(
                        name="Stats",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Parallel: Sort
            StageExecution(
                ref_id="sort",
                type="python",
                name="Sort Numbers",
                requisite_stage_ref_ids={"generate"},
                context={
                    "script": """
numbers = INPUT['numbers']
sorted_nums = sorted(numbers)
RESULT = {
    'sorted': sorted_nums,
    'median': sorted_nums[len(sorted_nums)//2],
}
print(f"Sorted {len(sorted_nums)} numbers, median={RESULT['median']}")
""",
                    "inputs": {"numbers": []},
                },
                tasks=[
                    TaskExecution.create(
                        name="Sort",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Parallel: Filter
            StageExecution(
                ref_id="filter",
                type="python",
                name="Filter Numbers",
                requisite_stage_ref_ids={"generate"},
                context={
                    "script": """
numbers = INPUT['numbers']
threshold = 500
above = [n for n in numbers if n > threshold]
below = [n for n in numbers if n <= threshold]
RESULT = {
    'above_threshold': len(above),
    'below_threshold': len(below),
    'threshold': threshold,
}
print(f"Above {threshold}: {len(above)}, Below: {len(below)}")
""",
                    "inputs": {"numbers": []},
                },
                tasks=[
                    TaskExecution.create(
                        name="Filter",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Combine results
            StageExecution(
                ref_id="combine",
                type="python",
                name="Combine Results",
                requisite_stage_ref_ids={"stats", "sort", "filter"},
                context={
                    "script": """
RESULT = {
    'processing': 'complete',
    'branches': ['stats', 'sort', 'filter'],
    'summary': 'All parallel branches completed successfully'
}
print("Combined results from all branches")
""",
                },
                tasks=[
                    TaskExecution.create(
                        name="Combine",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=30.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")

    for stage in result.stages:
        script_result = stage.outputs.get("result", {})
        if isinstance(script_result, dict):
            # Show key metrics
            display = {k: v for k, v in script_result.items() if k != "sorted"}
            print(f"  {stage.name}: {display}")


# =============================================================================
# Example 4: Error Handling
# =============================================================================


def example_error_handling() -> None:
    """Demonstrate error handling in Python scripts."""
    print("\n" + "=" * 60)
    print("Example 4: Error Handling")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    workflow = Workflow.create(
        application="python-example",
        name="Error Handling",
        stages=[
            # Stage 1: Validate input (succeeds)
            StageExecution(
                ref_id="1",
                type="python",
                name="Validate Input",
                context={
                    "script": """
data = INPUT.get('data', {})

if not isinstance(data, dict):
    raise ValueError("Data must be a dictionary")

required_fields = ['name', 'value']
missing = [f for f in required_fields if f not in data]

if missing:
    raise ValueError(f"Missing required fields: {missing}")

RESULT = {'valid': True, 'data': data}
print("Validation passed")
""",
                    "inputs": {"data": {"name": "test", "value": 42}},
                },
                tasks=[
                    TaskExecution.create(
                        name="Validate",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Stage 2: Process with try/except
            StageExecution(
                ref_id="2",
                type="python",
                name="Safe Processing",
                requisite_stage_ref_ids={"1"},
                context={
                    "script": """
try:
    # Simulate processing that might fail
    value = INPUT['value']
    result = 100 / value  # Would fail if value is 0

    RESULT = {
        'success': True,
        'result': result,
        'error': None
    }
    print(f"Processing succeeded: {result}")

except ZeroDivisionError as e:
    RESULT = {
        'success': False,
        'result': None,
        'error': str(e)
    }
    print(f"Processing failed: {e}")

except Exception as e:
    RESULT = {
        'success': False,
        'result': None,
        'error': f"Unexpected error: {e}"
    }
    print(f"Unexpected error: {e}")
""",
                    "inputs": {"value": 5},
                },
                tasks=[
                    TaskExecution.create(
                        name="Process",
                        implementing_class="python",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=30.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")

    for stage in result.stages:
        script_result = stage.outputs.get("result", {})
        print(f"  {stage.name}: {script_result}")


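# Example 4 keeps failures inside the script via try/except, so the stage
# itself still succeeds. When a script raises without a handler, the
# interpreter exits non-zero and execute() returns TaskResult.terminal with
# stdout/stderr attached as context (see the exit_code branch above). A
# minimal sketch of that path (illustrative helper, not called by main):
def sketch_unhandled_failure() -> TaskResult:
    """Run a script that raises, yielding a terminal TaskResult."""
    stage = StageExecution(
        ref_id="fail-demo",
        type="python",
        name="Failing Demo",
        context={"script": "raise RuntimeError('boom')"},
        tasks=[],
    )
    return PythonTask().execute(stage)

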
# =============================================================================
# Main
# =============================================================================


if __name__ == "__main__":
    print("Stabilize Python Script Examples")
    print("=" * 60)

    example_simple_calculation()
    example_data_pipeline()
    example_parallel_processing()
    example_error_handling()

    print("\n" + "=" * 60)
    print("All examples completed!")
    print("=" * 60)