stabilize 0.9.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- stabilize/__init__.py +29 -0
- stabilize/cli.py +1193 -0
- stabilize/context/__init__.py +7 -0
- stabilize/context/stage_context.py +170 -0
- stabilize/dag/__init__.py +15 -0
- stabilize/dag/graph.py +215 -0
- stabilize/dag/topological.py +199 -0
- stabilize/examples/__init__.py +1 -0
- stabilize/examples/docker-example.py +759 -0
- stabilize/examples/golden-standard-expected-result.txt +1 -0
- stabilize/examples/golden-standard.py +488 -0
- stabilize/examples/http-example.py +606 -0
- stabilize/examples/llama-example.py +662 -0
- stabilize/examples/python-example.py +731 -0
- stabilize/examples/shell-example.py +399 -0
- stabilize/examples/ssh-example.py +603 -0
- stabilize/handlers/__init__.py +53 -0
- stabilize/handlers/base.py +226 -0
- stabilize/handlers/complete_stage.py +209 -0
- stabilize/handlers/complete_task.py +75 -0
- stabilize/handlers/complete_workflow.py +150 -0
- stabilize/handlers/run_task.py +369 -0
- stabilize/handlers/start_stage.py +262 -0
- stabilize/handlers/start_task.py +74 -0
- stabilize/handlers/start_workflow.py +136 -0
- stabilize/launcher.py +307 -0
- stabilize/migrations/01KDQ4N9QPJ6Q4MCV3V9GHWPV4_initial_schema.sql +97 -0
- stabilize/migrations/01KDRK3TXW4R2GERC1WBCQYJGG_rag_embeddings.sql +25 -0
- stabilize/migrations/__init__.py +1 -0
- stabilize/models/__init__.py +15 -0
- stabilize/models/stage.py +389 -0
- stabilize/models/status.py +146 -0
- stabilize/models/task.py +125 -0
- stabilize/models/workflow.py +317 -0
- stabilize/orchestrator.py +113 -0
- stabilize/persistence/__init__.py +28 -0
- stabilize/persistence/connection.py +185 -0
- stabilize/persistence/factory.py +136 -0
- stabilize/persistence/memory.py +214 -0
- stabilize/persistence/postgres.py +655 -0
- stabilize/persistence/sqlite.py +674 -0
- stabilize/persistence/store.py +235 -0
- stabilize/queue/__init__.py +59 -0
- stabilize/queue/messages.py +377 -0
- stabilize/queue/processor.py +312 -0
- stabilize/queue/queue.py +526 -0
- stabilize/queue/sqlite_queue.py +354 -0
- stabilize/rag/__init__.py +19 -0
- stabilize/rag/assistant.py +459 -0
- stabilize/rag/cache.py +294 -0
- stabilize/stages/__init__.py +11 -0
- stabilize/stages/builder.py +253 -0
- stabilize/tasks/__init__.py +19 -0
- stabilize/tasks/interface.py +335 -0
- stabilize/tasks/registry.py +255 -0
- stabilize/tasks/result.py +283 -0
- stabilize-0.9.2.dist-info/METADATA +301 -0
- stabilize-0.9.2.dist-info/RECORD +61 -0
- stabilize-0.9.2.dist-info/WHEEL +4 -0
- stabilize-0.9.2.dist-info/entry_points.txt +2 -0
- stabilize-0.9.2.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,603 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
SSH Example - Demonstrates executing remote commands via SSH with Stabilize.
|
|
4
|
+
|
|
5
|
+
This example shows how to:
|
|
6
|
+
1. Create a custom Task that executes commands over SSH
|
|
7
|
+
2. Run commands on remote servers
|
|
8
|
+
3. Build deployment and administration workflows
|
|
9
|
+
|
|
10
|
+
Requirements:
|
|
11
|
+
SSH client installed (ssh command available)
|
|
12
|
+
SSH access to target hosts (key-based auth recommended)
|
|
13
|
+
|
|
14
|
+
Run with:
|
|
15
|
+
python examples/ssh-example.py
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
import logging
|
|
19
|
+
import os
|
|
20
|
+
import subprocess
|
|
21
|
+
from typing import Any
|
|
22
|
+
|
|
23
|
+
logging.basicConfig(level=logging.ERROR)
|
|
24
|
+
|
|
25
|
+
from stabilize import StageExecution, TaskExecution, Workflow, WorkflowStatus
|
|
26
|
+
from stabilize.handlers.complete_stage import CompleteStageHandler
|
|
27
|
+
from stabilize.handlers.complete_task import CompleteTaskHandler
|
|
28
|
+
from stabilize.handlers.complete_workflow import CompleteWorkflowHandler
|
|
29
|
+
from stabilize.handlers.run_task import RunTaskHandler
|
|
30
|
+
from stabilize.handlers.start_stage import StartStageHandler
|
|
31
|
+
from stabilize.handlers.start_task import StartTaskHandler
|
|
32
|
+
from stabilize.handlers.start_workflow import StartWorkflowHandler
|
|
33
|
+
from stabilize.orchestrator import Orchestrator
|
|
34
|
+
from stabilize.persistence.sqlite import SqliteWorkflowStore
|
|
35
|
+
from stabilize.persistence.store import WorkflowStore
|
|
36
|
+
from stabilize.queue.processor import QueueProcessor
|
|
37
|
+
from stabilize.queue.queue import Queue
|
|
38
|
+
from stabilize.queue.sqlite_queue import SqliteQueue
|
|
39
|
+
from stabilize.tasks.interface import Task
|
|
40
|
+
from stabilize.tasks.registry import TaskRegistry
|
|
41
|
+
from stabilize.tasks.result import TaskResult
|
|
42
|
+
|
|
43
|
+
# =============================================================================
|
|
44
|
+
# Custom Task: SSHTask
|
|
45
|
+
# =============================================================================
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class SSHTask(Task):
    """
    Execute commands on remote hosts via SSH.

    Context Parameters:
        host: Remote hostname or IP address (required)
        user: SSH username (default: current user)
        command: Command to execute on remote host (required)
        port: SSH port (default: 22)
        key_file: Path to private key file (optional)
        timeout: Command timeout in seconds (default: 60)
        strict_host_key: Strict host key checking (default: False)
        connect_timeout: SSH connection timeout (default: 10)

    Outputs:
        stdout: Command standard output
        stderr: Command standard error
        exit_code: Remote command exit code
        host: Target host
        user: SSH user

    Notes:
        - Uses ssh CLI command via subprocess
        - Key-based authentication recommended
        - For password auth, use ssh-agent or sshpass (not recommended)
    """

    def execute(self, stage: StageExecution) -> TaskResult:
        """Run the configured command over SSH and map the outcome to a TaskResult.

        Returns a terminal result on missing configuration, missing ssh
        client, connection failure (ssh exit status 255), or timeout.
        A non-zero remote exit code is terminal unless
        ``continue_on_failure`` is set in the stage context, in which case
        a ``failed_continue`` result is returned instead.
        """
        # getpass.getuser() consults LOGNAME/USER/USERNAME and then the
        # password database, so the default user works on any account.
        # (The previous fallback was a hard-coded personal username.)
        import getpass

        host = stage.context.get("host")
        user = stage.context.get("user")
        if user is None:
            user = os.environ.get("USER") or getpass.getuser()
        command = stage.context.get("command")
        port = stage.context.get("port", 22)
        key_file = stage.context.get("key_file")
        timeout = stage.context.get("timeout", 60)
        strict_host_key = stage.context.get("strict_host_key", False)
        connect_timeout = stage.context.get("connect_timeout", 10)

        if not host:
            return TaskResult.terminal(error="No 'host' specified in context")

        if not command:
            return TaskResult.terminal(error="No 'command' specified in context")

        # Check SSH availability (ssh -V is cheap and needs no remote access)
        try:
            subprocess.run(
                ["ssh", "-V"],
                capture_output=True,
                timeout=5,
            )
        except (FileNotFoundError, subprocess.TimeoutExpired):
            return TaskResult.terminal(error="SSH client not available. Ensure ssh is installed.")

        # Build SSH command as an argv list (no local shell involved)
        ssh_cmd = ["ssh"]

        # Non-default port
        if port != 22:
            ssh_cmd.extend(["-p", str(port)])

        # Explicit identity file
        if key_file:
            ssh_cmd.extend(["-i", key_file])

        # Connection timeout
        ssh_cmd.extend(["-o", f"ConnectTimeout={connect_timeout}"])

        # Host key checking: the relaxed mode avoids interactive prompts but
        # is vulnerable to MITM -- only suitable for trusted networks/demos.
        if not strict_host_key:
            ssh_cmd.extend(["-o", "StrictHostKeyChecking=no"])
            ssh_cmd.extend(["-o", "UserKnownHostsFile=/dev/null"])

        # Disable pseudo-terminal allocation for non-interactive commands
        ssh_cmd.append("-T")

        # Batch mode: fail fast instead of prompting for a password
        ssh_cmd.extend(["-o", "BatchMode=yes"])

        # Target
        target = f"{user}@{host}"
        ssh_cmd.append(target)

        # Remote command (single argument; the remote shell parses it)
        ssh_cmd.append(command)

        print(f" [SSHTask] {user}@{host}: {command}")

        try:
            result = subprocess.run(
                ssh_cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
            )

            outputs = {
                "stdout": result.stdout.strip(),
                "stderr": result.stderr.strip(),
                "exit_code": result.returncode,
                "host": host,
                "user": user,
            }

            if result.returncode == 0:
                print(f" [SSHTask] Success on {host}")
                return TaskResult.success(outputs=outputs)
            elif result.returncode == 255:
                # ssh reserves exit status 255 for its own (connection) errors
                print(f" [SSHTask] Connection failed to {host}")
                return TaskResult.terminal(
                    error=f"SSH connection failed to {host}: {result.stderr}",
                    context=outputs,
                )
            else:
                print(f" [SSHTask] Command failed on {host} with exit code {result.returncode}")
                if stage.context.get("continue_on_failure"):
                    return TaskResult.failed_continue(
                        error=f"Remote command failed with exit code {result.returncode}",
                        outputs=outputs,
                    )
                return TaskResult.terminal(
                    error=f"Remote command failed with exit code {result.returncode}",
                    context=outputs,
                )

        except subprocess.TimeoutExpired:
            return TaskResult.terminal(
                error=f"SSH command timed out after {timeout}s",
                context={"host": host, "user": user},
            )
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
# =============================================================================
|
|
181
|
+
# Helper: Setup pipeline infrastructure
|
|
182
|
+
# =============================================================================
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
def setup_pipeline_runner(store: WorkflowStore, queue: Queue) -> tuple[QueueProcessor, Orchestrator]:
    """Create processor and orchestrator with SSHTask registered."""
    # The only custom task type these examples need is "ssh".
    registry = TaskRegistry()
    registry.register("ssh", SSHTask)

    processor = QueueProcessor(queue)

    # One handler per lifecycle message; RunTaskHandler also needs the registry.
    for handler in (
        StartWorkflowHandler(queue, store),
        StartStageHandler(queue, store),
        StartTaskHandler(queue, store),
        RunTaskHandler(queue, store, registry),
        CompleteTaskHandler(queue, store),
        CompleteStageHandler(queue, store),
        CompleteWorkflowHandler(queue, store),
    ):
        processor.register_handler(handler)

    return processor, Orchestrator(queue)
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
# =============================================================================
|
|
210
|
+
# Example 1: Simple Remote Command
|
|
211
|
+
# =============================================================================
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def example_simple_command() -> None:
    """Run a single remote command through a one-stage workflow."""
    divider = "=" * 60
    print("\n" + divider)
    print("Example 1: Simple Remote Command")
    print(divider)
    print("Note: Requires SSH access to localhost or modify host")

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    # One stage wrapping one SSH task; localhost stands in for a real host.
    stage = StageExecution(
        ref_id="1",
        type="ssh",
        name="Get Hostname",
        context={
            "host": "localhost",
            "command": "hostname && uname -a",
            "timeout": 30,
        },
        tasks=[
            TaskExecution.create(
                name="SSH Command",
                implementing_class="ssh",
                stage_start=True,
                stage_end=True,
            ),
        ],
    )
    workflow = Workflow.create(
        application="ssh-example",
        name="Simple Command",
        stages=[stage],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=60.0)

    finished = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {finished.status}")
    if finished.status == WorkflowStatus.SUCCEEDED:
        print(f"Output: {finished.stages[0].outputs.get('stdout')}")
    else:
        print(f"Error: {finished.stages[0].outputs.get('stderr', 'Connection failed')}")
|
|
262
|
+
|
|
263
|
+
|
|
264
|
+
# =============================================================================
|
|
265
|
+
# Example 2: Sequential Deployment Steps
|
|
266
|
+
# =============================================================================
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
def example_sequential_deployment() -> None:
    """Sequential deployment: check -> deploy -> verify."""
    divider = "=" * 60
    print("\n" + divider)
    print("Example 2: Sequential Deployment Steps")
    print(divider)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    host = "localhost"

    # Each tuple: (ref_id, stage name, task name, upstream ref or None, command).
    # Stages form a straight chain 1 -> 2 -> 3 -> 4 -> 5.
    steps = [
        (
            "1",
            "Pre-flight Check",
            "Check System",
            None,
            "echo 'Checking system...' && df -h / | tail -1 && free -m | head -2",
        ),
        (
            "2",
            "Prepare Directory",
            "Prepare",
            "1",
            "mkdir -p /tmp/stabilize_deploy && echo 'Directory ready'",
        ),
        (
            "3",
            "Deploy Application",
            "Deploy",
            "2",
            "echo 'Deploying...' && echo 'version=0.9.0' > /tmp/stabilize_deploy/app.conf && cat /tmp/stabilize_deploy/app.conf",
        ),
        (
            "4",
            "Verify Deployment",
            "Verify",
            "3",
            "test -f /tmp/stabilize_deploy/app.conf && echo 'Deployment verified' || echo 'Deployment failed'",
        ),
        (
            "5",
            "Cleanup",
            "Cleanup",
            "4",
            "rm -rf /tmp/stabilize_deploy && echo 'Cleanup complete'",
        ),
    ]

    stages = []
    for ref_id, stage_name, task_name, upstream, command in steps:
        extra: dict[str, Any] = {}
        if upstream is not None:
            extra["requisite_stage_ref_ids"] = {upstream}
        stages.append(
            StageExecution(
                ref_id=ref_id,
                type="ssh",
                name=stage_name,
                context={
                    "host": host,
                    "command": command,
                },
                tasks=[
                    TaskExecution.create(
                        name=task_name,
                        implementing_class="ssh",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
                **extra,
            )
        )

    workflow = Workflow.create(
        application="ssh-example",
        name="Deployment Pipeline",
        stages=stages,
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=120.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")
    for stage in result.stages:
        status_mark = "[OK]" if stage.status == WorkflowStatus.SUCCEEDED else "[FAIL]"
        stdout = stage.outputs.get("stdout", "")
        first_line = stdout.split("\n")[0][:50] if stdout else "N/A"
        print(f" {status_mark} {stage.name}: {first_line}")
|
|
394
|
+
|
|
395
|
+
|
|
396
|
+
# =============================================================================
|
|
397
|
+
# Example 3: Parallel Health Checks
|
|
398
|
+
# =============================================================================
|
|
399
|
+
|
|
400
|
+
|
|
401
|
+
def example_parallel_health_check() -> None:
    """Check multiple servers in parallel."""
    divider = "=" * 60
    print("\n" + divider)
    print("Example 3: Parallel Health Checks")
    print(divider)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    # localhost stands in for three distinct machines in this demo;
    # in production these would be different hosts.
    servers = [
        ("server1", "localhost"),
        ("server2", "localhost"),
        ("server3", "localhost"),
    ]

    # DAG shape:
    #   Start -> {server1, server2, server3} -> Report

    def ssh_stage(ref_id, stage_name, task_name, context, deps=None):
        """Build a single-task SSH stage; deps is an optional set of upstream refs."""
        extra = {} if deps is None else {"requisite_stage_ref_ids": deps}
        return StageExecution(
            ref_id=ref_id,
            type="ssh",
            name=stage_name,
            context=context,
            tasks=[
                TaskExecution.create(
                    name=task_name,
                    implementing_class="ssh",
                    stage_start=True,
                    stage_end=True,
                ),
            ],
            **extra,
        )

    stages = [
        ssh_stage(
            "start",
            "Start Health Check",
            "Start",
            {
                "host": "localhost",
                "command": "echo 'Starting health checks...'",
            },
        )
    ]

    # Fan-out: one stage per server, all depending only on "start".
    for label, target_host in servers:
        stages.append(
            ssh_stage(
                label,
                f"Check {label}",
                f"Check {label}",
                {
                    "host": target_host,
                    "command": f"echo 'Checking {label}...' && uptime && echo 'Status: OK'",
                    "continue_on_failure": True,  # Continue even if one fails
                },
                deps={"start"},
            )
        )

    # Fan-in: the report waits for every per-server check.
    stages.append(
        ssh_stage(
            "report",
            "Generate Report",
            "Report",
            {
                "host": "localhost",
                "command": "echo 'Health check complete' && date",
            },
            deps={label for label, _ in servers},
        )
    )

    workflow = Workflow.create(
        application="ssh-example",
        name="Parallel Health Check",
        stages=stages,
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=120.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")
    for stage in result.stages:
        status_mark = "[OK]" if stage.status == WorkflowStatus.SUCCEEDED else "[FAIL]"
        host = stage.outputs.get("host", "N/A")
        stdout = stage.outputs.get("stdout", "")
        interesting = [line for line in stdout.split("\n") if "Status:" in line or "complete" in line.lower()]
        status = interesting[0] if interesting else "N/A"
        print(f" {status_mark} {stage.name} ({host}): {status[:40]}")
|
|
511
|
+
|
|
512
|
+
|
|
513
|
+
# =============================================================================
|
|
514
|
+
# Example 4: Multi-Command Script
|
|
515
|
+
# =============================================================================
|
|
516
|
+
|
|
517
|
+
|
|
518
|
+
def script_to_command(script: str) -> str:
    """Join the non-empty lines of a shell script with ' && '.

    The remote shell then stops at the first failing command. Blank lines and
    leading/trailing whitespace are dropped so they cannot produce malformed
    '&& &&' sequences. (The previous ``replace("\\n", " && ").strip(" && ")``
    approach treated the ``strip`` argument as a character set, not a suffix,
    and broke on any blank line inside the script.)
    """
    return " && ".join(line.strip() for line in script.splitlines() if line.strip())


def example_multi_command() -> None:
    """Execute a multi-command script on remote host."""
    print("\n" + "=" * 60)
    print("Example 4: Multi-Command Script")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    # Multi-line script; each line is one shell command, joined below into a
    # single SSH invocation.
    script = """
echo '=== System Information ==='
echo "Hostname: $(hostname)"
echo "Kernel: $(uname -r)"
echo "Uptime: $(uptime -p)"
echo ''
echo '=== Disk Usage ==='
df -h / | tail -1
echo ''
echo '=== Memory Usage ==='
free -h | head -2
echo ''
echo '=== Load Average ==='
cat /proc/loadavg
echo ''
echo '=== Done ==='
"""

    workflow = Workflow.create(
        application="ssh-example",
        name="System Report",
        stages=[
            StageExecution(
                ref_id="1",
                type="ssh",
                name="Generate System Report",
                context={
                    "host": "localhost",
                    "command": script_to_command(script),
                    "timeout": 60,
                },
                tasks=[
                    TaskExecution.create(
                        name="System Report",
                        implementing_class="ssh",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=60.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")
    if result.status == WorkflowStatus.SUCCEEDED:
        print("\nSystem Report:")
        print("-" * 40)
        print(result.stages[0].outputs.get("stdout", ""))
|
|
583
|
+
|
|
584
|
+
|
|
585
|
+
# =============================================================================
|
|
586
|
+
# Main
|
|
587
|
+
# =============================================================================
|
|
588
|
+
|
|
589
|
+
|
|
590
|
+
if __name__ == "__main__":
    banner = "=" * 60
    print("Stabilize SSH Examples")
    print(banner)
    print("Requires: SSH client and access to target hosts")
    print("Note: Examples use localhost - modify hosts for real usage")

    # Run every example in order.
    for example in (
        example_simple_command,
        example_sequential_deployment,
        example_parallel_health_check,
        example_multi_command,
    ):
        example()

    print("\n" + banner)
    print("All examples completed!")
    print(banner)
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
"""Message handlers for pipeline execution."""
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
from stabilize.handlers.base import MessageHandler, StabilizeHandler
|
|
6
|
+
from stabilize.handlers.complete_stage import CompleteStageHandler
|
|
7
|
+
from stabilize.handlers.complete_task import CompleteTaskHandler
|
|
8
|
+
from stabilize.handlers.complete_workflow import CompleteWorkflowHandler
|
|
9
|
+
from stabilize.handlers.run_task import RunTaskHandler
|
|
10
|
+
from stabilize.handlers.start_stage import StartStageHandler
|
|
11
|
+
from stabilize.handlers.start_task import StartTaskHandler
|
|
12
|
+
from stabilize.handlers.start_workflow import StartWorkflowHandler
|
|
13
|
+
|
|
14
|
+
__all__ = [
|
|
15
|
+
"MessageHandler",
|
|
16
|
+
"StabilizeHandler",
|
|
17
|
+
"StartWorkflowHandler",
|
|
18
|
+
"StartStageHandler",
|
|
19
|
+
"StartTaskHandler",
|
|
20
|
+
"RunTaskHandler",
|
|
21
|
+
"CompleteTaskHandler",
|
|
22
|
+
"CompleteStageHandler",
|
|
23
|
+
"CompleteWorkflowHandler",
|
|
24
|
+
]
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def register_all_handlers(
    processor: Any,
    repository: Any,
    task_registry: Any,
    queue: Any,
) -> None:
    """
    Register all handlers with a queue processor.

    Args:
        processor: The queue processor to register with
        repository: The execution repository
        task_registry: The task registry
        queue: The message queue
    """
    # Registration order mirrors the message lifecycle: start -> run -> complete.
    # RunTaskHandler is the only handler that also needs the task registry.
    processor.register_handler(StartWorkflowHandler(queue, repository))
    processor.register_handler(StartStageHandler(queue, repository))
    processor.register_handler(StartTaskHandler(queue, repository))
    processor.register_handler(RunTaskHandler(queue, repository, task_registry))
    processor.register_handler(CompleteTaskHandler(queue, repository))
    processor.register_handler(CompleteStageHandler(queue, repository))
    processor.register_handler(CompleteWorkflowHandler(queue, repository))
|