stabilize-0.9.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. stabilize/__init__.py +29 -0
  2. stabilize/cli.py +1193 -0
  3. stabilize/context/__init__.py +7 -0
  4. stabilize/context/stage_context.py +170 -0
  5. stabilize/dag/__init__.py +15 -0
  6. stabilize/dag/graph.py +215 -0
  7. stabilize/dag/topological.py +199 -0
  8. stabilize/examples/__init__.py +1 -0
  9. stabilize/examples/docker-example.py +759 -0
  10. stabilize/examples/golden-standard-expected-result.txt +1 -0
  11. stabilize/examples/golden-standard.py +488 -0
  12. stabilize/examples/http-example.py +606 -0
  13. stabilize/examples/llama-example.py +662 -0
  14. stabilize/examples/python-example.py +731 -0
  15. stabilize/examples/shell-example.py +399 -0
  16. stabilize/examples/ssh-example.py +603 -0
  17. stabilize/handlers/__init__.py +53 -0
  18. stabilize/handlers/base.py +226 -0
  19. stabilize/handlers/complete_stage.py +209 -0
  20. stabilize/handlers/complete_task.py +75 -0
  21. stabilize/handlers/complete_workflow.py +150 -0
  22. stabilize/handlers/run_task.py +369 -0
  23. stabilize/handlers/start_stage.py +262 -0
  24. stabilize/handlers/start_task.py +74 -0
  25. stabilize/handlers/start_workflow.py +136 -0
  26. stabilize/launcher.py +307 -0
  27. stabilize/migrations/01KDQ4N9QPJ6Q4MCV3V9GHWPV4_initial_schema.sql +97 -0
  28. stabilize/migrations/01KDRK3TXW4R2GERC1WBCQYJGG_rag_embeddings.sql +25 -0
  29. stabilize/migrations/__init__.py +1 -0
  30. stabilize/models/__init__.py +15 -0
  31. stabilize/models/stage.py +389 -0
  32. stabilize/models/status.py +146 -0
  33. stabilize/models/task.py +125 -0
  34. stabilize/models/workflow.py +317 -0
  35. stabilize/orchestrator.py +113 -0
  36. stabilize/persistence/__init__.py +28 -0
  37. stabilize/persistence/connection.py +185 -0
  38. stabilize/persistence/factory.py +136 -0
  39. stabilize/persistence/memory.py +214 -0
  40. stabilize/persistence/postgres.py +655 -0
  41. stabilize/persistence/sqlite.py +674 -0
  42. stabilize/persistence/store.py +235 -0
  43. stabilize/queue/__init__.py +59 -0
  44. stabilize/queue/messages.py +377 -0
  45. stabilize/queue/processor.py +312 -0
  46. stabilize/queue/queue.py +526 -0
  47. stabilize/queue/sqlite_queue.py +354 -0
  48. stabilize/rag/__init__.py +19 -0
  49. stabilize/rag/assistant.py +459 -0
  50. stabilize/rag/cache.py +294 -0
  51. stabilize/stages/__init__.py +11 -0
  52. stabilize/stages/builder.py +253 -0
  53. stabilize/tasks/__init__.py +19 -0
  54. stabilize/tasks/interface.py +335 -0
  55. stabilize/tasks/registry.py +255 -0
  56. stabilize/tasks/result.py +283 -0
  57. stabilize-0.9.2.dist-info/METADATA +301 -0
  58. stabilize-0.9.2.dist-info/RECORD +61 -0
  59. stabilize-0.9.2.dist-info/WHEEL +4 -0
  60. stabilize-0.9.2.dist-info/entry_points.txt +2 -0
  61. stabilize-0.9.2.dist-info/licenses/LICENSE +201 -0
stabilize/examples/llama-example.py
@@ -0,0 +1,662 @@
#!/usr/bin/env python3
"""
Ollama LLM Example - Demonstrates interacting with a local LLM via Ollama from Stabilize.

This example shows how to:
1. Create a custom Task that calls the Ollama API
2. Generate text completions with local LLMs
3. Build LLM-powered processing pipelines

Requirements:
    Ollama installed and running (https://ollama.ai)
    At least one model pulled (e.g., ollama pull deepseek-v3.1:671b-cloud)

Run with:
    python examples/llama-example.py
"""

import json
import logging
import time
import urllib.error
import urllib.request
from typing import Any

logging.basicConfig(level=logging.ERROR)

from stabilize import StageExecution, TaskExecution, Workflow, WorkflowStatus
from stabilize.handlers.complete_stage import CompleteStageHandler
from stabilize.handlers.complete_task import CompleteTaskHandler
from stabilize.handlers.complete_workflow import CompleteWorkflowHandler
from stabilize.handlers.run_task import RunTaskHandler
from stabilize.handlers.start_stage import StartStageHandler
from stabilize.handlers.start_task import StartTaskHandler
from stabilize.handlers.start_workflow import StartWorkflowHandler
from stabilize.orchestrator import Orchestrator
from stabilize.persistence.sqlite import SqliteWorkflowStore
from stabilize.persistence.store import WorkflowStore
from stabilize.queue.processor import QueueProcessor
from stabilize.queue.queue import Queue
from stabilize.queue.sqlite_queue import SqliteQueue
from stabilize.tasks.interface import Task
from stabilize.tasks.registry import TaskRegistry
from stabilize.tasks.result import TaskResult

# =============================================================================
# Custom Task: OllamaTask
# =============================================================================


class OllamaTask(Task):
    """
    Generate text using Ollama local LLM.

    Context Parameters:
        prompt: The prompt to send to the model (required)
        model: Model name (default: deepseek-v3.1:671b-cloud)
        system: System prompt to set context (optional)
        host: Ollama API host (default: http://localhost:11434)
        temperature: Sampling temperature 0.0-2.0 (default: 0.7)
        max_tokens: Maximum tokens to generate (optional)
        timeout: Request timeout in seconds (default: 120)
        format: Response format - "json" for JSON mode (optional)

    Outputs:
        response: Generated text response
        model: Model used
        total_duration_ms: Total generation time in milliseconds
        prompt_eval_count: Number of tokens in prompt
        eval_count: Number of tokens generated

    Notes:
        - Requires Ollama running locally
        - Supports any model available in Ollama
        - Use format="json" for structured JSON output
    """

    DEFAULT_HOST = "http://localhost:11434"
    DEFAULT_MODEL = "deepseek-v3.1:671b-cloud"

    def execute(self, stage: StageExecution) -> TaskResult:
        prompt = stage.context.get("prompt")
        model = stage.context.get("model", self.DEFAULT_MODEL)
        system = stage.context.get("system")
        host = stage.context.get("host", self.DEFAULT_HOST)
        temperature = stage.context.get("temperature", 0.7)
        max_tokens = stage.context.get("max_tokens")
        timeout = stage.context.get("timeout", 120)
        response_format = stage.context.get("format")

        if not prompt:
            return TaskResult.terminal(error="No 'prompt' specified in context")

        # Check Ollama availability
        try:
            health_url = f"{host}/api/tags"
            urllib.request.urlopen(health_url, timeout=5)
        except (urllib.error.URLError, TimeoutError):
            return TaskResult.terminal(error=f"Ollama not available at {host}. Ensure Ollama is running.")

        # Build request payload
        payload: dict[str, Any] = {
            "model": model,
            "prompt": prompt,
            "stream": False,
            "options": {
                "temperature": temperature,
            },
        }

        if system:
            payload["system"] = system

        if max_tokens:
            payload["options"]["num_predict"] = max_tokens

        if response_format == "json":
            payload["format"] = "json"

        # Make request
        url = f"{host}/api/generate"
        data = json.dumps(payload).encode("utf-8")
        request = urllib.request.Request(
            url,
            data=data,
            headers={"Content-Type": "application/json"},
            method="POST",
        )

        print(f" [OllamaTask] Generating with {model}...")

        try:
            start_time = time.time()
            with urllib.request.urlopen(request, timeout=timeout) as response:
                result = json.loads(response.read().decode("utf-8"))

            elapsed_ms = int((time.time() - start_time) * 1000)

            generated_text = result.get("response", "")
            total_duration = result.get("total_duration", 0) // 1_000_000  # ns to ms
            prompt_eval_count = result.get("prompt_eval_count", 0)
            eval_count = result.get("eval_count", 0)

            outputs = {
                "response": generated_text,
                "model": model,
                "total_duration_ms": total_duration or elapsed_ms,
                "prompt_eval_count": prompt_eval_count,
                "eval_count": eval_count,
            }

            # Try to parse JSON if format was json
            if response_format == "json":
                try:
                    outputs["json"] = json.loads(generated_text)
                except json.JSONDecodeError:
                    outputs["json"] = None
                    outputs["json_error"] = "Failed to parse JSON response"

            print(f" [OllamaTask] Generated {eval_count} tokens in {total_duration}ms")
            return TaskResult.success(outputs=outputs)

        except urllib.error.HTTPError as e:
            error_body = e.read().decode("utf-8") if e.fp else ""
            return TaskResult.terminal(error=f"Ollama API error ({e.code}): {error_body}")

        except urllib.error.URLError as e:
            return TaskResult.terminal(error=f"Connection error: {e.reason}")

        except TimeoutError:
            return TaskResult.terminal(error=f"Request timed out after {timeout}s")

        except json.JSONDecodeError as e:
            return TaskResult.terminal(error=f"Invalid JSON response: {e}")


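# ---------------------------------------------------------------------------
# Editorial sketch, not part of the released file: the context parameters
# documented in the OllamaTask docstring map one-to-one onto a StageExecution
# context. Assuming StageExecution accepts the same keyword arguments used in
# the examples below (and tolerates an empty task list), the task can be
# smoke-tested on its own, without the queue/orchestrator machinery.
# ---------------------------------------------------------------------------
def _smoke_test_ollama_task() -> None:
    """Call OllamaTask.execute() directly with a minimal stage context."""
    stage = StageExecution(
        ref_id="smoke",
        type="ollama",
        name="Smoke Test",
        context={"prompt": "Say hello in five words.", "max_tokens": 20},
        tasks=[],
    )
    result = OllamaTask().execute(stage)
    print(result)

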
# =============================================================================
# Helper: Setup pipeline infrastructure
# =============================================================================


def setup_pipeline_runner(store: WorkflowStore, queue: Queue) -> tuple[QueueProcessor, Orchestrator]:
    """Create processor and orchestrator with OllamaTask registered."""
    task_registry = TaskRegistry()
    task_registry.register("ollama", OllamaTask)

    processor = QueueProcessor(queue)

    handlers: list[Any] = [
        StartWorkflowHandler(queue, store),
        StartStageHandler(queue, store),
        StartTaskHandler(queue, store),
        RunTaskHandler(queue, store, task_registry),
        CompleteTaskHandler(queue, store),
        CompleteStageHandler(queue, store),
        CompleteWorkflowHandler(queue, store),
    ]

    for handler in handlers:
        processor.register_handler(handler)

    orchestrator = Orchestrator(queue)
    return processor, orchestrator


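# ---------------------------------------------------------------------------
# Editorial sketch, not part of the released file: every example below repeats
# the same lifecycle around setup_pipeline_runner(). It is condensed here once,
# with comments, on the assumption that process_all() simply drains the queue
# through the handlers registered above until no messages remain or the
# timeout expires.
# ---------------------------------------------------------------------------
def _run_workflow_once(workflow: Workflow, timeout: float = 180.0) -> Workflow:
    """Persist, start, and drive a workflow to completion, then reload it."""
    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    store.store(workflow)                    # persist the workflow definition
    orchestrator.start(workflow)             # enqueue the initial start message
    processor.process_all(timeout=timeout)   # handlers drive stages and tasks

    return store.retrieve(workflow.id)       # reload the final state

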
# =============================================================================
# Example 1: Simple Text Generation
# =============================================================================


def example_simple_generation() -> None:
    """Generate text with a simple prompt."""
    print("\n" + "=" * 60)
    print("Example 1: Simple Text Generation")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    workflow = Workflow.create(
        application="llama-example",
        name="Simple Generation",
        stages=[
            StageExecution(
                ref_id="1",
                type="ollama",
                name="Generate Text",
                context={
                    "prompt": "Explain what a workflow engine is in 2-3 sentences.",
                    "model": "deepseek-v3.1:671b-cloud",
                    "temperature": 0.7,
                    "max_tokens": 150,
                },
                tasks=[
                    TaskExecution.create(
                        name="Generate",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=180.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")
    if result.status == WorkflowStatus.SUCCEEDED:
        response = result.stages[0].outputs.get("response", "")
        tokens = result.stages[0].outputs.get("eval_count", 0)
        duration = result.stages[0].outputs.get("total_duration_ms", 0)
        print(f"\nResponse ({tokens} tokens, {duration}ms):")
        print("-" * 40)
        print(response)


# =============================================================================
# Example 2: Text with System Prompt
# =============================================================================


def example_with_system_prompt() -> None:
    """Generate text with a system prompt for role/context."""
    print("\n" + "=" * 60)
    print("Example 2: Text with System Prompt")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    workflow = Workflow.create(
        application="llama-example",
        name="System Prompt",
        stages=[
            StageExecution(
                ref_id="1",
                type="ollama",
                name="Technical Explanation",
                context={
                    "system": "You are a senior software architect. Provide clear, technical explanations. Be concise.",
                    "prompt": "What are the benefits of using a DAG (Directed Acyclic Graph) for workflow orchestration?",
                    "model": "deepseek-v3.1:671b-cloud",
                    "temperature": 0.5,
                    "max_tokens": 200,
                },
                tasks=[
                    TaskExecution.create(
                        name="Generate",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=180.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")
    if result.status == WorkflowStatus.SUCCEEDED:
        response = result.stages[0].outputs.get("response", "")
        print("\nResponse:")
        print("-" * 40)
        print(response)


# =============================================================================
# Example 3: JSON Structured Output
# =============================================================================


def example_json_output() -> None:
    """Generate structured JSON output."""
    print("\n" + "=" * 60)
    print("Example 3: JSON Structured Output")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    workflow = Workflow.create(
        application="llama-example",
        name="JSON Output",
        stages=[
            StageExecution(
                ref_id="1",
                type="ollama",
                name="Generate JSON",
                context={
                    "system": "You are a JSON generator. Output only valid JSON, no markdown or explanation.",
                    "prompt": """Generate a JSON object describing a software project with these fields:
- name: string
- version: string (semver)
- description: string (1 sentence)
- features: array of 3 strings
- status: one of "alpha", "beta", "stable"
""",
                    "model": "deepseek-v3.1:671b-cloud",
                    "temperature": 0.3,
                    "format": "json",
                },
                tasks=[
                    TaskExecution.create(
                        name="Generate JSON",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=180.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")
    if result.status == WorkflowStatus.SUCCEEDED:
        json_output = result.stages[0].outputs.get("json")
        if json_output:
            print("\nParsed JSON:")
            print("-" * 40)
            print(json.dumps(json_output, indent=2))
        else:
            print("\nRaw Response:")
            print(result.stages[0].outputs.get("response", ""))


# =============================================================================
# Example 4: Sequential Processing Pipeline
# =============================================================================


def example_processing_pipeline() -> None:
    """Chain LLM calls: summarize -> extract keywords -> generate tagline."""
    print("\n" + "=" * 60)
    print("Example 4: Processing Pipeline")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    original_text = """
Stabilize is a lightweight Python workflow execution engine with DAG-based
stage orchestration. It supports message-driven execution, parallel and
sequential stages, synthetic stages for lifecycle hooks, multiple persistence
backends including PostgreSQL and SQLite, and a pluggable task system with
retry and timeout support.
"""

    workflow = Workflow.create(
        application="llama-example",
        name="Processing Pipeline",
        stages=[
            # Step 1: Summarize
            StageExecution(
                ref_id="1",
                type="ollama",
                name="Summarize",
                context={
                    "system": "You are a technical writer. Summarize text into bullet points.",
                    "prompt": f"Summarize this into 3 key bullet points:\n\n{original_text}",
                    "model": "deepseek-v3.1:671b-cloud",
                    "temperature": 0.3,
                    "max_tokens": 150,
                },
                tasks=[
                    TaskExecution.create(
                        name="Summarize",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Step 2: Extract keywords
            StageExecution(
                ref_id="2",
                type="ollama",
                name="Extract Keywords",
                requisite_stage_ref_ids={"1"},
                context={
                    "system": "Extract technical keywords. Output only a comma-separated list.",
                    "prompt": f"Extract 5 technical keywords from:\n\n{original_text}",
                    "model": "deepseek-v3.1:671b-cloud",
                    "temperature": 0.2,
                    "max_tokens": 50,
                },
                tasks=[
                    TaskExecution.create(
                        name="Keywords",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Step 3: Generate tagline
            StageExecution(
                ref_id="3",
                type="ollama",
                name="Generate Tagline",
                requisite_stage_ref_ids={"2"},
                context={
                    "system": "You are a marketing copywriter. Be concise and catchy.",
                    "prompt": f"Create a one-line tagline (max 10 words) for this product:\n\n{original_text}",
                    "model": "deepseek-v3.1:671b-cloud",
                    "temperature": 0.8,
                    "max_tokens": 30,
                },
                tasks=[
                    TaskExecution.create(
                        name="Tagline",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=300.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")
    print("\nPipeline Results:")
    print("-" * 40)
    for stage in result.stages:
        print(f"\n{stage.name}:")
        response = stage.outputs.get("response", "N/A")
        print(f" {response[:200]}")


# =============================================================================
# Example 5: Parallel Generation
# =============================================================================


def example_parallel_generation() -> None:
    """Generate multiple variations in parallel."""
    print("\n" + "=" * 60)
    print("Example 5: Parallel Generation")
    print("=" * 60)

    store = SqliteWorkflowStore("sqlite:///:memory:", create_tables=True)
    queue = SqliteQueue("sqlite:///:memory:", table_name="queue_messages")
    queue._create_table()
    processor, orchestrator = setup_pipeline_runner(store, queue)

    base_prompt = "Write a one-sentence description of a workflow engine"

    #          Start
    #        /   |   \
    #   Formal Casual Technical
    #        \   |   /
    #         Compare

    workflow = Workflow.create(
        application="llama-example",
        name="Parallel Generation",
        stages=[
            StageExecution(
                ref_id="start",
                type="ollama",
                name="Setup",
                context={
                    "prompt": "Say 'Starting parallel generation' in exactly 3 words.",
                    "model": "deepseek-v3.1:671b-cloud",
                    "max_tokens": 10,
                },
                tasks=[
                    TaskExecution.create(
                        name="Setup",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Parallel variations
            StageExecution(
                ref_id="formal",
                type="ollama",
                name="Formal Style",
                requisite_stage_ref_ids={"start"},
                context={
                    "system": "Write in a formal, professional tone.",
                    "prompt": base_prompt,
                    "model": "deepseek-v3.1:671b-cloud",
                    "temperature": 0.3,
                    "max_tokens": 50,
                },
                tasks=[
                    TaskExecution.create(
                        name="Formal",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            StageExecution(
                ref_id="casual",
                type="ollama",
                name="Casual Style",
                requisite_stage_ref_ids={"start"},
                context={
                    "system": "Write in a casual, friendly tone.",
                    "prompt": base_prompt,
                    "model": "deepseek-v3.1:671b-cloud",
                    "temperature": 0.7,
                    "max_tokens": 50,
                },
                tasks=[
                    TaskExecution.create(
                        name="Casual",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            StageExecution(
                ref_id="technical",
                type="ollama",
                name="Technical Style",
                requisite_stage_ref_ids={"start"},
                context={
                    "system": "Write in a technical, precise tone for developers.",
                    "prompt": base_prompt,
                    "model": "deepseek-v3.1:671b-cloud",
                    "temperature": 0.2,
                    "max_tokens": 50,
                },
                tasks=[
                    TaskExecution.create(
                        name="Technical",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
            # Compare
            StageExecution(
                ref_id="compare",
                type="ollama",
                name="Compare Results",
                requisite_stage_ref_ids={"formal", "casual", "technical"},
                context={
                    "prompt": "Say 'Generation complete' in exactly 2 words.",
                    "model": "deepseek-v3.1:671b-cloud",
                    "max_tokens": 10,
                },
                tasks=[
                    TaskExecution.create(
                        name="Compare",
                        implementing_class="ollama",
                        stage_start=True,
                        stage_end=True,
                    ),
                ],
            ),
        ],
    )

    store.store(workflow)
    orchestrator.start(workflow)
    processor.process_all(timeout=300.0)

    result = store.retrieve(workflow.id)
    print(f"\nWorkflow Status: {result.status}")
    print("\nGenerated Variations:")
    print("-" * 40)

    for stage in result.stages:
        if stage.ref_id in ["formal", "casual", "technical"]:
            response = stage.outputs.get("response", "N/A")
            print(f"\n{stage.name}:")
            print(f" {response[:150]}")


# =============================================================================
# Main
# =============================================================================


if __name__ == "__main__":
    print("Stabilize Ollama LLM Examples")
    print("=" * 60)
    print("Requires: Ollama running with deepseek-v3.1:671b-cloud model")
    print("Install: ollama pull deepseek-v3.1:671b-cloud")

    example_simple_generation()
    example_with_system_prompt()
    example_json_output()
    example_processing_pipeline()
    example_parallel_generation()

    print("\n" + "=" * 60)
    print("All examples completed!")
    print("=" * 60)
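A note on the model choice: every stage above pins "deepseek-v3.1:671b-cloud", and the "-cloud" tag suggests the request is served through Ollama's hosted backend rather than a fully local model. Because OllamaTask reads the model name (and host) from the stage context and only falls back to DEFAULT_MODEL, a smaller locally pulled model can be substituted without changing the task. The stage below is a sketch; the "llama3.2" tag and the alternate host comment are assumptions, and any tag reported by `ollama list` would work in their place:

    StageExecution(
        ref_id="1",
        type="ollama",
        name="Generate Text",
        context={
            "prompt": "Explain what a workflow engine is in 2-3 sentences.",
            "model": "llama3.2",               # assumed: any locally pulled tag
            "host": "http://localhost:11434",  # override if Ollama listens elsewhere
            "max_tokens": 150,
        },
        tasks=[
            TaskExecution.create(
                name="Generate",
                implementing_class="ollama",
                stage_start=True,
                stage_end=True,
            ),
        ],
    )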