stabilize-0.9.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. stabilize/__init__.py +29 -0
  2. stabilize/cli.py +1193 -0
  3. stabilize/context/__init__.py +7 -0
  4. stabilize/context/stage_context.py +170 -0
  5. stabilize/dag/__init__.py +15 -0
  6. stabilize/dag/graph.py +215 -0
  7. stabilize/dag/topological.py +199 -0
  8. stabilize/examples/__init__.py +1 -0
  9. stabilize/examples/docker-example.py +759 -0
  10. stabilize/examples/golden-standard-expected-result.txt +1 -0
  11. stabilize/examples/golden-standard.py +488 -0
  12. stabilize/examples/http-example.py +606 -0
  13. stabilize/examples/llama-example.py +662 -0
  14. stabilize/examples/python-example.py +731 -0
  15. stabilize/examples/shell-example.py +399 -0
  16. stabilize/examples/ssh-example.py +603 -0
  17. stabilize/handlers/__init__.py +53 -0
  18. stabilize/handlers/base.py +226 -0
  19. stabilize/handlers/complete_stage.py +209 -0
  20. stabilize/handlers/complete_task.py +75 -0
  21. stabilize/handlers/complete_workflow.py +150 -0
  22. stabilize/handlers/run_task.py +369 -0
  23. stabilize/handlers/start_stage.py +262 -0
  24. stabilize/handlers/start_task.py +74 -0
  25. stabilize/handlers/start_workflow.py +136 -0
  26. stabilize/launcher.py +307 -0
  27. stabilize/migrations/01KDQ4N9QPJ6Q4MCV3V9GHWPV4_initial_schema.sql +97 -0
  28. stabilize/migrations/01KDRK3TXW4R2GERC1WBCQYJGG_rag_embeddings.sql +25 -0
  29. stabilize/migrations/__init__.py +1 -0
  30. stabilize/models/__init__.py +15 -0
  31. stabilize/models/stage.py +389 -0
  32. stabilize/models/status.py +146 -0
  33. stabilize/models/task.py +125 -0
  34. stabilize/models/workflow.py +317 -0
  35. stabilize/orchestrator.py +113 -0
  36. stabilize/persistence/__init__.py +28 -0
  37. stabilize/persistence/connection.py +185 -0
  38. stabilize/persistence/factory.py +136 -0
  39. stabilize/persistence/memory.py +214 -0
  40. stabilize/persistence/postgres.py +655 -0
  41. stabilize/persistence/sqlite.py +674 -0
  42. stabilize/persistence/store.py +235 -0
  43. stabilize/queue/__init__.py +59 -0
  44. stabilize/queue/messages.py +377 -0
  45. stabilize/queue/processor.py +312 -0
  46. stabilize/queue/queue.py +526 -0
  47. stabilize/queue/sqlite_queue.py +354 -0
  48. stabilize/rag/__init__.py +19 -0
  49. stabilize/rag/assistant.py +459 -0
  50. stabilize/rag/cache.py +294 -0
  51. stabilize/stages/__init__.py +11 -0
  52. stabilize/stages/builder.py +253 -0
  53. stabilize/tasks/__init__.py +19 -0
  54. stabilize/tasks/interface.py +335 -0
  55. stabilize/tasks/registry.py +255 -0
  56. stabilize/tasks/result.py +283 -0
  57. stabilize-0.9.2.dist-info/METADATA +301 -0
  58. stabilize-0.9.2.dist-info/RECORD +61 -0
  59. stabilize-0.9.2.dist-info/WHEEL +4 -0
  60. stabilize-0.9.2.dist-info/entry_points.txt +2 -0
  61. stabilize-0.9.2.dist-info/licenses/LICENSE +201 -0
stabilize/queue/queue.py
@@ -0,0 +1,526 @@
+ """
+ Queue interface and implementations.
+
+ This module provides the abstract Queue interface and concrete implementations
+ for different backends (in-memory, PostgreSQL).
+ """
+
+ from __future__ import annotations
+
+ import heapq
+ import json
+ import logging
+ import threading
+ import time
+ import uuid
+ from abc import ABC, abstractmethod
+ from collections.abc import Callable
+ from dataclasses import dataclass, field
+ from datetime import datetime, timedelta
+ from typing import Any
+
+ from stabilize.queue.messages import (
+     Message,
+     create_message_from_dict,
+     get_message_type_name,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ class Queue(ABC):
+     """
+     Abstract queue interface for message handling.
+
+     The queue is the core of the execution engine, managing all messages
+     that drive stage and task execution.
+     """
+
+     @abstractmethod
+     def push(
+         self,
+         message: Message,
+         delay: timedelta | None = None,
+     ) -> None:
+         """
+         Push a message onto the queue.
+
+         Args:
+             message: The message to push
+             delay: Optional delay before message is delivered
+         """
+         pass
+
+     @abstractmethod
+     def poll(self, callback: Callable[[Message], None]) -> None:
+         """
+         Poll for a message and process it with the callback.
+
+         If a message is available, calls callback(message).
+         After callback returns, the message is automatically acknowledged.
+
+         Args:
+             callback: Function to call with the message
+         """
+         pass
+
+     @abstractmethod
+     def poll_one(self) -> Message | None:
+         """
+         Poll for a single message without callback.
+
+         Returns the message if available, None otherwise.
+         Message must be manually acknowledged.
+
+         Returns:
+             The message or None
+         """
+         pass
+
+     @abstractmethod
+     def ack(self, message: Message) -> None:
+         """
+         Acknowledge a message, removing it from the queue.
+
+         Args:
+             message: The message to acknowledge
+         """
+         pass
+
+     @abstractmethod
+     def ensure(
+         self,
+         message: Message,
+         delay: timedelta,
+     ) -> None:
+         """
+         Ensure a message is in the queue with the given delay.
+
+         If the message is already in the queue, updates its delay.
+         If not, adds it with the given delay.
+
+         Args:
+             message: The message to ensure
+             delay: Delay before message is delivered
+         """
+         pass
+
+     @abstractmethod
+     def reschedule(
+         self,
+         message: Message,
+         delay: timedelta,
+     ) -> None:
+         """
+         Reschedule a message with a new delay.
+
+         Args:
+             message: The message to reschedule
+             delay: New delay before message is delivered
+         """
+         pass
+
+     @abstractmethod
+     def size(self) -> int:
+         """Get the number of messages in the queue."""
+         pass
+
+     @abstractmethod
+     def clear(self) -> None:
+         """Clear all messages from the queue."""
+         pass
+
+
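The interface above splits acknowledgement responsibility: poll() acks on behalf of the caller, while poll_one() hands the message over and leaves ack/reschedule to the caller. A minimal sketch of both calling patterns, assuming `queue` is any Queue implementation and `msg` is a Message built from the classes in stabilize/queue/messages.py (neither is defined in this hunk):

```python
from datetime import timedelta

def handle(message):
    # Hypothetical handler; the real ones live under stabilize/handlers/.
    print("processing", type(message).__name__)

# Callback style: the queue acknowledges once the callback returns.
queue.push(msg)
queue.poll(handle)

# Manual style: the caller owns ack / reschedule.
pending = queue.poll_one()
if pending is not None:
    try:
        handle(pending)
        queue.ack(pending)
    except Exception:
        queue.reschedule(pending, timedelta(seconds=10))
```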
+ @dataclass(order=True)
+ class QueuedMessage:
+     """A message with its delivery time for priority queue ordering."""
+
+     deliver_at: float
+     message: Message = field(compare=False)
+     message_id: str = field(compare=False, default="")
+
+
+ class InMemoryQueue(Queue):
+     """
+     In-memory queue implementation using a priority queue.
+
+     Useful for testing and single-process execution.
+     Messages are ordered by delivery time.
+     """
+
+     def __init__(self) -> None:
+         self._queue: list[QueuedMessage] = []
+         self._lock = threading.Lock()
+         self._message_id_counter = 0
+         self._pending: dict[str, QueuedMessage] = {}  # Messages being processed
+         self._message_index: dict[str, QueuedMessage] = {}  # For ensure/reschedule
+
+     def _generate_message_id(self) -> str:
+         """Generate a unique message ID."""
+         self._message_id_counter += 1
+         return f"msg-{self._message_id_counter}"
+
+     def push(
+         self,
+         message: Message,
+         delay: timedelta | None = None,
+     ) -> None:
+         """Push a message onto the queue."""
+         with self._lock:
+             deliver_at = time.time()
+             if delay:
+                 deliver_at += delay.total_seconds()
+
+             message_id = self._generate_message_id()
+             message.message_id = message_id
+
+             queued = QueuedMessage(
+                 deliver_at=deliver_at,
+                 message=message,
+                 message_id=message_id,
+             )
+
+             heapq.heappush(self._queue, queued)
+             self._message_index[message_id] = queued
+
+             logger.debug(
+                 f"Pushed {get_message_type_name(message)} "
+                 f"(id={message_id}, deliver_at={datetime.fromtimestamp(deliver_at)})"
+             )
+
+     def poll(self, callback: Callable[[Message], None]) -> None:
+         """Poll for a message and process it with the callback."""
+         message = self.poll_one()
+         if message:
+             try:
+                 callback(message)
+             finally:
+                 self.ack(message)
+
+     def poll_one(self) -> Message | None:
+         """Poll for a single message without callback."""
+         with self._lock:
+             now = time.time()
+
+             # Deliver the head of the heap if its delivery time has arrived
+             while self._queue and self._queue[0].deliver_at <= now:
+                 queued = heapq.heappop(self._queue)
+                 message_id = queued.message_id
+
+                 # Remove from index
+                 self._message_index.pop(message_id, None)
+
+                 # Add to pending
+                 self._pending[message_id] = queued
+
+                 logger.debug(f"Polled {get_message_type_name(queued.message)} (id={message_id})")
+
+                 return queued.message
+
+             return None
+
+     def ack(self, message: Message) -> None:
+         """Acknowledge a message, removing it from pending."""
+         with self._lock:
+             message_id = message.message_id
+             if message_id and message_id in self._pending:
+                 del self._pending[message_id]
+                 logger.debug(f"Acked {get_message_type_name(message)} (id={message_id})")
+
+     def ensure(
+         self,
+         message: Message,
+         delay: timedelta,
+     ) -> None:
+         """Ensure a message is in the queue with the given delay."""
+         # For in-memory queue, just push the message
+         # In production, would check if similar message exists
+         self.push(message, delay)
+
+     def reschedule(
+         self,
+         message: Message,
+         delay: timedelta,
+     ) -> None:
+         """Reschedule a message with a new delay."""
+         with self._lock:
+             message_id = message.message_id
+             if message_id and message_id in self._pending:
+                 # Remove from pending
+                 del self._pending[message_id]
+
+         # Push with new delay
+         self.push(message, delay)
+
+     def size(self) -> int:
+         """Get the number of messages in the queue."""
+         with self._lock:
+             return len(self._queue) + len(self._pending)
+
+     def ready_count(self) -> int:
+         """Get the number of messages ready to be delivered."""
+         with self._lock:
+             now = time.time()
+             return sum(1 for q in self._queue if q.deliver_at <= now)
+
+     def clear(self) -> None:
+         """Clear all messages from the queue."""
+         with self._lock:
+             self._queue.clear()
+             self._pending.clear()
+             self._message_index.clear()
+
+
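A quick sketch of the in-memory scheduling behaviour, again assuming `msg` is a previously constructed Message instance: size() counts both scheduled and pending (polled but unacknowledged) messages, while ready_count() only counts messages whose delivery time has passed.

```python
import time
from datetime import timedelta

q = InMemoryQueue()
q.push(msg, delay=timedelta(seconds=1))

assert q.size() == 1          # scheduled, even though not yet deliverable
assert q.ready_count() == 0   # deliver_at is still in the future
assert q.poll_one() is None   # nothing ready yet

time.sleep(1.1)
delivered = q.poll_one()      # delivered, held as pending until acked
assert delivered is not None and q.size() == 1
q.ack(delivered)
assert q.size() == 0
```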
+ class PostgresQueue(Queue):
+     """
+     PostgreSQL-backed queue implementation.
+
+     Uses FOR UPDATE SKIP LOCKED for concurrent message processing across
+     multiple workers. Messages are stored in a table with delivery time.
+
+     Connection pools are managed by a singleton ConnectionManager for
+     efficient resource sharing across all queue instances.
+     """
+
+     def __init__(
+         self,
+         connection_string: str,
+         table_name: str = "queue_messages",
+         lock_duration: timedelta = timedelta(minutes=5),
+         max_attempts: int = 10,
+     ) -> None:
+         """
+         Initialize the PostgreSQL queue.
+
+         Args:
+             connection_string: PostgreSQL connection string
+             table_name: Name of the queue table
+             lock_duration: How long to lock messages during processing
+             max_attempts: Maximum retry attempts before dropping message
+         """
+         from stabilize.persistence.connection import get_connection_manager
+
+         self.connection_string = connection_string
+         self.table_name = table_name
+         self.lock_duration = lock_duration
+         self.max_attempts = max_attempts
+         self._manager = get_connection_manager()
+         self._pending: dict[int, dict[str, Any]] = {}
+
+     def _get_pool(self) -> Any:
+         """Get the shared connection pool from ConnectionManager."""
+         return self._manager.get_postgres_pool(self.connection_string)
+
+     def close(self) -> None:
+         """Close the connection pool via connection manager."""
+         self._manager.close_postgres_pool(self.connection_string)
+
+     def _serialize_message(self, message: Message) -> str:
+         """Serialize a message to JSON."""
+         from enum import Enum
+
+         data = {}
+         for key, value in message.__dict__.items():
+             if key.startswith("_"):
+                 continue
+             if isinstance(value, datetime):
+                 data[key] = value.isoformat()
+             elif isinstance(value, Enum):
+                 data[key] = value.name  # Use name, not value (which may be a tuple)
+             else:
+                 data[key] = value
+         return json.dumps(data)
+
+     def _deserialize_message(self, type_name: str, payload: Any) -> Message:
+         """Deserialize a message from JSON or dict."""
+         from stabilize.models.stage import SyntheticStageOwner
+         from stabilize.models.status import WorkflowStatus
+
+         # psycopg3 returns JSONB as dict directly
+         if isinstance(payload, dict):
+             data = payload
+         else:
+             data = json.loads(payload)
+
+         # Convert enum values
+         if "status" in data and isinstance(data["status"], str):
+             data["status"] = WorkflowStatus[data["status"]]
+         if "original_status" in data and data["original_status"]:
+             data["original_status"] = WorkflowStatus[data["original_status"]]
+         if "phase" in data and isinstance(data["phase"], str):
+             data["phase"] = SyntheticStageOwner[data["phase"]]
+
+         # Remove metadata fields
+         data.pop("message_id", None)
+         data.pop("created_at", None)
+         data.pop("attempts", None)
+         data.pop("max_attempts", None)
+
+         return create_message_from_dict(type_name, data)
+
+     def push(
+         self,
+         message: Message,
+         delay: timedelta | None = None,
+     ) -> None:
+         """Push a message onto the queue."""
+         pool = self._get_pool()
+         deliver_at = datetime.now()
+         if delay:
+             deliver_at += delay
+
+         message_type = get_message_type_name(message)
+         message_id = str(uuid.uuid4())
+         payload = self._serialize_message(message)
+
+         with pool.connection() as conn:
+             with conn.cursor() as cur:
+                 cur.execute(
+                     f"""
+                     INSERT INTO {self.table_name}
+                     (message_id, message_type, payload, deliver_at, attempts)
+                     VALUES (%(message_id)s, %(type)s, %(payload)s::jsonb, %(deliver_at)s, 0)
+                     """,
+                     {
+                         "message_id": message_id,
+                         "type": message_type,
+                         "payload": payload,
+                         "deliver_at": deliver_at,
+                     },
+                 )
+                 conn.commit()
+
+     def poll(self, callback: Callable[[Message], None]) -> None:
+         """Poll for a message and process it with the callback."""
+         message = self.poll_one()
+         if message:
+             try:
+                 callback(message)
+                 self.ack(message)
+             except Exception:
+                 # Message will be retried after lock expires
+                 raise
+
+     def poll_one(self) -> Message | None:
+         """Poll for a single message without callback."""
+         pool = self._get_pool()
+         locked_until = datetime.now() + self.lock_duration
+
+         with pool.connection() as conn:
+             with conn.cursor() as cur:
+                 # Use SKIP LOCKED to allow concurrent workers
+                 cur.execute(
+                     f"""
+                     UPDATE {self.table_name}
+                     SET locked_until = %(locked_until)s,
+                         attempts = attempts + 1
+                     WHERE id = (
+                         SELECT id FROM {self.table_name}
+                         WHERE deliver_at <= NOW()
+                           AND (locked_until IS NULL OR locked_until < NOW())
+                           AND attempts < %(max_attempts)s
+                         ORDER BY deliver_at
+                         LIMIT 1
+                         FOR UPDATE SKIP LOCKED
+                     )
+                     RETURNING id, message_type, payload, attempts
+                     """,
+                     {
+                         "locked_until": locked_until,
+                         "max_attempts": self.max_attempts,
+                     },
+                 )
+                 row = cur.fetchone()
+                 conn.commit()
+
+                 if row:
+                     msg_id = row["id"]
+                     msg_type = row["message_type"]
+                     payload = row["payload"]
+                     attempts = row["attempts"]
+
+                     message = self._deserialize_message(msg_type, payload)
+                     message.message_id = str(msg_id)
+                     message.attempts = attempts
+                     self._pending[msg_id] = {
+                         "message": message,
+                         "type": msg_type,
+                     }
+                     return message
+
+         return None
+
+     def ack(self, message: Message) -> None:
+         """Acknowledge a message, removing it from the queue."""
+         if not message.message_id:
+             return
+
+         msg_id = int(message.message_id)
+         pool = self._get_pool()
+
+         with pool.connection() as conn:
+             with conn.cursor() as cur:
+                 cur.execute(
+                     f"DELETE FROM {self.table_name} WHERE id = %(id)s",
+                     {"id": msg_id},
+                 )
+                 conn.commit()
+
+         self._pending.pop(msg_id, None)
+
+     def ensure(
+         self,
+         message: Message,
+         delay: timedelta,
+     ) -> None:
+         """Ensure a message is in the queue with the given delay."""
+         # For simplicity, just push the message
+         # In production, would check for duplicates
+         self.push(message, delay)
+
+     def reschedule(
+         self,
+         message: Message,
+         delay: timedelta,
+     ) -> None:
+         """Reschedule a message with a new delay."""
+         if not message.message_id:
+             return
+
+         msg_id = int(message.message_id)
+         deliver_at = datetime.now() + delay
+         pool = self._get_pool()
+
+         with pool.connection() as conn:
+             with conn.cursor() as cur:
+                 cur.execute(
+                     f"""
+                     UPDATE {self.table_name}
+                     SET deliver_at = %(deliver_at)s,
+                         locked_until = NULL
+                     WHERE id = %(id)s
+                     """,
+                     {"id": msg_id, "deliver_at": deliver_at},
+                 )
+                 conn.commit()
+
+         self._pending.pop(msg_id, None)
+
+     def size(self) -> int:
+         """Get the number of messages in the queue."""
+         pool = self._get_pool()
+         with pool.connection() as conn:
+             with conn.cursor() as cur:
+                 cur.execute(f"SELECT COUNT(*) as cnt FROM {self.table_name}")
+                 row = cur.fetchone()
+                 return row["cnt"] if row else 0
+
+     def clear(self) -> None:
+         """Clear all messages from the queue."""
+         pool = self._get_pool()
+         with pool.connection() as conn:
+             with conn.cursor() as cur:
+                 cur.execute(f"DELETE FROM {self.table_name}")
+                 conn.commit()
+
+         self._pending.clear()
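The SQL above expects a queue_messages table (the name is configurable via table_name) with at least id, message_id, message_type, payload, deliver_at, locked_until, and attempts columns; the authoritative DDL ships with the package's migrations, which are not part of this hunk. A hedged sketch of the worker-side retry semantics, with the DSN and process() purely illustrative:

```python
from datetime import timedelta

pq = PostgresQueue(
    "postgresql://stabilize:secret@localhost/stabilize",  # assumed DSN
    lock_duration=timedelta(minutes=5),
    max_attempts=10,
)

msg = pq.poll_one()              # leases one row via FOR UPDATE SKIP LOCKED,
                                 # increments attempts, sets locked_until
if msg is not None:
    try:
        process(msg)             # hypothetical handler
        pq.ack(msg)              # success: the row is deleted
    except Exception:
        # Without an ack the row becomes visible again once locked_until
        # expires; rescheduling makes it visible sooner.
        pq.reschedule(msg, timedelta(seconds=30))
```

Rows whose attempts counter reaches max_attempts stop being selected by poll_one(), so repeatedly failing messages drop out of circulation without being deleted.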