stabilize-0.9.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. stabilize/__init__.py +29 -0
  2. stabilize/cli.py +1193 -0
  3. stabilize/context/__init__.py +7 -0
  4. stabilize/context/stage_context.py +170 -0
  5. stabilize/dag/__init__.py +15 -0
  6. stabilize/dag/graph.py +215 -0
  7. stabilize/dag/topological.py +199 -0
  8. stabilize/examples/__init__.py +1 -0
  9. stabilize/examples/docker-example.py +759 -0
  10. stabilize/examples/golden-standard-expected-result.txt +1 -0
  11. stabilize/examples/golden-standard.py +488 -0
  12. stabilize/examples/http-example.py +606 -0
  13. stabilize/examples/llama-example.py +662 -0
  14. stabilize/examples/python-example.py +731 -0
  15. stabilize/examples/shell-example.py +399 -0
  16. stabilize/examples/ssh-example.py +603 -0
  17. stabilize/handlers/__init__.py +53 -0
  18. stabilize/handlers/base.py +226 -0
  19. stabilize/handlers/complete_stage.py +209 -0
  20. stabilize/handlers/complete_task.py +75 -0
  21. stabilize/handlers/complete_workflow.py +150 -0
  22. stabilize/handlers/run_task.py +369 -0
  23. stabilize/handlers/start_stage.py +262 -0
  24. stabilize/handlers/start_task.py +74 -0
  25. stabilize/handlers/start_workflow.py +136 -0
  26. stabilize/launcher.py +307 -0
  27. stabilize/migrations/01KDQ4N9QPJ6Q4MCV3V9GHWPV4_initial_schema.sql +97 -0
  28. stabilize/migrations/01KDRK3TXW4R2GERC1WBCQYJGG_rag_embeddings.sql +25 -0
  29. stabilize/migrations/__init__.py +1 -0
  30. stabilize/models/__init__.py +15 -0
  31. stabilize/models/stage.py +389 -0
  32. stabilize/models/status.py +146 -0
  33. stabilize/models/task.py +125 -0
  34. stabilize/models/workflow.py +317 -0
  35. stabilize/orchestrator.py +113 -0
  36. stabilize/persistence/__init__.py +28 -0
  37. stabilize/persistence/connection.py +185 -0
  38. stabilize/persistence/factory.py +136 -0
  39. stabilize/persistence/memory.py +214 -0
  40. stabilize/persistence/postgres.py +655 -0
  41. stabilize/persistence/sqlite.py +674 -0
  42. stabilize/persistence/store.py +235 -0
  43. stabilize/queue/__init__.py +59 -0
  44. stabilize/queue/messages.py +377 -0
  45. stabilize/queue/processor.py +312 -0
  46. stabilize/queue/queue.py +526 -0
  47. stabilize/queue/sqlite_queue.py +354 -0
  48. stabilize/rag/__init__.py +19 -0
  49. stabilize/rag/assistant.py +459 -0
  50. stabilize/rag/cache.py +294 -0
  51. stabilize/stages/__init__.py +11 -0
  52. stabilize/stages/builder.py +253 -0
  53. stabilize/tasks/__init__.py +19 -0
  54. stabilize/tasks/interface.py +335 -0
  55. stabilize/tasks/registry.py +255 -0
  56. stabilize/tasks/result.py +283 -0
  57. stabilize-0.9.2.dist-info/METADATA +301 -0
  58. stabilize-0.9.2.dist-info/RECORD +61 -0
  59. stabilize-0.9.2.dist-info/WHEEL +4 -0
  60. stabilize-0.9.2.dist-info/entry_points.txt +2 -0
  61. stabilize-0.9.2.dist-info/licenses/LICENSE +201 -0
stabilize/queue/sqlite_queue.py
@@ -0,0 +1,354 @@
+ """
+ SQLite-backed queue implementation.
+
+ Uses optimistic locking for concurrent message processing since SQLite
+ does not support FOR UPDATE SKIP LOCKED.
+ Uses singleton ConnectionManager for efficient connection sharing.
+ """
+
+ from __future__ import annotations
+
+ import json
+ import logging
+ import sqlite3
+ import uuid
+ from collections.abc import Callable
+ from datetime import datetime, timedelta
+ from typing import Any
+
+ from stabilize.queue.messages import (
+     Message,
+     create_message_from_dict,
+     get_message_type_name,
+ )
+ from stabilize.queue.queue import Queue
+
+ logger = logging.getLogger(__name__)
+
+
+ class SqliteQueue(Queue):
+     """
+     SQLite-backed queue implementation.
+
+     Uses optimistic locking with a version column for concurrent access.
+     Multiple workers can safely poll messages, though with lower
+     throughput than PostgreSQL's SKIP LOCKED.
+
+     The queue uses a `version` column to implement optimistic locking:
+     1. SELECT a candidate message with its version
+     2. UPDATE the message WHERE version = expected_version
+     3. If 0 rows updated, another worker claimed it - retry
+
+     Features:
+     - Multi-worker support via optimistic locking
+     - Configurable lock duration and max attempts
+     - Automatic retry on lock contention
+     - WAL mode for better concurrent read performance
+     - Thread-local connections managed by singleton ConnectionManager
+     """
+
+     def __init__(
+         self,
+         connection_string: str,
+         table_name: str = "queue_messages",
+         lock_duration: timedelta = timedelta(minutes=5),
+         max_attempts: int = 10,
+     ) -> None:
+         """
+         Initialize the SQLite queue.
+
+         Args:
+             connection_string: SQLite connection string (e.g., sqlite:///./db.sqlite)
+             table_name: Name of the queue table
+             lock_duration: How long to lock messages during processing
+             max_attempts: Maximum retry attempts before dropping message
+         """
+         from stabilize.persistence.connection import get_connection_manager
+
+         self.connection_string = connection_string
+         self.table_name = table_name
+         self.lock_duration = lock_duration
+         self.max_attempts = max_attempts
+         self._manager = get_connection_manager()
+         self._pending: dict[int, dict[str, Any]] = {}
+
+     def _get_connection(self) -> sqlite3.Connection:
+         """
+         Get thread-local connection from ConnectionManager.
+
+         Returns a connection configured with:
+         - Row factory for dict-like access
+         - WAL journal mode for concurrency
+         - 30 second busy timeout
+         """
+         return self._manager.get_sqlite_connection(self.connection_string)
+
+     def close(self) -> None:
+         """Close SQLite connection for current thread."""
+         self._manager.close_sqlite_connection(self.connection_string)
+
+     def _create_table(self) -> None:
+         """Create the queue table if it doesn't exist."""
+         conn = self._get_connection()
+         conn.execute(
+             f"""
+             CREATE TABLE IF NOT EXISTS {self.table_name} (
+                 id INTEGER PRIMARY KEY AUTOINCREMENT,
+                 message_id TEXT NOT NULL UNIQUE,
+                 message_type TEXT NOT NULL,
+                 payload TEXT NOT NULL,
+                 deliver_at TEXT NOT NULL DEFAULT (datetime('now')),
+                 attempts INTEGER DEFAULT 0,
+                 max_attempts INTEGER DEFAULT 10,
+                 locked_until TEXT,
+                 version INTEGER DEFAULT 0,
+                 created_at TEXT DEFAULT (datetime('now'))
+             )
+             """
+         )
+         conn.execute(
+             f"""
+             CREATE INDEX IF NOT EXISTS idx_{self.table_name}_deliver
+             ON {self.table_name}(deliver_at)
+             """
+         )
+         conn.execute(
+             f"""
+             CREATE INDEX IF NOT EXISTS idx_{self.table_name}_locked
+             ON {self.table_name}(locked_until)
+             """
+         )
+         conn.commit()
+
+     def _serialize_message(self, message: Message) -> str:
+         """Serialize a message to JSON."""
+         from enum import Enum
+
+         data = {}
+         for key, value in message.__dict__.items():
+             if key.startswith("_"):
+                 continue
+             if isinstance(value, datetime):
+                 data[key] = value.isoformat()
+             elif isinstance(value, Enum):
+                 data[key] = value.name
+             else:
+                 data[key] = value
+         return json.dumps(data)
+
+     def _deserialize_message(self, type_name: str, payload: Any) -> Message:
+         """Deserialize a message from JSON string or dict."""
+         from stabilize.models.stage import SyntheticStageOwner
+         from stabilize.models.status import WorkflowStatus
+
+         if isinstance(payload, dict):
+             data = payload
+         else:
+             data = json.loads(payload)
+
+         # Convert enum values
+         if "status" in data and isinstance(data["status"], str):
+             data["status"] = WorkflowStatus[data["status"]]
+         if "original_status" in data and data["original_status"]:
+             data["original_status"] = WorkflowStatus[data["original_status"]]
+         if "phase" in data and isinstance(data["phase"], str):
+             data["phase"] = SyntheticStageOwner[data["phase"]]
+
+         # Remove metadata fields
+         data.pop("message_id", None)
+         data.pop("created_at", None)
+         data.pop("attempts", None)
+         data.pop("max_attempts", None)
+
+         return create_message_from_dict(type_name, data)
+
+     def push(
+         self,
+         message: Message,
+         delay: timedelta | None = None,
+     ) -> None:
+         """Push a message onto the queue."""
+         conn = self._get_connection()
+         deliver_at = datetime.now()
+         if delay:
+             deliver_at += delay
+
+         message_type = get_message_type_name(message)
+         message_id = str(uuid.uuid4())
+         payload = self._serialize_message(message)
+
+         conn.execute(
+             f"""
+             INSERT INTO {self.table_name}
+             (message_id, message_type, payload, deliver_at, attempts)
+             VALUES (:message_id, :type, :payload, :deliver_at, 0)
+             """,
+             {
+                 "message_id": message_id,
+                 "type": message_type,
+                 "payload": payload,
+                 "deliver_at": deliver_at.isoformat(),
+             },
+         )
+         conn.commit()
+
+         logger.debug(f"Pushed {message_type} (id={message_id}, deliver_at={deliver_at})")
+
+     def poll(self, callback: Callable[[Message], None]) -> None:
+         """Poll for a message and process it with the callback."""
+         message = self.poll_one()
+         if message:
+             try:
+                 callback(message)
+                 self.ack(message)
+             except Exception:
+                 # Message will be retried after lock expires
+                 raise
+
+     def poll_one(self) -> Message | None:
+         """
+         Poll for a single message using optimistic locking.
+
+         This implementation uses a version column to handle concurrent
+         access without FOR UPDATE SKIP LOCKED:
+
+         1. SELECT a candidate message with current version
+         2. Try to UPDATE with WHERE version = expected
+         3. If rowcount == 0, another worker got it - return None
+         4. If rowcount == 1, we claimed it successfully
+
+         Returns:
+             The claimed message or None if no message available
+         """
+         conn = self._get_connection()
+         locked_until = datetime.now() + self.lock_duration
+
+         # Step 1: Find a candidate message
+         result = conn.execute(
+             f"""
+             SELECT id, message_type, payload, attempts, version
+             FROM {self.table_name}
+             WHERE datetime(deliver_at) <= datetime('now')
+             AND (locked_until IS NULL OR datetime(locked_until) < datetime('now'))
+             AND attempts < :max_attempts
+             ORDER BY deliver_at
+             LIMIT 1
+             """,
+             {"max_attempts": self.max_attempts},
+         )
+         row = result.fetchone()
+
+         if not row:
+             return None
+
+         msg_id = row["id"]
+         msg_type = row["message_type"]
+         payload = row["payload"]
+         attempts = row["attempts"]
+         version = row["version"]
+
+         # Step 2: Try to claim with optimistic lock
+         cursor = conn.execute(
+             f"""
+             UPDATE {self.table_name}
+             SET locked_until = :locked_until,
+                 attempts = attempts + 1,
+                 version = version + 1
+             WHERE id = :id AND version = :version
+             """,
+             {
+                 "id": msg_id,
+                 "locked_until": locked_until.isoformat(),
+                 "version": version,
+             },
+         )
+         conn.commit()
+
+         # Step 3: Check if we won the race
+         if cursor.rowcount == 0:
+             # Another worker grabbed it
+             logger.debug(f"Lost race for message {msg_id}, will retry")
+             return None
+
+         # Step 4: Successfully claimed - deserialize and return
+         message = self._deserialize_message(msg_type, payload)
+         message.message_id = str(msg_id)
+         message.attempts = attempts + 1
+
+         self._pending[msg_id] = {
+             "message": message,
+             "type": msg_type,
+         }
+
+         logger.debug(f"Polled {msg_type} (id={msg_id}, attempts={attempts + 1})")
+         return message
+
+     def ack(self, message: Message) -> None:
+         """Acknowledge a message, removing it from the queue."""
+         if not message.message_id:
+             return
+
+         msg_id = int(message.message_id)
+         conn = self._get_connection()
+
+         conn.execute(
+             f"DELETE FROM {self.table_name} WHERE id = :id",
+             {"id": msg_id},
+         )
+         conn.commit()
+
+         self._pending.pop(msg_id, None)
+         logger.debug(f"Acked message (id={msg_id})")
+
+     def ensure(
+         self,
+         message: Message,
+         delay: timedelta,
+     ) -> None:
+         """Ensure a message is in the queue with the given delay."""
+         # For simplicity, just push the message
+         # A full implementation would check for duplicates
+         self.push(message, delay)
+
+     def reschedule(
+         self,
+         message: Message,
+         delay: timedelta,
+     ) -> None:
+         """Reschedule a message with a new delay."""
+         if not message.message_id:
+             return
+
+         msg_id = int(message.message_id)
+         deliver_at = datetime.now() + delay
+         conn = self._get_connection()
+
+         conn.execute(
+             f"""
+             UPDATE {self.table_name}
+             SET deliver_at = :deliver_at,
+                 locked_until = NULL
+             WHERE id = :id
+             """,
+             {"id": msg_id, "deliver_at": deliver_at.isoformat()},
+         )
+         conn.commit()
+
+         self._pending.pop(msg_id, None)
+         logger.debug(f"Rescheduled message (id={msg_id}, deliver_at={deliver_at})")
+
+     def size(self) -> int:
+         """Get the number of messages in the queue."""
+         conn = self._get_connection()
+         result = conn.execute(f"SELECT COUNT(*) FROM {self.table_name}")
+         row = result.fetchone()
+         return row[0] if row else 0
+
+     def clear(self) -> None:
+         """Clear all messages from the queue."""
+         conn = self._get_connection()
+         conn.execute(f"DELETE FROM {self.table_name}")
+         conn.commit()
+
+         self._pending.clear()
+         logger.debug("Cleared queue")
stabilize/rag/__init__.py
@@ -0,0 +1,19 @@
+ """RAG module for AI-powered pipeline generation."""
+
+ from .assistant import StabilizeRAG
+ from .cache import (
+     CachedEmbedding,
+     EmbeddingCache,
+     PostgresEmbeddingCache,
+     SqliteEmbeddingCache,
+     get_cache,
+ )
+
+ __all__ = [
+     "StabilizeRAG",
+     "EmbeddingCache",
+     "SqliteEmbeddingCache",
+     "PostgresEmbeddingCache",
+     "CachedEmbedding",
+     "get_cache",
+ ]
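
This __init__ only flattens the import surface: the names defined in assistant.py and cache.py become importable from stabilize.rag directly. The sketch below shows exactly that and nothing more; the names are confirmed by the diff, but their constructor and method signatures are not part of this file, so no calls are shown.

    # Every name in __all__ above resolves from the package root of the rag
    # module; signatures beyond these names are not visible in this diff.
    from stabilize.rag import (
        CachedEmbedding,
        EmbeddingCache,
        PostgresEmbeddingCache,
        SqliteEmbeddingCache,
        StabilizeRAG,
        get_cache,
    )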