aegra_api-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. aegra_api/__init__.py +3 -0
  2. aegra_api/api/__init__.py +1 -0
  3. aegra_api/api/assistants.py +235 -0
  4. aegra_api/api/runs.py +1110 -0
  5. aegra_api/api/store.py +200 -0
  6. aegra_api/api/threads.py +761 -0
  7. aegra_api/config.py +204 -0
  8. aegra_api/constants.py +5 -0
  9. aegra_api/core/__init__.py +0 -0
  10. aegra_api/core/app_loader.py +91 -0
  11. aegra_api/core/auth_ctx.py +65 -0
  12. aegra_api/core/auth_deps.py +186 -0
  13. aegra_api/core/auth_handlers.py +248 -0
  14. aegra_api/core/auth_middleware.py +331 -0
  15. aegra_api/core/database.py +123 -0
  16. aegra_api/core/health.py +131 -0
  17. aegra_api/core/orm.py +165 -0
  18. aegra_api/core/route_merger.py +69 -0
  19. aegra_api/core/serializers/__init__.py +7 -0
  20. aegra_api/core/serializers/base.py +22 -0
  21. aegra_api/core/serializers/general.py +54 -0
  22. aegra_api/core/serializers/langgraph.py +102 -0
  23. aegra_api/core/sse.py +178 -0
  24. aegra_api/main.py +303 -0
  25. aegra_api/middleware/__init__.py +4 -0
  26. aegra_api/middleware/double_encoded_json.py +74 -0
  27. aegra_api/middleware/logger_middleware.py +95 -0
  28. aegra_api/models/__init__.py +76 -0
  29. aegra_api/models/assistants.py +81 -0
  30. aegra_api/models/auth.py +62 -0
  31. aegra_api/models/enums.py +29 -0
  32. aegra_api/models/errors.py +29 -0
  33. aegra_api/models/runs.py +124 -0
  34. aegra_api/models/store.py +67 -0
  35. aegra_api/models/threads.py +152 -0
  36. aegra_api/observability/__init__.py +1 -0
  37. aegra_api/observability/base.py +88 -0
  38. aegra_api/observability/otel.py +133 -0
  39. aegra_api/observability/setup.py +27 -0
  40. aegra_api/observability/targets/__init__.py +11 -0
  41. aegra_api/observability/targets/base.py +18 -0
  42. aegra_api/observability/targets/langfuse.py +33 -0
  43. aegra_api/observability/targets/otlp.py +38 -0
  44. aegra_api/observability/targets/phoenix.py +24 -0
  45. aegra_api/services/__init__.py +0 -0
  46. aegra_api/services/assistant_service.py +569 -0
  47. aegra_api/services/base_broker.py +59 -0
  48. aegra_api/services/broker.py +141 -0
  49. aegra_api/services/event_converter.py +157 -0
  50. aegra_api/services/event_store.py +196 -0
  51. aegra_api/services/graph_streaming.py +433 -0
  52. aegra_api/services/langgraph_service.py +456 -0
  53. aegra_api/services/streaming_service.py +362 -0
  54. aegra_api/services/thread_state_service.py +128 -0
  55. aegra_api/settings.py +124 -0
  56. aegra_api/utils/__init__.py +3 -0
  57. aegra_api/utils/assistants.py +23 -0
  58. aegra_api/utils/run_utils.py +60 -0
  59. aegra_api/utils/setup_logging.py +122 -0
  60. aegra_api/utils/sse_utils.py +26 -0
  61. aegra_api/utils/status_compat.py +57 -0
  62. aegra_api-0.1.0.dist-info/METADATA +244 -0
  63. aegra_api-0.1.0.dist-info/RECORD +64 -0
  64. aegra_api-0.1.0.dist-info/WHEEL +4 -0
aegra_api/services/streaming_service.py ADDED
@@ -0,0 +1,362 @@
+ """Streaming service for orchestrating SSE streaming"""
+
+ import asyncio
+ from collections.abc import AsyncIterator
+ from typing import Any
+
+ import structlog
+
+ from aegra_api.core.sse import create_error_event
+ from aegra_api.models import Run
+ from aegra_api.services.broker import broker_manager
+ from aegra_api.services.event_converter import EventConverter
+ from aegra_api.services.event_store import event_store, store_sse_event
+ from aegra_api.utils import extract_event_sequence, generate_event_id
+
+ logger = structlog.getLogger(__name__)
+
+
+ class StreamingService:
+     """Service to handle SSE streaming orchestration"""
+
+     def __init__(self):
+         self.event_counters: dict[str, int] = {}
+         self.event_converter = EventConverter()
+
+     def _next_event_counter(self, run_id: str, event_id: str) -> int:
+         """Update and return the next event counter for a run"""
+         try:
+             idx = self._extract_event_sequence(event_id)
+             current = self.event_counters.get(run_id, 0)
+             if idx > current:
+                 self.event_counters[run_id] = idx
+             return idx
+         except Exception as e:
+             logger.warning(f"Event counter update failed: {e}")
+             return self.event_counters.get(run_id, 0)
+
+     async def put_to_broker(
+         self,
+         run_id: str,
+         event_id: str,
+         raw_event: Any,
+     ):
+         """Put an event into the run's broker queue for live consumers
+
+         Note: Events from graph_streaming are already filtered, so they pass through as-is.
+         """
+         broker = broker_manager.get_or_create_broker(run_id)
+         self._next_event_counter(run_id, event_id)
+         await broker.put(event_id, raw_event)
+
+     async def store_event_from_raw(
+         self,
+         run_id: str,
+         event_id: str,
+         raw_event: Any,
+     ):
+         """Convert raw event to stored format and store it
+
+         Note: Events from graph_streaming are already filtered, so they pass through as-is.
+         """
+         processed_event = raw_event
+
+         # Parse the processed event
+         node_path = None
+         stream_mode_label = None
+         event_payload = None
+
+         if isinstance(processed_event, tuple):
+             if len(processed_event) == 2:
+                 stream_mode_label, event_payload = processed_event
+             elif len(processed_event) == 3:
+                 node_path, stream_mode_label, event_payload = processed_event
+         else:
+             stream_mode_label = "values"
+             event_payload = processed_event
+
+         # Store based on stream mode
+         if stream_mode_label == "messages":
+             await store_sse_event(
+                 run_id,
+                 event_id,
+                 "messages",
+                 {
+                     "type": "messages_stream",
+                     "message_chunk": event_payload[0]
+                     if isinstance(event_payload, tuple) and len(event_payload) >= 1
+                     else event_payload,
+                     "metadata": event_payload[1]
+                     if isinstance(event_payload, tuple) and len(event_payload) >= 2
+                     else None,
+                     "node_path": node_path,
+                 },
+             )
+         elif stream_mode_label == "messages/partial":
+             await store_sse_event(
+                 run_id,
+                 event_id,
+                 "messages/partial",
+                 {
+                     "type": "messages_partial",
+                     "messages": event_payload,
+                     "node_path": node_path,
+                 },
+             )
+         elif stream_mode_label == "messages/complete":
+             await store_sse_event(
+                 run_id,
+                 event_id,
+                 "messages/complete",
+                 {
+                     "type": "messages_complete",
+                     "messages": event_payload,
+                     "node_path": node_path,
+                 },
+             )
+         elif stream_mode_label == "messages/metadata":
+             await store_sse_event(
+                 run_id,
+                 event_id,
+                 "messages/metadata",
+                 {
+                     "type": "messages_metadata",
+                     "metadata": event_payload,
+                     "node_path": node_path,
+                 },
+             )
+         elif stream_mode_label == "events":
+             await store_sse_event(
+                 run_id,
+                 event_id,
+                 "events",
+                 {
+                     "type": "langchain_event",
+                     "event": event_payload,
+                 },
+             )
+         elif stream_mode_label == "values" or stream_mode_label == "updates":
+             await store_sse_event(
+                 run_id,
+                 event_id,
+                 "values",
+                 {"type": "execution_values", "chunk": event_payload},
+             )
+         elif stream_mode_label == "end":
+             await store_sse_event(
+                 run_id,
+                 event_id,
+                 "end",
+                 {
+                     "type": "run_complete",
+                     "status": event_payload.get("status", "success"),
+                     "final_output": event_payload.get("final_output"),
+                 },
+             )
+         # Add other stream modes as needed
+
+     async def signal_run_cancelled(self, run_id: str):
+         """Signal that a run was cancelled"""
+         counter = self.event_counters.get(run_id, 0) + 1
+         self.event_counters[run_id] = counter
+         event_id = generate_event_id(run_id, counter)
+
+         broker = broker_manager.get_or_create_broker(run_id)
+         if broker:
+             await broker.put(event_id, ("end", {"status": "interrupted"}))
+
+         broker_manager.cleanup_broker(run_id)
+
+     async def signal_run_error(self, run_id: str, error_message: str, error_type: str = "Error"):
+         """Signal that a run encountered an error.
+
+         Sends a proper 'error' event to the broker and stores it for replay.
+         Also sends an 'end' event to signal stream completion.
+
+         Args:
+             run_id: The run ID.
+             error_message: Human-readable error message.
+             error_type: Error type/class name (e.g., "ValueError", "GraphRecursionError").
+         """
+         counter = self.event_counters.get(run_id, 0) + 1
+         self.event_counters[run_id] = counter
+         error_event_id = generate_event_id(run_id, counter)
+
+         # Create structured error payload
+         error_payload = {"error": error_type, "message": error_message}
+
+         broker = broker_manager.get_or_create_broker(run_id)
+         if broker:
+             # Send dedicated error event (so frontend receives the error details)
+             await broker.put(error_event_id, ("error", error_payload))
+
+             # Store error event for replay support
+             await store_sse_event(
+                 run_id,
+                 error_event_id,
+                 "error",
+                 {"error": error_type, "message": error_message},
+             )
+
+             # Send end event to signal stream completion
+             counter += 1
+             self.event_counters[run_id] = counter
+             end_event_id = generate_event_id(run_id, counter)
+             await broker.put(end_event_id, ("end", {"status": "error"}))
+
+         broker_manager.cleanup_broker(run_id)
+
+     def _extract_event_sequence(self, event_id: str) -> int:
+         """Extract numeric sequence from event_id format: {run_id}_event_{sequence}"""
+         return extract_event_sequence(event_id)
+
+     async def stream_run_execution(
+         self,
+         run: Run,
+         last_event_id: str | None = None,
+         cancel_on_disconnect: bool = False,
+     ) -> AsyncIterator[str]:
+         """Stream run execution with unified producer-consumer pattern"""
+         run_id = run.run_id
+         try:
+             # Replay stored events first
+             last_sent_sequence = 0
+             if last_event_id:
+                 last_sent_sequence = self._extract_event_sequence(last_event_id)
+
+             async for sse_event in self._replay_stored_events(run_id, last_event_id):
+                 yield sse_event
+
+             # Stream live events if run is still active
+             async for sse_event in self._stream_live_events(run, last_sent_sequence):
+                 yield sse_event
+
+         except asyncio.CancelledError:
+             logger.debug(f"Stream cancelled for run {run_id}")
+             if cancel_on_disconnect:
+                 self._cancel_background_task(run_id)
+             raise
+         except Exception as e:
+             logger.error(f"Error in stream_run_execution for run {run_id}: {e}")
+             yield create_error_event(str(e))
+
+     async def _replay_stored_events(self, run_id: str, last_event_id: str | None) -> AsyncIterator[str]:
+         """Replay stored events"""
+         if last_event_id:
+             stored_events = await event_store.get_events_since(run_id, last_event_id)
+         else:
+             stored_events = await event_store.get_all_events(run_id)
+
+         for ev in stored_events:
+             sse_event = self._stored_event_to_sse(run_id, ev)
+             if sse_event:
+                 yield sse_event
+
+     async def _stream_live_events(self, run: Run, last_sent_sequence: int) -> AsyncIterator[str]:
+         """Stream live events from broker"""
+         run_id = run.run_id
+         broker = broker_manager.get_or_create_broker(run_id)
+
+         # If run finished and broker is done, nothing to stream
+         if run.status in ["success", "error", "interrupted"] and broker.is_finished():
+             return
+
+         # Stream live events
+         if broker:
+             async for event_id, raw_event in broker.aiter():
+                 # Skip duplicates that were already replayed
+                 current_sequence = self._extract_event_sequence(event_id)
+                 if current_sequence <= last_sent_sequence:
+                     continue
+
+                 sse_event = await self._convert_raw_to_sse(event_id, raw_event)
+                 if sse_event:
+                     yield sse_event
+                 last_sent_sequence = current_sequence
+
+     def _cancel_background_task(self, run_id: str) -> bool:
+         """Cancel the asyncio task for a run.
+
+         @param run_id: The ID of the run to cancel.
+         @return: True if task was cancelled, False otherwise.
+         """
+         try:
+             from aegra_api.api.runs import active_runs
+
+             task = active_runs.get(run_id)
+             if task and not task.done():
+                 logger.info(f"Cancelling asyncio task for run {run_id}")
+                 task.cancel()
+                 return True
+             elif task and task.done():
+                 logger.debug(f"Task for run {run_id} already completed")
+                 return False
+             else:
+                 logger.debug(f"No active task found for run {run_id}")
+                 return False
+         except Exception as e:
+             logger.warning(f"Failed to cancel background task for run {run_id}: {e}")
+             return False
+
+     async def _convert_raw_to_sse(self, event_id: str, raw_event: Any) -> str | None:
+         """Convert a raw event from broker to SSE format"""
+         return self.event_converter.convert_raw_to_sse(event_id, raw_event)
+
+     async def interrupt_run(self, run_id: str) -> bool:
+         """Interrupt a running execution.
+
+         Cancels the asyncio task and signals interruption to broker.
+         The task's CancelledError handler will set status to 'interrupted'.
+         """
+         try:
+             # Cancel the asyncio task first so it stops processing
+             self._cancel_background_task(run_id)
+             # Signal interruption to broker for any connected clients
+             await self.signal_run_error(run_id, "Run was interrupted")
+             return True
+         except Exception as e:
+             logger.error(f"Error interrupting run {run_id}: {e}")
+             return False
+
+     async def cancel_run(self, run_id: str) -> bool:
+         """Cancel a pending or running execution.
+
+         Cancels the asyncio task and signals cancellation to broker.
+         The task's CancelledError handler will set status to 'interrupted'.
+         """
+         try:
+             # Cancel the asyncio task first so it stops processing
+             self._cancel_background_task(run_id)
+             # Signal cancellation to broker for any connected clients
+             await self.signal_run_cancelled(run_id)
+             return True
+         except Exception as e:
+             logger.error(f"Error cancelling run {run_id}: {e}")
+             return False
+
+     async def _update_run_status(self, run_id: str, status: str, output: Any = None, error: str | None = None):
+         """Update run status in database using the shared updater."""
+         try:
+             # Lazy import to avoid cycles
+             from aegra_api.api.runs import update_run_status
+
+             await update_run_status(run_id, status, output, error)
+         except Exception as e:
+             logger.error(f"Error updating run status for {run_id}: {e}")
+
+     def is_run_streaming(self, run_id: str) -> bool:
+         """Check if run is currently active (has a broker)"""
+         broker = broker_manager.get_broker(run_id)
+         return broker is not None and not broker.is_finished()
+
+     async def cleanup_run(self, run_id: str):
+         """Clean up streaming resources for a run"""
+         broker_manager.cleanup_broker(run_id)
+
+     def _stored_event_to_sse(self, run_id: str, ev) -> str | None:
+         """Convert stored event object to SSE string"""
+         return self.event_converter.convert_stored_to_sse(ev, run_id)
+
+
+ # Global streaming service instance
+ streaming_service = StreamingService()
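
For orientation, a minimal sketch of how the module-level `streaming_service` could back an SSE endpoint. The route path, the `load_run` helper, and the response wiring below are illustrative assumptions, not this package's actual API surface:

```python
# Illustrative only: a hypothetical FastAPI route consuming the async iterator
# returned by StreamingService.stream_run_execution(). The real app resolves
# runs in aegra_api.api.runs; load_run here is a stand-in.
from fastapi import FastAPI, Header
from fastapi.responses import StreamingResponse

from aegra_api.models import Run
from aegra_api.services.streaming_service import streaming_service

app = FastAPI()


async def load_run(run_id: str) -> Run:
    """Hypothetical run loader for the sketch."""
    raise NotImplementedError


@app.get("/runs/{run_id}/stream")
async def stream_run(
    run_id: str,
    last_event_id: str | None = Header(default=None),  # SSE Last-Event-ID header
):
    run = await load_run(run_id)
    # Stored events are replayed past last_event_id, then live events follow.
    return StreamingResponse(
        streaming_service.stream_run_execution(run, last_event_id=last_event_id),
        media_type="text/event-stream",
    )
```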
aegra_api/services/thread_state_service.py ADDED
@@ -0,0 +1,128 @@
+ """Thread state conversion service"""
+
+ from datetime import datetime
+ from typing import Any
+
+ import structlog
+
+ from aegra_api.core.serializers import LangGraphSerializer
+ from aegra_api.models.threads import ThreadCheckpoint, ThreadState
+
+ logger = structlog.getLogger(__name__)
+
+
+ class ThreadStateService:
+     """Service for converting LangGraph snapshots to ThreadState objects"""
+
+     def __init__(self) -> None:
+         self.serializer = LangGraphSerializer()
+
+     def convert_snapshot_to_thread_state(self, snapshot: Any, thread_id: str, subgraphs: bool = False) -> ThreadState:
+         """Convert a LangGraph snapshot to ThreadState format"""
+         try:
+             # Extract basic values
+             values = getattr(snapshot, "values", {})
+             next_nodes = getattr(snapshot, "next", []) or []
+             metadata = getattr(snapshot, "metadata", {}) or {}
+             created_at = self._extract_created_at(snapshot)
+
+             # Extract tasks and interrupts using serializer
+             tasks = self.serializer.extract_tasks_from_snapshot(snapshot)
+
+             # Recursively serialize tasks' state (which might be subgraphs)
+             if subgraphs:
+                 for task in tasks:
+                     if "state" in task and task["state"] is not None:
+                         try:
+                             task["state"] = self.convert_snapshot_to_thread_state(
+                                 task["state"], thread_id, subgraphs=True
+                             )
+                         except Exception as e:
+                             logger.error(f"Failed to serialize subgraph state for task {task.get('id')}: {e}")
+                             task["state"] = None
+
+             interrupts = self.serializer.extract_interrupts_from_snapshot(snapshot)
+
+             # Create checkpoint objects
+             current_checkpoint = self._create_checkpoint(snapshot.config, thread_id)
+             parent_checkpoint = (
+                 self._create_checkpoint(snapshot.parent_config, thread_id) if snapshot.parent_config else None
+             )
+
+             # Extract checkpoint IDs for backward compatibility
+             checkpoint_id = self._extract_checkpoint_id(snapshot.config)
+             parent_checkpoint_id = (
+                 self._extract_checkpoint_id(snapshot.parent_config) if snapshot.parent_config else None
+             )
+
+             return ThreadState(
+                 values=values,
+                 next=next_nodes,
+                 tasks=tasks,
+                 interrupts=interrupts,
+                 metadata=metadata,
+                 created_at=created_at,
+                 checkpoint=current_checkpoint,
+                 parent_checkpoint=parent_checkpoint,
+                 checkpoint_id=checkpoint_id,
+                 parent_checkpoint_id=parent_checkpoint_id,
+             )
+
+         except Exception as e:
+             logger.error(
+                 f"Failed to convert snapshot to thread state: {e} "
+                 f"(thread_id={thread_id}, snapshot_type={type(snapshot).__name__})"
+             )
+             raise
+
+     def convert_snapshots_to_thread_states(self, snapshots: list[Any], thread_id: str) -> list[ThreadState]:
+         """Convert multiple snapshots to ThreadState objects"""
+         thread_states = []
+
+         for i, snapshot in enumerate(snapshots):
+             try:
+                 thread_state = self.convert_snapshot_to_thread_state(snapshot, thread_id)
+                 thread_states.append(thread_state)
+             except Exception as e:
+                 logger.error(f"Failed to convert snapshot in batch: {e} (thread_id={thread_id}, snapshot_index={i})")
+                 # Continue with other snapshots rather than failing the entire batch
+                 continue
+
+         return thread_states
+
+     def _extract_created_at(self, snapshot: Any) -> datetime | None:
+         """Extract created_at timestamp from snapshot"""
+         created_at = getattr(snapshot, "created_at", None)
+         if isinstance(created_at, str):
+             try:
+                 return datetime.fromisoformat(created_at.replace("Z", "+00:00"))
+             except ValueError:
+                 logger.warning(f"Invalid created_at format: {created_at}")
+                 return None
+         elif isinstance(created_at, datetime):
+             return created_at
+         return None
+
+     def _create_checkpoint(self, config: Any, thread_id: str) -> ThreadCheckpoint:
+         """Create ThreadCheckpoint from config"""
+         if not config or not isinstance(config, dict):
+             return ThreadCheckpoint(checkpoint_id=None, thread_id=thread_id, checkpoint_ns="")
+
+         configurable = config.get("configurable", {})
+         checkpoint_id = configurable.get("checkpoint_id")
+         checkpoint_ns = configurable.get("checkpoint_ns", "")
+
+         return ThreadCheckpoint(
+             checkpoint_id=checkpoint_id,
+             thread_id=thread_id,
+             checkpoint_ns=checkpoint_ns,
+         )
+
+     def _extract_checkpoint_id(self, config: Any) -> str | None:
+         """Extract checkpoint ID from config for backward compatibility"""
+         if not config or not isinstance(config, dict):
+             return None
+
+         configurable = config.get("configurable", {})
+         checkpoint_id = configurable.get("checkpoint_id")
+         return str(checkpoint_id) if checkpoint_id is not None else None
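
A minimal usage sketch, assuming a compiled LangGraph graph with a checkpointer (LangGraph's `aget_state` returns a `StateSnapshot`); the `graph` object itself is not provided by this module:

```python
# Illustrative only: converting LangGraph's current StateSnapshot for a
# thread into the API's ThreadState model.
from aegra_api.services.thread_state_service import ThreadStateService

service = ThreadStateService()


async def current_thread_state(graph, thread_id: str):
    # LangGraph addresses a thread's checkpoints via the configurable dict.
    config = {"configurable": {"thread_id": thread_id}}
    snapshot = await graph.aget_state(config)  # StateSnapshot
    return service.convert_snapshot_to_thread_state(snapshot, thread_id, subgraphs=False)
```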
aegra_api/settings.py ADDED
@@ -0,0 +1,124 @@
+ from typing import Annotated
+
+ from pydantic import BeforeValidator, computed_field
+ from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+ def parse_lower(v: str) -> str:
+     """Converts to lowercase and strips whitespace."""
+     return v.strip().lower() if isinstance(v, str) else v
+
+
+ def parse_upper(v: str) -> str:
+     """Converts to uppercase and strips whitespace."""
+     return v.strip().upper() if isinstance(v, str) else v
+
+
+ # Custom types for automatic formatting
+ LowerStr = Annotated[str, BeforeValidator(parse_lower)]
+ UpperStr = Annotated[str, BeforeValidator(parse_upper)]
+
+
+ class EnvBase(BaseSettings):
+     model_config = SettingsConfigDict(
+         env_file=".env",
+         env_file_encoding="utf-8",
+         extra="ignore",
+     )
+
+
+ class AppSettings(EnvBase):
+     """General application settings."""
+
+     PROJECT_NAME: str = "Aegra"
+     VERSION: str = "0.1.0"
+
+     # Server config
+     HOST: str = "0.0.0.0"  # nosec B104
+     PORT: int = 8000
+     SERVER_URL: str = "http://localhost:8000"
+
+     # App logic
+     AEGRA_CONFIG: str = "aegra.json"  # Default config file path
+     AUTH_TYPE: LowerStr = "noop"
+     ENV_MODE: UpperStr = "LOCAL"
+     DEBUG: bool = False
+
+     # Logging
+     LOG_LEVEL: UpperStr = "INFO"
+     LOG_VERBOSITY: LowerStr = "verbose"
+
+
+ class DatabaseSettings(EnvBase):
+     """Database connection settings."""
+
+     POSTGRES_USER: str = "postgres"
+     POSTGRES_PASSWORD: str = "postgres"
+     POSTGRES_HOST: str = "localhost"
+     POSTGRES_PORT: str = "5432"
+     POSTGRES_DB: str = "aegra"
+     DB_ECHO_LOG: bool = False
+
+     @computed_field
+     @property
+     def database_url(self) -> str:
+         """Async URL for SQLAlchemy (asyncpg)."""
+         return (
+             f"postgresql+asyncpg://{self.POSTGRES_USER}:{self.POSTGRES_PASSWORD}@"
+             f"{self.POSTGRES_HOST}:{self.POSTGRES_PORT}/{self.POSTGRES_DB}"
+         )
+
+     @computed_field
+     @property
+     def database_url_sync(self) -> str:
+         """Sync URL for LangGraph/Psycopg (postgresql://)."""
+         return (
+             f"postgresql://{self.POSTGRES_USER}:{self.POSTGRES_PASSWORD}@"
+             f"{self.POSTGRES_HOST}:{self.POSTGRES_PORT}/{self.POSTGRES_DB}"
+         )
+
+
+ class PoolSettings(EnvBase):
+     """Connection pool settings for SQLAlchemy and LangGraph."""
+
+     SQLALCHEMY_POOL_SIZE: int = 2
+     SQLALCHEMY_MAX_OVERFLOW: int = 0
+
+     LANGGRAPH_MIN_POOL_SIZE: int = 1
+     LANGGRAPH_MAX_POOL_SIZE: int = 6
+
+
+ class ObservabilitySettings(EnvBase):
+     """
+     Unified settings for OpenTelemetry and vendor targets.
+     Supports fan-out configuration via OTEL_TARGETS.
+     """
+
+     # General OTEL config
+     OTEL_SERVICE_NAME: str = "aegra-backend"
+     OTEL_TARGETS: str = ""  # Comma-separated: "LANGFUSE,PHOENIX"
+     OTEL_CONSOLE_EXPORT: bool = False  # For local debugging
+
+     # --- Generic OTLP target (default/custom) ---
+     OTEL_EXPORTER_OTLP_ENDPOINT: str | None = None
+     OTEL_EXPORTER_OTLP_HEADERS: str | None = None
+
+     # --- Langfuse specifics ---
+     LANGFUSE_BASE_URL: str = "http://localhost:3000"
+     LANGFUSE_PUBLIC_KEY: str | None = None
+     LANGFUSE_SECRET_KEY: str | None = None
+
+     # --- Phoenix specifics ---
+     PHOENIX_COLLECTOR_ENDPOINT: str = "http://127.0.0.1:6006/v1/traces"
+     PHOENIX_API_KEY: str | None = None
+
+
+ class Settings:
+     def __init__(self) -> None:
+         self.app = AppSettings()
+         self.db = DatabaseSettings()
+         self.pool = PoolSettings()
+         self.observability = ObservabilitySettings()
+
+
+ settings = Settings()
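
Values come from the environment or a `.env` file via pydantic-settings. A short sketch of consuming the computed URLs; the SQLAlchemy engine wiring here is illustrative, not necessarily how `aegra_api.core.database` does it:

```python
# Illustrative only: the computed database_url targets asyncpg, and the pool
# knobs map directly onto SQLAlchemy's async engine parameters.
from sqlalchemy.ext.asyncio import create_async_engine

from aegra_api.settings import settings

engine = create_async_engine(
    settings.db.database_url,  # "postgresql+asyncpg://postgres:postgres@localhost:5432/aegra" by default
    echo=settings.db.DB_ECHO_LOG,
    pool_size=settings.pool.SQLALCHEMY_POOL_SIZE,
    max_overflow=settings.pool.SQLALCHEMY_MAX_OVERFLOW,
)
```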
aegra_api/utils/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from aegra_api.utils.sse_utils import extract_event_sequence, generate_event_id
+
+ __all__ = ["generate_event_id", "extract_event_sequence"]
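
Based on the `{run_id}_event_{sequence}` format documented in the streaming service, a round-trip sketch of the re-exported helpers (the exact rendered id string is an assumption):

```python
# Sketch of the event-id round trip; assumes the documented
# "{run_id}_event_{sequence}" format.
from aegra_api.utils import extract_event_sequence, generate_event_id

event_id = generate_event_id("run-123", 7)  # e.g. "run-123_event_7" (assumed)
assert extract_event_sequence(event_id) == 7
```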
aegra_api/utils/assistants.py ADDED
@@ -0,0 +1,23 @@
+ from __future__ import annotations
+
+ from collections.abc import Mapping
+ from uuid import uuid5
+
+ from aegra_api.constants import ASSISTANT_NAMESPACE_UUID
+
+
+ def resolve_assistant_id(requested_id: str, available_graphs: Mapping[str, object]) -> str:
+     """Resolve an assistant identifier.
+
+     If the provided identifier matches a known graph id, derive a
+     deterministic assistant UUID using the project namespace. Otherwise,
+     return the identifier as-is.
+
+     Args:
+         requested_id: The value provided by the client (assistant UUID or graph id).
+         available_graphs: Graph registry mapping; only keys are used for membership.
+
+     Returns:
+         A string assistant_id suitable for DB lookups and FK references.
+     """
+     return str(uuid5(ASSISTANT_NAMESPACE_UUID, requested_id)) if requested_id in available_graphs else requested_id
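
A usage sketch; the registry dict below is a stand-in for the real loaded graph registry:

```python
# Sketch: uuid5 over a fixed namespace is deterministic, so a known graph id
# always maps to the same assistant UUID; anything else passes through.
from aegra_api.utils.assistants import resolve_assistant_id

graphs = {"agent": object()}  # stand-in for the loaded graph registry

assert resolve_assistant_id("agent", graphs) == resolve_assistant_id("agent", graphs)
assert resolve_assistant_id("not-a-graph", graphs) == "not-a-graph"
```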