aegis_stack-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of aegis-stack has been flagged as potentially problematic.

Files changed (103)
  1. aegis/__init__.py +5 -0
  2. aegis/__main__.py +374 -0
  3. aegis/core/CLAUDE.md +365 -0
  4. aegis/core/__init__.py +6 -0
  5. aegis/core/components.py +115 -0
  6. aegis/core/dependency_resolver.py +119 -0
  7. aegis/core/template_generator.py +163 -0
  8. aegis/templates/CLAUDE.md +306 -0
  9. aegis/templates/cookiecutter-aegis-project/cookiecutter.json +27 -0
  10. aegis/templates/cookiecutter-aegis-project/hooks/post_gen_project.py +172 -0
  11. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.dockerignore +71 -0
  12. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.env.example.j2 +70 -0
  13. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.gitignore +127 -0
  14. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/Dockerfile +53 -0
  15. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/Makefile +211 -0
  16. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/README.md.j2 +196 -0
  17. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/__init__.py +5 -0
  18. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/__init__.py +6 -0
  19. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/health.py +321 -0
  20. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/load_test.py +638 -0
  21. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/main.py +41 -0
  22. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/__init__.py +0 -0
  23. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/__init__.py +0 -0
  24. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/health.py +134 -0
  25. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/models.py.j2 +247 -0
  26. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/routing.py.j2 +14 -0
  27. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/tasks.py.j2 +596 -0
  28. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/hooks.py +133 -0
  29. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/main.py +16 -0
  30. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/middleware/__init__.py +1 -0
  31. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/middleware/cors.py +20 -0
  32. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/shutdown/__init__.py +1 -0
  33. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/shutdown/cleanup.py +14 -0
  34. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/startup/__init__.py +1 -0
  35. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/startup/component_health.py.j2 +190 -0
  36. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/__init__.py +0 -0
  37. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/core/__init__.py +1 -0
  38. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/core/theme.py +46 -0
  39. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/main.py +687 -0
  40. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/scheduler/__init__.py +1 -0
  41. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/scheduler/main.py +138 -0
  42. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/CLAUDE.md +213 -0
  43. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/__init__.py +6 -0
  44. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/constants.py.j2 +30 -0
  45. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/pools.py +78 -0
  46. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/__init__.py +1 -0
  47. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/load_test.py +48 -0
  48. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/media.py +41 -0
  49. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/system.py +36 -0
  50. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/registry.py +139 -0
  51. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/__init__.py +119 -0
  52. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/load_tasks.py +526 -0
  53. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/simple_system_tasks.py +32 -0
  54. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/system_tasks.py +279 -0
  55. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/config.py.j2 +119 -0
  56. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/constants.py +60 -0
  57. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/db.py +67 -0
  58. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/log.py +85 -0
  59. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/__init__.py +1 -0
  60. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/webserver.py +40 -0
  61. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/{% if cookiecutter.include_scheduler == "yes" %}scheduler.py{% endif %} +21 -0
  62. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/integrations/__init__.py +0 -0
  63. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/integrations/main.py +61 -0
  64. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/py.typed +0 -0
  65. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/__init__.py +1 -0
  66. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test.py +661 -0
  67. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test_models.py +269 -0
  68. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/shared/__init__.py +15 -0
  69. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/shared/models.py +26 -0
  70. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/__init__.py +52 -0
  71. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/alerts.py +94 -0
  72. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/health.py.j2 +1105 -0
  73. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/models.py +169 -0
  74. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/ui.py +52 -0
  75. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docker-compose.yml.j2 +195 -0
  76. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/api.md +191 -0
  77. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/components/scheduler.md +414 -0
  78. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/development.md +215 -0
  79. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/health.md +240 -0
  80. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/javascripts/mermaid-config.js +62 -0
  81. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/stylesheets/mermaid.css +95 -0
  82. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/mkdocs.yml.j2 +62 -0
  83. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/pyproject.toml.j2 +156 -0
  84. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/entrypoint.sh +87 -0
  85. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/entrypoint.sh.j2 +104 -0
  86. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/gen_docs.py +16 -0
  87. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/api/__init__.py +1 -0
  88. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/api/test_health_endpoints.py.j2 +239 -0
  89. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/components/test_scheduler.py +76 -0
  90. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/conftest.py.j2 +81 -0
  91. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/__init__.py +1 -0
  92. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_component_integration.py.j2 +376 -0
  93. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_health_logic.py.j2 +633 -0
  94. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_models.py +665 -0
  95. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_service.py +602 -0
  96. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_system_service.py +96 -0
  97. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_worker_health_registration.py.j2 +224 -0
  98. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/test_core.py +50 -0
  99. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/uv.lock +1673 -0
  100. aegis_stack-0.1.0.dist-info/METADATA +114 -0
  101. aegis_stack-0.1.0.dist-info/RECORD +103 -0
  102. aegis_stack-0.1.0.dist-info/WHEEL +4 -0
  103. aegis_stack-0.1.0.dist-info/entry_points.txt +2 -0
@@ -0,0 +1,279 @@
+"""
+System and orchestration tasks.
+
+Contains the load test orchestrator, which spawns many tasks to measure queue throughput.
+"""
+
+import asyncio
+from datetime import datetime
+from typing import Any
+
+from app.components.worker.constants import LoadTestTypes, TaskNames
+from app.core.config import get_load_test_queue
+from app.core.log import logger
+
+
+async def load_test_orchestrator(
+    ctx: dict[str, Any],
+    num_tasks: int = 100,
+    task_type: LoadTestTypes = LoadTestTypes.CPU_INTENSIVE,
+    batch_size: int = 10,
+    delay_ms: int = 0,
+    target_queue: str | None = None,
+    **kwargs: Any,
+) -> dict[str, Any]:
+    """
+    Load test orchestrator that spawns many lightweight tasks to measure queue
+    throughput.
+
+    Instead of one task doing heavy work, the orchestrator spawns hundreds of
+    lightweight tasks to genuinely stress the queue infrastructure and measure
+    meaningful performance metrics such as tasks/second.
+
+    Args:
+        num_tasks: Number of tasks to spawn for the load test
+        task_type: Type of worker task to spawn (cpu_intensive, io_simulation,
+            memory_operations)
+        batch_size: How many tasks to send concurrently per batch
+        delay_ms: Delay between batches in milliseconds
+        target_queue: Which queue to test (defaults to the configured load_test queue)
+
+    Returns:
+        Comprehensive load test results with throughput metrics
+    """
+    start_time = datetime.now()
+    test_id = ctx.get("job_id", "unknown")
+    task_ids: list[str] = []
+    tasks_sent = 0
+
+    # Use the configured load test queue if none was specified
+    if target_queue is None:
+        target_queue = get_load_test_queue()
+
+    logger.info(
+        f"🚀 Starting load test orchestrator: {num_tasks} {task_type} tasks "
+        f"(batches of {batch_size})"
+    )
+
+    try:
+        # Import here to avoid circular imports
+        from app.components.worker.pools import get_queue_pool
+
+        # Get the queue pool for enqueueing
+        pool, queue_name = await get_queue_pool(target_queue)
+
+        # Map the task type to the actual function name once, then spawn in batches
+        task_func = _get_task_function_name(task_type)
+
+        for batch_start in range(0, num_tasks, batch_size):
+            batch_end = min(batch_start + batch_size, num_tasks)
+            current_batch_size = batch_end - batch_start
+
+            # Enqueue one batch of tasks
+            batch_jobs = []
+            for _ in range(current_batch_size):
+                job = await pool.enqueue_job(task_func, _queue_name=queue_name)
+                if job is not None:
+                    batch_jobs.append(job)
+                    task_ids.append(job.job_id)
+
+            tasks_sent += current_batch_size
+            logger.info(
+                f"📤 Sent batch: {current_batch_size} tasks "
+                f"(total: {tasks_sent}/{num_tasks})"
+            )
+
+            # Add a configurable delay between batches if requested
+            if delay_ms > 0 and batch_end < num_tasks:
+                await asyncio.sleep(delay_ms / 1000.0)
+
+        # Don't close the pool yet - it is still needed for monitoring!
+        # await pool.aclose()
+
+        logger.info(f"✅ All {tasks_sent} tasks enqueued to {queue_name}")
+
+        # Monitor task completion with a timeout based on the queue configuration
+        from app.components.worker.registry import get_queue_metadata
+
+        queue_metadata = get_queue_metadata(target_queue)
+        monitor_timeout = queue_metadata.get("timeout", 300)  # Use the queue's timeout
+
+        logger.info(f"⏱️ Monitoring task completion (timeout: {monitor_timeout}s)...")
+
+        completion_result = await _monitor_task_completion(
+            task_ids=task_ids,
+            pool=pool,
+            expected_tasks=tasks_sent,
+            timeout_seconds=monitor_timeout,
+        )
+
+        # Now the pool can be closed, since monitoring is done
+        await pool.aclose()
+
+        end_time = datetime.now()
+        total_duration = (end_time - start_time).total_seconds()
+
+        # Combine orchestrator stats with the completion monitoring results
+        result = {
+            "test_id": test_id,
+            "task_type": task_type.value,
+            "tasks_sent": tasks_sent,
+            "task_ids": task_ids[:10],  # Sample of IDs for debugging
+            "batch_size": batch_size,
+            "delay_ms": delay_ms,
+            "target_queue": target_queue,
+            "start_time": start_time.isoformat(),
+            "end_time": end_time.isoformat(),
+            "total_duration_seconds": round(total_duration, 2),
+            **completion_result,  # Merge in the monitoring results
+        }
+
+        # Calculate overall throughput based on completed tasks
+        if result.get("tasks_completed", 0) > 0:
+            result["overall_throughput_per_second"] = round(
+                result["tasks_completed"] / total_duration, 2
+            )
+        else:
+            result["overall_throughput_per_second"] = 0
+
+        logger.info(
+            f"🏁 Load test complete: {result['tasks_completed']}/{tasks_sent} "
+            f"tasks in {total_duration:.1f}s"
+        )
+        logger.info(
+            f"📈 Throughput: {result['overall_throughput_per_second']} tasks/sec"
+        )
+
+        return result
+
+    except Exception as e:
+        logger.error(f"Load test orchestrator failed: {e}")
+        return {"test_id": test_id, "error": str(e), "tasks_sent": tasks_sent}
+
+
+def _get_task_function_name(task_type: LoadTestTypes) -> str:
+    """Map a load test type to the actual task function name."""
+    task_map = {
+        LoadTestTypes.CPU_INTENSIVE: TaskNames.CPU_INTENSIVE_TASK,
+        LoadTestTypes.IO_SIMULATION: TaskNames.IO_SIMULATION_TASK,
+        LoadTestTypes.MEMORY_OPERATIONS: TaskNames.MEMORY_OPERATIONS_TASK,
+        LoadTestTypes.FAILURE_TESTING: TaskNames.FAILURE_TESTING_TASK,
+    }
+    return task_map.get(task_type, TaskNames.CPU_INTENSIVE_TASK)
+
+
+async def _monitor_task_completion(
+    task_ids: list[str],
+    pool: Any,
+    expected_tasks: int,
+    timeout_seconds: int = 300,
+    poll_interval: float = 2.0,
+) -> dict[str, Any]:
+    """
+    Monitor task completion by checking job results directly.
+
+    This avoids Redis queue type errors by tracking job completion
+    instead of trying to read queue internals.
+    """
+    start_monitor = datetime.now()
+    tasks_completed = 0
+    tasks_failed = 0
+    last_progress_time = start_monitor
+    last_completed = 0
+
+    # Track which task IDs we have already seen complete
+    completed_ids: set[str] = set()
+    failed_ids: set[str] = set()
+
+    try:
+        while True:
+            # Check each task ID for completion
+            for task_id in task_ids:
+                if task_id in completed_ids or task_id in failed_ids:
+                    continue  # Already processed
+
+                # Check whether a job result exists
+                result_key = f"arq:result:{task_id}"
+                result_data = await pool.get(result_key)
+
+                if result_data:
+                    # arq stores results as msgpack; the existence of the key is
+                    # enough to know the job finished, so count it as completed
+                    completed_ids.add(task_id)
+                    tasks_completed += 1
+
+            tasks_done = tasks_completed + tasks_failed
+
+            # Calculate throughput
+            elapsed = (datetime.now() - start_monitor).total_seconds()
+            throughput = tasks_completed / elapsed if elapsed > 0 else 0
+
+            # Record whether we are still making progress
+            if tasks_completed > last_completed:
+                last_progress_time = datetime.now()
+                last_completed = tasks_completed
+
+            # Progress logging: every 10 tasks or at completion
+            progress_pct = (
+                (tasks_done / expected_tasks * 100) if expected_tasks > 0 else 0
+            )
+            if tasks_done % 10 == 0 or tasks_done == expected_tasks:
+                logger.info(
+                    f"📈 Progress: {tasks_done}/{expected_tasks} "
+                    f"({progress_pct:.0f}% - completed: {tasks_completed}, "
+                    f"failed: {tasks_failed}) throughput: {throughput:.1f} tasks/sec"
+                )
+
+            # Check completion
+            if tasks_done >= expected_tasks:
+                logger.info(
+                    f"✅ All tasks completed: {tasks_completed} success, "
+                    f"{tasks_failed} failed"
+                )
+                break
+
+            # Check timeout
+            if elapsed > timeout_seconds:
+                logger.warning(f"⏱️ Load test timed out after {timeout_seconds}s")
+                break
+
+            # Check whether we are stuck (no progress for 30 seconds)
+            stuck_duration = (datetime.now() - last_progress_time).total_seconds()
+            if stuck_duration > 30 and tasks_done > 0:
+                logger.warning(
+                    f"⚠️ No progress for {stuck_duration:.0f}s, stopping monitor"
+                )
+                break
+
+            await asyncio.sleep(poll_interval)
+
+    except Exception as e:
+        logger.error(f"Task monitoring error: {e}")
+
+    # Final metrics
+    final_elapsed = (datetime.now() - start_monitor).total_seconds()
+
+    return {
+        "tasks_completed": tasks_completed,
+        "tasks_failed": tasks_failed,
+        "monitor_duration_seconds": round(final_elapsed, 2),
+        "average_throughput_per_second": round(tasks_completed / final_elapsed, 2)
+        if final_elapsed > 0
+        else 0,
+        "completion_percentage": round(tasks_completed / expected_tasks * 100, 1)
+        if expected_tasks > 0
+        else 0,
+        "failure_rate_percent": round(tasks_failed / expected_tasks * 100, 1)
+        if expected_tasks > 0
+        else 0,
+    }
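For orientation (this example is not part of the wheel): enqueueing the orchestrator from application code with arq could look roughly like the sketch below. It assumes a local Redis at redis://localhost:6379 and omits queue routing; in a generated project the job may also need a `_queue_name` matching one of the discovered worker queues.

import asyncio

from arq import create_pool
from arq.connections import RedisSettings


async def run_load_test() -> None:
    # Connect to the same Redis instance the workers poll
    pool = await create_pool(RedisSettings.from_dsn("redis://localhost:6379"))

    # Enqueue the orchestrator itself as a job; it fans out the real load
    job = await pool.enqueue_job(
        "load_test_orchestrator",
        num_tasks=200,
        batch_size=20,
        delay_ms=50,
    )
    if job is not None:
        # Wait for the orchestrator to return its metrics dict
        result = await job.result(timeout=600)
        print(result["overall_throughput_per_second"], "tasks/sec")

    await pool.aclose()


asyncio.run(run_load_test())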
@@ -0,0 +1,119 @@
+# app/core/config.py
+"""
+Application configuration management using Pydantic's BaseSettings.
+
+This module centralizes application settings, allowing them to be loaded
+from environment variables for easy configuration in different environments.
+"""
+
+from typing import Any
+
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+class Settings(BaseSettings):
+    """
+    Defines application settings.
+
+    `model_config` specifies that settings should be loaded from a .env file.
+    """
+
+    # Application environment: "dev" or "prod"
+    APP_ENV: str = "dev"
+
+    # Log level for the application
+    LOG_LEVEL: str = "INFO"
+
+    # Port for the web server
+    PORT: int = 8000
+
+    # Development settings
+    AUTO_RELOAD: bool = False
+
+    # Docker settings (used by docker-compose)
+    AEGIS_STACK_TAG: str = "aegis-stack:latest"
+    AEGIS_STACK_VERSION: str = "dev"
+
+    # Health monitoring and alerting
+    HEALTH_CHECK_ENABLED: bool = True
+    HEALTH_CHECK_INTERVAL_MINUTES: int = 5
+
+    # Health check performance settings
+    HEALTH_CHECK_TIMEOUT_SECONDS: float = 2.0
+    SYSTEM_METRICS_CACHE_SECONDS: int = 5
+
+    # Basic alerting configuration
+    ALERTING_ENABLED: bool = False
+    ALERT_COOLDOWN_MINUTES: int = 60  # Minutes between repeated alerts for the same issue
+
+    # Health check thresholds
+    MEMORY_THRESHOLD_PERCENT: float = 90.0
+    DISK_THRESHOLD_PERCENT: float = 85.0
+    CPU_THRESHOLD_PERCENT: float = 95.0
+
+    {% if cookiecutter.include_redis == "yes" %}
+    # Redis settings for arq background tasks
+    REDIS_URL: str = "redis://localhost:6379"
+    REDIS_DB: int = 0
+    {% endif %}
+
+    {% if cookiecutter.include_worker == "yes" %}
+    # arq worker settings (shared across all workers)
+    WORKER_KEEP_RESULT_SECONDS: int = 3600  # Keep job results for 1 hour
+    WORKER_MAX_TRIES: int = 3
+
+    # Pure arq implementation: no extra configuration is needed here.
+    # Worker configuration comes from the individual WorkerSettings classes
+    # in app/components/worker/queues/ - just import and use them as arq intended.
+    {% endif %}
+
+    {% if cookiecutter.include_database == "yes" %}
+    # Database settings (SQLite)
+    DATABASE_URL: str = "sqlite:///./data/app.db"
+    DATABASE_ENGINE_ECHO: bool = False
+    DATABASE_CONNECT_ARGS: dict[str, Any] = {"check_same_thread": False}
+    {% endif %}
+
+    model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8")
+
+
+settings = Settings()
+
+
+{% if cookiecutter.include_worker == "yes" %}
+# Pure arq queue helper functions - these use dynamic discovery
+def get_available_queues() -> list[str]:
+    """Get all available queue names via dynamic discovery."""
+    try:
+        from app.components.worker.registry import discover_worker_queues
+
+        queues: list[str] = discover_worker_queues()
+        return queues
+    except ImportError:
+        # Worker components are not available
+        return []
+
+
+def get_default_queue() -> str:
+    """Get the default queue name for load testing."""
+    # Prefer the load_test queue if it exists, otherwise use the first available
+    available = get_available_queues()
+    if "load_test" in available:
+        return "load_test"
+    return available[0] if available else "system"
+
+
+def get_load_test_queue() -> str:
+    """Get the queue name for load testing."""
+    available = get_available_queues()
+    return "load_test" if "load_test" in available else get_default_queue()
+
+
+def is_valid_queue(queue_name: str) -> bool:
+    """Check whether a queue name is valid."""
+    try:
+        from app.components.worker.registry import validate_queue_name
+
+        result: bool = validate_queue_name(queue_name)
+        return result
+    except ImportError:
+        # Worker components are not available, so no queues are valid
+        return False
+{% endif %}
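A minimal usage sketch for the rendered module, assuming a project generated with include_worker set to "yes" (the printed queue names are illustrative, since they depend on the generated registry):

from app.core.config import (
    get_available_queues,
    get_load_test_queue,
    is_valid_queue,
    settings,
)

# Values come from .env or the process environment, falling back to the defaults above
print(settings.APP_ENV, settings.PORT)  # e.g. "dev" 8000

# The queue helpers degrade gracefully when the worker component is absent
print(get_available_queues())  # e.g. ["load_test", "media", "system"]
queue = get_load_test_queue()
assert is_valid_queue(queue)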
@@ -0,0 +1,60 @@
+"""
+Application constants.
+
+This module contains truly immutable values that never change across environments.
+For environment-dependent configuration, see app.core.config.
+
+Following 12-Factor App principles:
+- Constants = code (version controlled, immutable across deployments)
+- Configuration = environment (varies between dev/staging/production)
+"""
+
+
+class APIEndpoints:
+    """API endpoint paths - immutable across all environments."""
+
+    HEALTH_BASIC = "/health/"
+    HEALTH_DETAILED = "/health/detailed"
+    HEALTH_DASHBOARD = "/health/dashboard"
+
+
+class Defaults:
+    """Default values for timeouts and limits."""
+
+    # API timeouts (seconds)
+    API_TIMEOUT = 10.0
+    HEALTH_CHECK_TIMEOUT = 5.0
+
+    # Retry configuration
+    MAX_RETRIES = 3
+    RETRY_BACKOFF = 1.0
+
+    # Health check intervals (seconds)
+    HEALTH_CHECK_INTERVAL = 30
+    COMPONENT_CHECK_TIMEOUT = 2.0
+
+
+class CLI:
+    """CLI-specific constants."""
+
+    # Display limits
+    MAX_METADATA_DISPLAY_LENGTH = 30
+
+    # Output formatting
+    HEALTH_PERCENTAGE_DECIMALS = 1
+    TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+
+class HTTP:
+    """HTTP-related constants."""
+
+    # Status codes we care about
+    OK = 200
+    SERVICE_UNAVAILABLE = 503
+    INTERNAL_SERVER_ERROR = 500
+
+    # Headers
+    CONTENT_TYPE_JSON = "application/json"
+    USER_AGENT = "AegisStack-CLI/1.0"
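As an illustration of how these constants compose (a sketch only; httpx and the base URL are assumptions, not part of this file):

import httpx

from app.core.constants import APIEndpoints, Defaults, HTTP

resp = httpx.get(
    "http://localhost:8000" + APIEndpoints.HEALTH_DETAILED,
    timeout=Defaults.HEALTH_CHECK_TIMEOUT,
    headers={"User-Agent": HTTP.USER_AGENT},
)
if resp.status_code == HTTP.SERVICE_UNAVAILABLE:
    print("One or more components are degraded")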
@@ -0,0 +1,67 @@
+# app/core/db.py
+"""
+Database configuration and session management.
+
+This module provides SQLite database connectivity using SQLModel and SQLAlchemy.
+It includes proper session management with transaction handling and foreign key
+support.
+"""
+
+from collections.abc import Generator
+from contextlib import contextmanager
+from typing import Any
+
+from sqlalchemy import create_engine, event
+from sqlalchemy.orm import sessionmaker
+from sqlmodel import Session
+
+from app.core.config import settings
+
+# Create the SQLite engine with proper configuration
+engine = create_engine(
+    settings.DATABASE_URL,
+    connect_args=settings.DATABASE_CONNECT_ARGS,
+    echo=settings.DATABASE_ENGINE_ECHO,
+)
+
+
+# Enable foreign key constraints for SQLite
+@event.listens_for(engine, "connect")
+def set_sqlite_pragma(dbapi_connection: Any, connection_record: Any) -> None:
+    """Enable foreign key constraints in SQLite."""
+    cursor = dbapi_connection.cursor()
+    cursor.execute("PRAGMA foreign_keys=ON")
+    cursor.close()
+
+
+# Configure the session factory with the SQLModel Session class
+SessionLocal = sessionmaker(
+    class_=Session, bind=engine, autoflush=False, autocommit=False
+)
+
+
+@contextmanager
+def db_session(autocommit: bool = True) -> Generator[Session, None, None]:
+    """
+    Database session context manager with automatic transaction handling.
+
+    Args:
+        autocommit: Whether to automatically commit the transaction on success
+
+    Yields:
+        Session: Database session instance
+
+    Example:
+        with db_session() as session:
+            # Your database operations here
+            result = session.query(MyModel).first()
+    """
+    session: Session = SessionLocal()
+    try:
+        yield session
+        if autocommit:
+            session.commit()
+    except Exception:
+        session.rollback()
+        raise
+    finally:
+        session.close()
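A usage sketch under stated assumptions: `Item` is a hypothetical SQLModel table defined here for illustration, not a model shipped in this package.

from sqlmodel import Field, SQLModel, select

from app.core.db import db_session, engine


class Item(SQLModel, table=True):
    id: int | None = Field(default=None, primary_key=True)
    name: str


# Create tables once at startup
SQLModel.metadata.create_all(engine)

# Writes commit automatically on clean exit and roll back on exceptions
with db_session() as session:
    session.add(Item(name="example"))

# Read-only work can opt out of the commit
with db_session(autocommit=False) as session:
    items = session.exec(select(Item)).all()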
@@ -0,0 +1,85 @@
+# app/core/log.py
+"""
+Core logging configuration for the application.
+
+This module sets up structlog to provide structured, context-aware logging.
+It supports both human-readable console output for development and JSON
+output for production environments.
+"""
+
+import logging
+import sys
+
+import structlog
+from structlog.types import Processor
+
+from app.core.config import settings
+
+# A global logger instance for easy access throughout the application
+logger: structlog.stdlib.BoundLogger = structlog.get_logger()
+
+
+def setup_logging() -> None:
+    """
+    Configures logging for the entire application.
+
+    This function sets up structlog with processors for structured logging.
+    It routes all standard library logging through structlog to ensure
+    consistent log formats. The output format is determined by the APP_ENV
+    setting (dev-friendly console format or production-ready JSON format).
+    """
+    # The shared processors applied to every log entry
+    shared_processors: list[Processor] = [
+        structlog.stdlib.add_logger_name,
+        structlog.stdlib.add_log_level,
+        structlog.stdlib.PositionalArgumentsFormatter(),
+        structlog.processors.TimeStamper(fmt="iso"),
+        structlog.processors.StackInfoRenderer(),
+        structlog.processors.format_exc_info,
+    ]
+
+    # Configure structlog
+    structlog.configure(
+        processors=shared_processors
+        + [
+            # Prepare the event dict for `ProcessorFormatter`.
+            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
+        ],
+        logger_factory=structlog.stdlib.LoggerFactory(),
+        wrapper_class=structlog.stdlib.BoundLogger,
+        cache_logger_on_first_use=True,
+    )
+
+    # Define the formatter based on the environment
+    if settings.APP_ENV == "dev":
+        formatter = structlog.stdlib.ProcessorFormatter(
+            # The final processor formats the log entry for console output.
+            processor=structlog.dev.ConsoleRenderer(colors=True),
+        )
+    else:
+        formatter = structlog.stdlib.ProcessorFormatter(
+            # The final processor formats the log entry as JSON.
+            processor=structlog.processors.JSONRenderer(),
+            # Run the shared processors on standard-library records as well
+            foreign_pre_chain=shared_processors,
+        )
+
+    # Configure the root logger
+    handler = logging.StreamHandler(sys.stdout)
+    handler.setFormatter(formatter)
+    root_logger = logging.getLogger()
+    root_logger.addHandler(handler)
+    root_logger.setLevel(settings.LOG_LEVEL.upper())
+
+    # Adjust log levels for noisy third-party libraries
+    logging.getLogger("flet_core").setLevel(logging.INFO)
+    logging.getLogger("flet_runtime").setLevel(logging.INFO)
+    logging.getLogger("flet_fastapi").setLevel(logging.INFO)
+    logging.getLogger("uvicorn.access").setLevel(logging.WARNING)
+
+    log_format = "DEV" if settings.APP_ENV == "dev" else "JSON"
+    logger.info(
+        "Logging setup complete",
+        level=settings.LOG_LEVEL,
+        log_format=log_format,
+    )
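The intended call pattern, sketched below; the event name and key-value fields are illustrative, and the rendered output lines are approximate:

import structlog

from app.core.log import setup_logging

setup_logging()  # call once at process startup

log = structlog.get_logger()
log.info("task_enqueued", queue="system", job_id="abc123")
# APP_ENV=dev  -> colored console line, roughly: "... [info] task_enqueued job_id=abc123 queue=system"
# APP_ENV=prod -> JSON, roughly: {"event": "task_enqueued", "queue": "system", "job_id": "abc123", ...}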
@@ -0,0 +1 @@
+# Entry points for different Aegis Stack execution modes
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+"""
+Web server entry point for Aegis Stack.
+Runs FastAPI + Flet only (clean separation of concerns).
+"""
+
+import uvicorn
+
+from app.core.config import settings
+from app.core.log import logger, setup_logging
+from app.integrations.main import create_integrated_app
+
+
+def main() -> None:
+    """Main webserver entry point."""
+    setup_logging()
+    logger.info("Starting Aegis Stack Web Server...")
+
+    # Run the web server
+    if settings.AUTO_RELOAD:
+        # When reload is enabled, uvicorn requires an import string
+        uvicorn.run(
+            "app.integrations.main:create_integrated_app",
+            factory=True,
+            host="0.0.0.0",
+            port=settings.PORT,
+            reload=True,
+        )
+    else:
+        # Use the integration layer (handles webserver hooks, service discovery, etc.)
+        app = create_integrated_app()
+        uvicorn.run(
+            app,
+            host="0.0.0.0",
+            port=settings.PORT,
+        )
+
+
+if __name__ == "__main__":
+    main()
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+"""
+Scheduler entrypoint for {{ cookiecutter.project_name }}.
+
+This entrypoint starts the scheduler component.
+"""
+
+import asyncio
+
+from app.components.scheduler.main import run_scheduler
+from app.core.log import setup_logging
+
+
+async def main() -> None:
+    """Main scheduler entry point."""
+    setup_logging()
+    await run_scheduler()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())