aegis-stack 0.1.0 (aegis_stack-0.1.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aegis-stack might be problematic.

Files changed (103)
  1. aegis/__init__.py +5 -0
  2. aegis/__main__.py +374 -0
  3. aegis/core/CLAUDE.md +365 -0
  4. aegis/core/__init__.py +6 -0
  5. aegis/core/components.py +115 -0
  6. aegis/core/dependency_resolver.py +119 -0
  7. aegis/core/template_generator.py +163 -0
  8. aegis/templates/CLAUDE.md +306 -0
  9. aegis/templates/cookiecutter-aegis-project/cookiecutter.json +27 -0
  10. aegis/templates/cookiecutter-aegis-project/hooks/post_gen_project.py +172 -0
  11. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.dockerignore +71 -0
  12. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.env.example.j2 +70 -0
  13. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.gitignore +127 -0
  14. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/Dockerfile +53 -0
  15. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/Makefile +211 -0
  16. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/README.md.j2 +196 -0
  17. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/__init__.py +5 -0
  18. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/__init__.py +6 -0
  19. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/health.py +321 -0
  20. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/load_test.py +638 -0
  21. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/main.py +41 -0
  22. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/__init__.py +0 -0
  23. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/__init__.py +0 -0
  24. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/health.py +134 -0
  25. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/models.py.j2 +247 -0
  26. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/routing.py.j2 +14 -0
  27. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/tasks.py.j2 +596 -0
  28. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/hooks.py +133 -0
  29. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/main.py +16 -0
  30. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/middleware/__init__.py +1 -0
  31. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/middleware/cors.py +20 -0
  32. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/shutdown/__init__.py +1 -0
  33. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/shutdown/cleanup.py +14 -0
  34. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/startup/__init__.py +1 -0
  35. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/startup/component_health.py.j2 +190 -0
  36. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/__init__.py +0 -0
  37. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/core/__init__.py +1 -0
  38. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/core/theme.py +46 -0
  39. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/main.py +687 -0
  40. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/scheduler/__init__.py +1 -0
  41. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/scheduler/main.py +138 -0
  42. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/CLAUDE.md +213 -0
  43. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/__init__.py +6 -0
  44. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/constants.py.j2 +30 -0
  45. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/pools.py +78 -0
  46. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/__init__.py +1 -0
  47. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/load_test.py +48 -0
  48. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/media.py +41 -0
  49. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/system.py +36 -0
  50. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/registry.py +139 -0
  51. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/__init__.py +119 -0
  52. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/load_tasks.py +526 -0
  53. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/simple_system_tasks.py +32 -0
  54. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/system_tasks.py +279 -0
  55. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/config.py.j2 +119 -0
  56. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/constants.py +60 -0
  57. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/db.py +67 -0
  58. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/log.py +85 -0
  59. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/__init__.py +1 -0
  60. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/webserver.py +40 -0
  61. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/{% if cookiecutter.include_scheduler == "yes" %}scheduler.py{% endif %} +21 -0
  62. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/integrations/__init__.py +0 -0
  63. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/integrations/main.py +61 -0
  64. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/py.typed +0 -0
  65. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/__init__.py +1 -0
  66. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test.py +661 -0
  67. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test_models.py +269 -0
  68. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/shared/__init__.py +15 -0
  69. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/shared/models.py +26 -0
  70. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/__init__.py +52 -0
  71. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/alerts.py +94 -0
  72. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/health.py.j2 +1105 -0
  73. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/models.py +169 -0
  74. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/ui.py +52 -0
  75. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docker-compose.yml.j2 +195 -0
  76. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/api.md +191 -0
  77. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/components/scheduler.md +414 -0
  78. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/development.md +215 -0
  79. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/health.md +240 -0
  80. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/javascripts/mermaid-config.js +62 -0
  81. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/stylesheets/mermaid.css +95 -0
  82. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/mkdocs.yml.j2 +62 -0
  83. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/pyproject.toml.j2 +156 -0
  84. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/entrypoint.sh +87 -0
  85. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/entrypoint.sh.j2 +104 -0
  86. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/gen_docs.py +16 -0
  87. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/api/__init__.py +1 -0
  88. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/api/test_health_endpoints.py.j2 +239 -0
  89. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/components/test_scheduler.py +76 -0
  90. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/conftest.py.j2 +81 -0
  91. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/__init__.py +1 -0
  92. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_component_integration.py.j2 +376 -0
  93. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_health_logic.py.j2 +633 -0
  94. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_models.py +665 -0
  95. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_service.py +602 -0
  96. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_system_service.py +96 -0
  97. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_worker_health_registration.py.j2 +224 -0
  98. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/test_core.py +50 -0
  99. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/uv.lock +1673 -0
  100. aegis_stack-0.1.0.dist-info/METADATA +114 -0
  101. aegis_stack-0.1.0.dist-info/RECORD +103 -0
  102. aegis_stack-0.1.0.dist-info/WHEEL +4 -0
  103. aegis_stack-0.1.0.dist-info/entry_points.txt +2 -0
aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test.py
@@ -0,0 +1,661 @@
+"""
+Load testing service module.
+
+This module provides business logic for orchestrating and analyzing load tests,
+separating concerns from API endpoints and worker tasks.
+"""
+
+from typing import Any
+
+from pydantic import ValidationError
+
+from app.components.worker.constants import LoadTestTypes
+from app.components.worker.pools import get_queue_pool
+from app.core.config import get_load_test_queue
+from app.core.log import logger
+from app.services.load_test_models import (
+    LoadTestConfiguration,
+    LoadTestMetrics,
+    LoadTestResult,
+    OrchestratorRawResult,
+    PerformanceAnalysis,
+    TestTypeInfo,
+    ValidationStatus,
+)
+
+__all__ = [
+    "LoadTestConfiguration",
+    "LoadTestService",
+    "quick_cpu_test",
+    "quick_io_test",
+    "quick_memory_test",
+]
+
+
+class LoadTestService:
+    """Service for managing load test operations."""
+
+    @staticmethod
+    def get_test_type_info(test_type: LoadTestTypes) -> dict[str, Any]:
+        """Get detailed information about a specific test type."""
+        test_info = {
+            LoadTestTypes.CPU_INTENSIVE: {
+                "name": "CPU Intensive",
+                "description": (
+                    "Tests worker CPU processing with fibonacci calculations"
+                ),
+                "expected_metrics": [
+                    "fibonacci_n",
+                    "fibonacci_result",
+                    "cpu_operations",
+                ],
+                "performance_signature": (
+                    "CPU bound - should show computation time scaling with problem size"
+                ),
+                "typical_duration_ms": "1-10ms per task",
+                "concurrency_impact": (
+                    "Limited by CPU cores, benefits from parallel processing"
+                ),
+                "validation_keys": ["fibonacci_n", "fibonacci_result"],
+            },
+            LoadTestTypes.IO_SIMULATION: {
+                "name": "I/O Simulation",
+                "description": "Tests async I/O handling with simulated delays",
+                "expected_metrics": [
+                    "simulated_delay_ms",
+                    "io_operations",
+                    "async_operations",
+                ],
+                "performance_signature": (
+                    "I/O bound - should show async concurrency benefits"
+                ),
+                "typical_duration_ms": (
+                    "5-30ms per task (includes simulated delays)"
+                ),
+                "concurrency_impact": (
+                    "Excellent with async - many tasks can run concurrently"
+                ),
+                "validation_keys": ["simulated_delay_ms", "io_operations"],
+            },
+            LoadTestTypes.MEMORY_OPERATIONS: {
+                "name": "Memory Operations",
+                "description": "Tests memory allocation and data structure operations",
+                "expected_metrics": [
+                    "allocation_size",
+                    "list_sum",
+                    "dict_keys",
+                    "max_value",
+                ],
+                "performance_signature": (
+                    "Memory bound - should show allocation/deallocation patterns"
+                ),
+                "typical_duration_ms": "1-5ms per task",
+                "concurrency_impact": (
+                    "Moderate - limited by memory bandwidth and GC pressure"
+                ),
+                "validation_keys": [
+                    "allocation_size",
+                    "list_sum",
+                    "dict_keys",
+                ],
+            },
+            LoadTestTypes.FAILURE_TESTING: {
+                "name": "Failure Testing",
+                "description": "Tests error handling with ~20% random failures",
+                "expected_metrics": ["failure_rate", "error_types"],
+                "performance_signature": (
+                    "Mixed - tests resilience and error handling paths"
+                ),
+                "typical_duration_ms": "1-10ms per task (when successful)",
+                "concurrency_impact": (
+                    "Tests worker recovery and error isolation"
+                ),
+                "validation_keys": ["status"],
+            },
+        }
+        return test_info.get(test_type, {})
+
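The metadata above feeds the TestTypeInfo model during analysis, and a lookup for an unlisted type falls back to an empty dict, which is why _analyze_load_test_result later normalizes the task type before calling this. An illustrative lookup, with values read straight from the table above:

    # Illustration only: unknown types simply return {}.
    info = LoadTestService.get_test_type_info(LoadTestTypes.IO_SIMULATION)
    assert info["name"] == "I/O Simulation"
    assert info["validation_keys"] == ["simulated_delay_ms", "io_operations"]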
+    @staticmethod
+    async def enqueue_load_test(config: LoadTestConfiguration) -> str:
+        """
+        Enqueue a load test orchestrator task.
+
+        Args:
+            config: Load test configuration
+
+        Returns:
+            Task ID for the orchestrator job
+        """
+        from app.components.worker.pools import get_queue_pool
+
+        logger.info(
+            f"🚀 Enqueueing load test: {config.num_tasks} {config.task_type} tasks"
+        )
+
+        # Get appropriate queue pool
+        pool, queue_name = await get_queue_pool(config.target_queue)
+
+        try:
+            # Enqueue the orchestrator task
+            job = await pool.enqueue_job(
+                "load_test_orchestrator",
+                _queue_name=queue_name,
+                **config.model_dump(),
+            )
+
+            await pool.aclose()
+
+            if job is None:
+                raise RuntimeError("Failed to enqueue job - returned None")
+
+            logger.info(f"✅ Load test orchestrator enqueued: {job.job_id}")
+            return str(job.job_id)
+
+        except Exception as e:
+            await pool.aclose()
+            logger.error(f"❌ Failed to enqueue load test: {e}")
+            raise
+
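The orchestrator task is enqueued through arq's enqueue_job, with the configuration flattened into keyword arguments via model_dump(). A minimal caller sketch, with field values mirroring quick_cpu_test() at the bottom of this file (LoadTestConfiguration's exact defaults live in app/services/load_test_models.py, which this diff does not show):

    from app.components.worker.constants import LoadTestTypes
    from app.core.config import get_load_test_queue
    from app.services.load_test import LoadTestConfiguration, LoadTestService

    async def start_cpu_burst() -> str:
        # Hypothetical caller; values mirror quick_cpu_test() below.
        config = LoadTestConfiguration(
            task_type=LoadTestTypes.CPU_INTENSIVE,
            num_tasks=50,
            batch_size=10,
            target_queue=get_load_test_queue(),
        )
        return await LoadTestService.enqueue_load_test(config)  # arq job id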
+    @staticmethod
+    async def get_load_test_result(
+        task_id: str, target_queue: str | None = None
+    ) -> dict[str, Any] | None:
+        """
+        Retrieve and analyze load test results.
+
+        Args:
+            task_id: The orchestrator task ID
+            target_queue: Queue where the test was run (defaults to configured
+                load_test queue)
+
+        Returns:
+            Analyzed load test results or None if not found
+        """
+        # Use configured load test queue if not specified
+        if target_queue is None:
+            target_queue = get_load_test_queue()
+
+        pool = None
+        try:
+            pool, _ = await get_queue_pool(target_queue)
+            # Check if result exists
+            result_key = f"arq:result:{task_id}"
+            result_exists = await pool.exists(result_key)
+
+            if not result_exists:
+                return None
+
+            # Get the result data
+            result_data = await pool.get(result_key)
+            if not result_data:
+                return None
+
+            # Deserialize the result
+            import pickle
+
+            result = pickle.loads(result_data)
+
+            # Handle different result formats
+            if isinstance(result, Exception):
+                # Task failed completely
+                return {
+                    "task": "load_test_orchestrator",
+                    "status": "failed",
+                    "error": str(result),
+                    "test_id": task_id,
+                }
+            elif isinstance(result, dict):
+                # Check if it's a direct load test result
+                if result.get("task") == "load_test_orchestrator":
+                    analyzed_result = LoadTestService._analyze_load_test_result(
+                        result
+                    )
+                    return analyzed_result.model_dump()
+                # Check if it's an arq job result with embedded data
+                elif "r" in result and isinstance(result["r"], dict):
+                    # Extract the actual result
+                    actual_result = result["r"]
+                    # Check if this looks like a load test orchestrator result
+                    if (
+                        "test_id" in actual_result
+                        and "task_type" in actual_result
+                        and "tasks_sent" in actual_result
+                    ):
+                        try:
+                            # Validate and transform using Pydantic models
+                            orchestrator_result = OrchestratorRawResult(
+                                **actual_result
+                            )
+                            load_test_result = (
+                                orchestrator_result.to_load_test_result()
+                            )
+                            analyzed_result = LoadTestService._analyze_load_test_result(
+                                load_test_result
+                            )
+                            return analyzed_result.model_dump()
+                        except ValidationError as e:
+                            logger.error(
+                                f"Failed to validate orchestrator result: {e}"
+                            )
+                            # Fall back to manual transformation if validation fails
+                            transformed_result = (
+                                LoadTestService._transform_orchestrator_result(
+                                    actual_result
+                                )
+                            )
+                            analyzed_result = LoadTestService._analyze_load_test_result(
+                                transformed_result
+                            )
+                            return analyzed_result.model_dump()
+                    elif actual_result.get("task") == "load_test_orchestrator":
+                        analyzed_result = LoadTestService._analyze_load_test_result(
+                            actual_result
+                        )
+                        return analyzed_result.model_dump()
+                elif "r" in result and isinstance(result["r"], Exception):
+                    # Task timed out or failed
+                    return {
+                        "task": "load_test_orchestrator",
+                        "status": "timed_out",
+                        "error": str(result["r"]),
+                        "test_id": task_id,
+                        "partial_info": (
+                            "Task may have completed work but timed out at "
+                            "orchestrator level"
+                        ),
+                    }
+
+            # result is already dict[str, Any] at this point
+            return result  # type: ignore[no-any-return]
+
+        except Exception as e:
+            logger.error(f"Failed to get load test result for {task_id}: {e}")
+            return None
+        finally:
+            if pool is not None:
+                await pool.aclose()
+
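Note that retrieval bypasses arq's Job API: it reads arq's default result key (arq:result:{job_id}) directly from Redis and unpickles it, so the branches above must handle raw orchestrator dicts, arq's job envelope (the "r" field), and pickled exceptions. A hypothetical poller on top of this method:

    import asyncio
    import time
    from typing import Any

    async def wait_for_load_test(
        task_id: str, timeout: float = 120.0
    ) -> dict[str, Any] | None:
        # Hypothetical helper: poll until the result lands or the timeout expires.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            result = await LoadTestService.get_load_test_result(task_id)
            if result is not None:
                return result  # analyzed result, or a "failed"/"timed_out" record
            await asyncio.sleep(1.0)
        return None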
+    @staticmethod
+    def _transform_orchestrator_result(
+        orchestrator_result: dict[str, Any],
+    ) -> dict[str, Any]:
+        """Transform orchestrator result to expected analysis format."""
+
+        # Create the configuration object from orchestrator parameters
+        configuration = {
+            "task_type": orchestrator_result.get("task_type", "unknown"),
+            "num_tasks": orchestrator_result.get("tasks_sent", 0),
+            "batch_size": orchestrator_result.get("batch_size", 0),
+            "delay_ms": orchestrator_result.get("delay_ms", 0),
+            "target_queue": orchestrator_result.get("target_queue", "unknown"),
+        }
+
+        # Create the metrics object from orchestrator result data
+        metrics = {
+            "tasks_sent": orchestrator_result.get("tasks_sent", 0),
+            "tasks_completed": orchestrator_result.get("tasks_completed", 0),
+            "tasks_failed": orchestrator_result.get("tasks_failed", 0),
+            "total_duration_seconds": orchestrator_result.get(
+                "total_duration_seconds", 0
+            ),
+            "overall_throughput": orchestrator_result.get(
+                "overall_throughput_per_second", 0
+            ),
+            "failure_rate_percent": orchestrator_result.get("failure_rate_percent", 0),
+            "completion_percentage": orchestrator_result.get(
+                "completion_percentage", 0
+            ),
+            "average_throughput_per_second": orchestrator_result.get(
+                "average_throughput_per_second", 0
+            ),
+            "monitor_duration_seconds": orchestrator_result.get(
+                "monitor_duration_seconds", 0
+            ),
+        }
+
+        # Create the transformed result
+        transformed = {
+            "task": "load_test_orchestrator",
+            "status": "completed",
+            "test_id": orchestrator_result.get("test_id", "unknown"),
+            "configuration": configuration,
+            "metrics": metrics,
+            "start_time": orchestrator_result.get("start_time"),
+            "end_time": orchestrator_result.get("end_time"),
+            "task_ids": orchestrator_result.get("task_ids", []),
+        }
+
+        return transformed
+
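For illustration, this fallback reshapes a flat orchestrator payload into the nested configuration/metrics layout the analyzer expects; note the rename of overall_throughput_per_second to overall_throughput, and that num_tasks is recovered from tasks_sent. A sketch exercising the private helper directly:

    # Illustration only: minimal raw payload and where its keys end up.
    raw = {
        "test_id": "abc123",
        "task_type": "cpu_intensive",
        "tasks_sent": 50,
        "tasks_completed": 50,
        "overall_throughput_per_second": 42.0,
    }
    shaped = LoadTestService._transform_orchestrator_result(raw)
    assert shaped["configuration"]["num_tasks"] == 50       # taken from tasks_sent
    assert shaped["metrics"]["overall_throughput"] == 42.0  # renamed key
    assert shaped["status"] == "completed"                  # always set by this path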
+    @staticmethod
+    def _analyze_load_test_result(
+        result: LoadTestResult | dict[str, Any],
+    ) -> LoadTestResult:
+        """Add analysis and validation to load test results."""
+
+        # Convert dict to model if needed
+        if isinstance(result, dict):
+            try:
+                result = LoadTestResult(**result)
+            except ValidationError as e:
+                logger.error(f"Failed to validate result as LoadTestResult: {e}")
+                # Return a basic error result
+                return LoadTestResult(
+                    status="failed",
+                    test_id=(
+                        result.get("test_id", "unknown")
+                        if isinstance(result, dict)
+                        else "unknown"
+                    ),
+                    configuration=LoadTestConfiguration(
+                        task_type=LoadTestTypes.CPU_INTENSIVE,  # Safe default enum
+                        num_tasks=10,  # Minimum valid value
+                        batch_size=1,
+                        delay_ms=0,
+                        target_queue="unknown",
+                    ),
+                    metrics=LoadTestMetrics(
+                        tasks_sent=0,
+                        tasks_completed=0,
+                        tasks_failed=0,
+                        total_duration_seconds=0.0,
+                        overall_throughput=0.0,
+                        failure_rate_percent=0.0,
+                        completion_percentage=0.0,
+                        average_throughput_per_second=0.0,
+                        monitor_duration_seconds=0.0
+                    ),
+                    start_time=None,
+                    end_time=None,
+                    error=f"Validation failed: {e}",
+                    analysis=None,
+                )
+
+        task_type = result.configuration.task_type
+
+        # Get expected characteristics for this test type
+        # Validate task type against known types
+        if task_type not in [
+            LoadTestTypes.CPU_INTENSIVE,
+            LoadTestTypes.IO_SIMULATION,
+            LoadTestTypes.MEMORY_OPERATIONS,
+            LoadTestTypes.FAILURE_TESTING,
+        ]:
+            task_type = LoadTestTypes.CPU_INTENSIVE  # Default fallback
+
+        test_info_dict = LoadTestService.get_test_type_info(task_type)
+        test_info = TestTypeInfo(**test_info_dict)
+
+        # Create analysis components
+        performance_analysis = LoadTestService._analyze_performance_pydantic(result)
+        validation_status = LoadTestService._validate_test_execution_pydantic(
+            result, test_info
+        )
+        recommendations = LoadTestService._generate_recommendations_pydantic(result)
+
+        # Add analysis to result
+        from app.services.load_test_models import LoadTestAnalysis
+
+        analysis = LoadTestAnalysis(
+            test_type_info=test_info,
+            performance_analysis=performance_analysis,
+            validation_status=validation_status,
+            recommendations=recommendations,
+        )
+
+        result.analysis = analysis
+        return result
+
+    @staticmethod
+    def _analyze_performance(result: dict[str, Any]) -> dict[str, Any]:
+        """Analyze performance characteristics of the load test."""
+        metrics = result.get("metrics", {})
+
+        analysis = {
+            "throughput_rating": "unknown",
+            "efficiency_rating": "unknown",
+            "queue_pressure": "unknown",
+        }
+
+        # Analyze throughput
+        throughput = metrics.get("overall_throughput", 0)
+        if throughput >= 50:
+            analysis["throughput_rating"] = "excellent"
+        elif throughput >= 20:
+            analysis["throughput_rating"] = "good"
+        elif throughput >= 10:
+            analysis["throughput_rating"] = "fair"
+        else:
+            analysis["throughput_rating"] = "poor"
+
+        # Analyze efficiency (completion rate)
+        tasks_sent = metrics.get("tasks_sent", 1)
+        tasks_completed = metrics.get("tasks_completed", 0)
+        completion_rate = (
+            (tasks_completed / tasks_sent) * 100 if tasks_sent > 0 else 0
+        )
+
+        if completion_rate >= 95:
+            analysis["efficiency_rating"] = "excellent"
+        elif completion_rate >= 90:
+            analysis["efficiency_rating"] = "good"
+        elif completion_rate >= 80:
+            analysis["efficiency_rating"] = "fair"
+        else:
+            analysis["efficiency_rating"] = "poor"
+
+        # Analyze queue pressure (based on duration vs expected)
+        duration = metrics.get("total_duration_seconds", 0)
+        if duration > 60:
+            analysis["queue_pressure"] = "high"
+        elif duration > 30:
+            analysis["queue_pressure"] = "medium"
+        else:
+            analysis["queue_pressure"] = "low"
+
+        return analysis
+
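This dict-based analyzer and its Pydantic twin further down share the same cut-offs: throughput of at least 50/20/10 tasks per second rates excellent/good/fair, completion of at least 95/90/80 percent likewise, and total duration over 60 or 30 seconds flags high or medium queue pressure. A worked pass through those bands, with values chosen to hit one branch each:

    analysis = LoadTestService._analyze_performance({
        "metrics": {
            "overall_throughput": 25,      # 20 <= x < 50 -> "good"
            "tasks_sent": 100,
            "tasks_completed": 96,         # 96% complete -> "excellent"
            "total_duration_seconds": 12,  # <= 30 s      -> "low"
        }
    })
    assert analysis == {
        "throughput_rating": "good",
        "efficiency_rating": "excellent",
        "queue_pressure": "low",
    }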
+    @staticmethod
+    def _validate_test_execution(
+        result: dict[str, Any], test_info: dict[str, Any]
+    ) -> dict[str, Any]:
+        """Validate that the test executed as expected."""
+        validation: dict[str, Any] = {
+            "test_type_verified": False,
+            "expected_metrics_present": False,
+            "performance_signature_match": "unknown",
+            "issues": [],
+        }
+
+        # This would need actual task result inspection to verify test type
+        # For now, we assume the test executed correctly if it completed
+        status = result.get("status", "unknown")
+        if status == "completed":
+            validation["test_type_verified"] = True
+            validation["expected_metrics_present"] = True
+            validation["performance_signature_match"] = "verified"
+        else:
+            validation["issues"].append(f"Test status: {status}")
+
+        return validation
+
+    @staticmethod
+    def _generate_recommendations(result: dict[str, Any]) -> list[str]:
+        """Generate recommendations based on test results."""
+        recommendations = []
+
+        metrics = result.get("metrics", {})
+        throughput = metrics.get("overall_throughput", 0)
+        failure_rate = metrics.get("failure_rate_percent", 0)
+
+        if throughput < 10:
+            recommendations.append(
+                "Low throughput detected. Consider reducing task complexity or "
+                "increasing worker concurrency."
+            )
+
+        if failure_rate > 5:
+            recommendations.append(
+                f"High failure rate ({failure_rate:.1f}%). Check worker logs "
+                f"for error patterns."
+            )
+
+        duration = metrics.get("total_duration_seconds", 0)
+        tasks_sent = metrics.get("tasks_sent", 1)
+
+        if duration > 60 and tasks_sent < 200:
+            recommendations.append(
+                "Long execution time for relatively few tasks suggests queue "
+                "saturation. Consider testing with smaller batches or "
+                "different queues."
+            )
+
+        return recommendations
+
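A worked pass through the recommendation thresholds: throughput under 10 and a failure rate over 5 percent each add a message, and a run over 60 seconds with fewer than 200 tasks flags queue saturation. Values below are chosen to trigger all three:

    recs = LoadTestService._generate_recommendations({
        "metrics": {
            "overall_throughput": 4,       # < 10          -> throughput advice
            "failure_rate_percent": 12.5,  # > 5           -> failure-rate advice
            "total_duration_seconds": 90,  # > 60 with ...
            "tasks_sent": 150,             # ... < 200     -> saturation advice
        }
    })
    assert len(recs) == 3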
+    @staticmethod
+    def _analyze_performance_pydantic(result: LoadTestResult) -> PerformanceAnalysis:
+        """Analyze performance characteristics using Pydantic models."""
+
+        # Analyze throughput
+        throughput = result.metrics.overall_throughput
+        if throughput >= 50:
+            throughput_rating = "excellent"
+        elif throughput >= 20:
+            throughput_rating = "good"
+        elif throughput >= 10:
+            throughput_rating = "fair"
+        else:
+            throughput_rating = "poor"
+
+        # Analyze efficiency (completion rate)
+        tasks_sent = result.metrics.tasks_sent
+        tasks_completed = result.metrics.tasks_completed
+        completion_rate = (
+            (tasks_completed / tasks_sent) * 100 if tasks_sent > 0 else 0
+        )
+
+        if completion_rate >= 95:
+            efficiency_rating = "excellent"
+        elif completion_rate >= 90:
+            efficiency_rating = "good"
+        elif completion_rate >= 80:
+            efficiency_rating = "fair"
+        else:
+            efficiency_rating = "poor"
+
+        # Analyze queue pressure (based on duration vs expected)
+        duration = result.metrics.total_duration_seconds
+        if duration > 60:
+            queue_pressure = "high"
+        elif duration > 30:
+            queue_pressure = "medium"
+        else:
+            queue_pressure = "low"
+
+        return PerformanceAnalysis(
+            throughput_rating=throughput_rating,
+            efficiency_rating=efficiency_rating,
+            queue_pressure=queue_pressure,
+        )
+
+    @staticmethod
+    def _validate_test_execution_pydantic(
+        result: LoadTestResult, test_info: TestTypeInfo
+    ) -> ValidationStatus:
+        """Validate test execution using Pydantic models."""
+
+        issues = []
+
+        # Basic validation - if we got here, the test at least completed
+        test_type_verified = result.status == "completed"
+        expected_metrics_present = result.status == "completed"
+
+        if result.status == "completed":
+            performance_signature_match = "verified"
+        else:
+            performance_signature_match = "unknown"
+            issues.append(f"Test status: {result.status}")
+
+        # Additional validation based on metrics
+        if result.metrics.tasks_completed == 0 and result.metrics.tasks_sent > 0:
+            issues.append("No tasks completed despite tasks being sent")
+
+        if result.metrics.failure_rate_percent > 50:
+            issues.append(
+                f"High failure rate: {result.metrics.failure_rate_percent:.1f}%"
+            )
+
+        return ValidationStatus(
+            test_type_verified=test_type_verified,
+            expected_metrics_present=expected_metrics_present,
+            performance_signature_match=performance_signature_match,
+            issues=issues,
+        )
+
+    @staticmethod
+    def _generate_recommendations_pydantic(result: LoadTestResult) -> list[str]:
+        """Generate recommendations using Pydantic models."""
+
+        recommendations = []
+
+        throughput = result.metrics.overall_throughput
+        failure_rate = result.metrics.failure_rate_percent
+
+        if throughput < 10:
+            recommendations.append(
+                "Low throughput detected. Consider reducing task complexity "
+                "or increasing worker concurrency."
+            )
+
+        if failure_rate > 5:
+            recommendations.append(
+                f"High failure rate ({failure_rate:.1f}%). Check worker logs "
+                f"for error patterns."
+            )
+
+        duration = result.metrics.total_duration_seconds
+        tasks_sent = result.metrics.tasks_sent
+
+        if duration > 60 and tasks_sent < 200:
+            recommendations.append(
+                "Long execution time for relatively few tasks suggests queue "
+                "saturation. Consider testing with smaller batches or "
+                "different queues."
+            )
+
+        return recommendations
+
+
+
+# Convenience functions for common load test patterns
+async def quick_cpu_test(num_tasks: int = 50) -> str:
+    """Quick CPU load test with sensible defaults."""
+    config = LoadTestConfiguration(
+        num_tasks=num_tasks,
+        task_type=LoadTestTypes.CPU_INTENSIVE,
+        batch_size=10,
+        target_queue=get_load_test_queue(),
+    )
+    return await LoadTestService.enqueue_load_test(config)
+
+
+async def quick_io_test(num_tasks: int = 100) -> str:
+    """Quick I/O load test with sensible defaults."""
+    config = LoadTestConfiguration(
+        num_tasks=num_tasks,
+        task_type=LoadTestTypes.IO_SIMULATION,
+        batch_size=20,
+        delay_ms=50,
+        target_queue=get_load_test_queue(),
+    )
+    return await LoadTestService.enqueue_load_test(config)
+
+
+async def quick_memory_test(num_tasks: int = 200) -> str:
+    """Quick memory load test with sensible defaults."""
+    config = LoadTestConfiguration(
+        num_tasks=num_tasks,
+        task_type=LoadTestTypes.MEMORY_OPERATIONS,
+        batch_size=25,
+        target_queue=get_load_test_queue(),
+    )
+    return await LoadTestService.enqueue_load_test(config)
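Taken together, the helpers give a one-call entry point. A minimal end-to-end sketch, assuming a Redis-backed worker is consuming the configured load-test queue (get_load_test_result returns None until the orchestrator finishes):

    import asyncio

    async def main() -> None:
        task_id = await quick_cpu_test(num_tasks=50)
        result = None
        while result is None:  # see the polling sketch earlier in this file
            await asyncio.sleep(1.0)
            result = await LoadTestService.get_load_test_result(task_id)
        print(result.get("status"), result.get("analysis"))

    asyncio.run(main())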