aegis-stack 0.1.0__py3-none-any.whl

Files changed (103)
  1. aegis/__init__.py +5 -0
  2. aegis/__main__.py +374 -0
  3. aegis/core/CLAUDE.md +365 -0
  4. aegis/core/__init__.py +6 -0
  5. aegis/core/components.py +115 -0
  6. aegis/core/dependency_resolver.py +119 -0
  7. aegis/core/template_generator.py +163 -0
  8. aegis/templates/CLAUDE.md +306 -0
  9. aegis/templates/cookiecutter-aegis-project/cookiecutter.json +27 -0
  10. aegis/templates/cookiecutter-aegis-project/hooks/post_gen_project.py +172 -0
  11. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.dockerignore +71 -0
  12. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.env.example.j2 +70 -0
  13. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.gitignore +127 -0
  14. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/Dockerfile +53 -0
  15. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/Makefile +211 -0
  16. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/README.md.j2 +196 -0
  17. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/__init__.py +5 -0
  18. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/__init__.py +6 -0
  19. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/health.py +321 -0
  20. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/load_test.py +638 -0
  21. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/main.py +41 -0
  22. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/__init__.py +0 -0
  23. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/__init__.py +0 -0
  24. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/health.py +134 -0
  25. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/models.py.j2 +247 -0
  26. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/routing.py.j2 +14 -0
  27. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/tasks.py.j2 +596 -0
  28. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/hooks.py +133 -0
  29. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/main.py +16 -0
  30. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/middleware/__init__.py +1 -0
  31. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/middleware/cors.py +20 -0
  32. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/shutdown/__init__.py +1 -0
  33. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/shutdown/cleanup.py +14 -0
  34. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/startup/__init__.py +1 -0
  35. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/startup/component_health.py.j2 +190 -0
  36. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/__init__.py +0 -0
  37. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/core/__init__.py +1 -0
  38. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/core/theme.py +46 -0
  39. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/main.py +687 -0
  40. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/scheduler/__init__.py +1 -0
  41. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/scheduler/main.py +138 -0
  42. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/CLAUDE.md +213 -0
  43. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/__init__.py +6 -0
  44. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/constants.py.j2 +30 -0
  45. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/pools.py +78 -0
  46. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/__init__.py +1 -0
  47. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/load_test.py +48 -0
  48. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/media.py +41 -0
  49. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/system.py +36 -0
  50. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/registry.py +139 -0
  51. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/__init__.py +119 -0
  52. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/load_tasks.py +526 -0
  53. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/simple_system_tasks.py +32 -0
  54. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/system_tasks.py +279 -0
  55. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/config.py.j2 +119 -0
  56. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/constants.py +60 -0
  57. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/db.py +67 -0
  58. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/log.py +85 -0
  59. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/__init__.py +1 -0
  60. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/webserver.py +40 -0
  61. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/{% if cookiecutter.include_scheduler == "yes" %}scheduler.py{% endif %} +21 -0
  62. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/integrations/__init__.py +0 -0
  63. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/integrations/main.py +61 -0
  64. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/py.typed +0 -0
  65. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/__init__.py +1 -0
  66. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test.py +661 -0
  67. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test_models.py +269 -0
  68. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/shared/__init__.py +15 -0
  69. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/shared/models.py +26 -0
  70. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/__init__.py +52 -0
  71. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/alerts.py +94 -0
  72. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/health.py.j2 +1105 -0
  73. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/models.py +169 -0
  74. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/ui.py +52 -0
  75. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docker-compose.yml.j2 +195 -0
  76. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/api.md +191 -0
  77. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/components/scheduler.md +414 -0
  78. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/development.md +215 -0
  79. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/health.md +240 -0
  80. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/javascripts/mermaid-config.js +62 -0
  81. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/stylesheets/mermaid.css +95 -0
  82. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/mkdocs.yml.j2 +62 -0
  83. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/pyproject.toml.j2 +156 -0
  84. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/entrypoint.sh +87 -0
  85. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/entrypoint.sh.j2 +104 -0
  86. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/gen_docs.py +16 -0
  87. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/api/__init__.py +1 -0
  88. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/api/test_health_endpoints.py.j2 +239 -0
  89. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/components/test_scheduler.py +76 -0
  90. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/conftest.py.j2 +81 -0
  91. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/__init__.py +1 -0
  92. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_component_integration.py.j2 +376 -0
  93. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_health_logic.py.j2 +633 -0
  94. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_models.py +665 -0
  95. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_service.py +602 -0
  96. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_system_service.py +96 -0
  97. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_worker_health_registration.py.j2 +224 -0
  98. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/test_core.py +50 -0
  99. aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/uv.lock +1673 -0
  100. aegis_stack-0.1.0.dist-info/METADATA +114 -0
  101. aegis_stack-0.1.0.dist-info/RECORD +103 -0
  102. aegis_stack-0.1.0.dist-info/WHEEL +4 -0
  103. aegis_stack-0.1.0.dist-info/entry_points.txt +2 -0
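
The two-line entry_points.txt (file 103) suggests the wheel registers a console script. As a hedged illustration only — not part of the package — the standard-library snippet below lists whatever scripts an installed copy actually declares; the "aegis" module-prefix filter is an assumption inferred from the file layout above, and entry_points(group=...) requires Python 3.10+:

    # Hypothetical inspection snippet (not from aegis-stack): print the
    # console scripts the installed wheel registers via entry_points.txt.
    from importlib.metadata import entry_points

    for ep in entry_points(group="console_scripts"):
        # Module-prefix filter is an assumption; adjust to taste.
        if ep.module.startswith("aegis"):
            print(f"{ep.name} -> {ep.value}")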
aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_service.py (new file)
@@ -0,0 +1,602 @@
+"""
+Unit tests for LoadTestService.
+
+Tests business logic, data transformation, and analysis functions.
+"""
+
+import pickle
+from unittest.mock import AsyncMock, MagicMock, patch
+
+from pydantic import ValidationError
+import pytest
+
+from app.components.worker.constants import LoadTestTypes
+from app.services.load_test import LoadTestConfiguration, LoadTestService
+from app.services.load_test_models import (
+    LoadTestResult,
+    PerformanceAnalysis,
+)
+
+
+class TestLoadTestConfiguration:
+    """Test LoadTestConfiguration class (legacy config handler)."""
+
+    def test_default_configuration(self):
+        """Test configuration with defaults."""
+        config = LoadTestConfiguration()
+
+        assert config.num_tasks >= 10
+        assert config.num_tasks <= 10000
+        assert config.task_type == LoadTestTypes.CPU_INTENSIVE
+        assert config.batch_size >= 1
+        assert config.delay_ms >= 0
+
+    def test_configuration_bounds(self):
+        """Test configuration value bounds enforcement."""
+        # Test upper bounds - should raise ValidationError
+        with pytest.raises(ValidationError):
+            LoadTestConfiguration(num_tasks=50000, batch_size=200, delay_ms=10000)
+
+        # Test lower bounds - should raise ValidationError
+        with pytest.raises(ValidationError):
+            LoadTestConfiguration(num_tasks=5, batch_size=0, delay_ms=-100)
+
+    def test_to_dict(self):
+        """Test configuration serialization."""
+        config = LoadTestConfiguration(
+            num_tasks=100,
+            task_type="io_simulation",
+            batch_size=20,
+            delay_ms=50,
+            target_queue="test_queue",
+        )
+
+        result = config.model_dump()
+
+        assert result["num_tasks"] == 100
+        assert result["task_type"] == "io_simulation"
+        assert result["batch_size"] == 20
+        assert result["delay_ms"] == 50
+        assert result["target_queue"] == "test_queue"
+
+
+class TestLoadTestServiceTestTypeInfo:
+    """Test LoadTestService.get_test_type_info method."""
+
+    def test_cpu_test_type_info(self):
+        """Test CPU test type information."""
+        info = LoadTestService.get_test_type_info(LoadTestTypes.CPU_INTENSIVE)
+
+        assert info["name"] == "CPU Intensive"
+        assert "fibonacci" in info["description"].lower()
+        assert "fibonacci_n" in info["expected_metrics"]
+        assert "cpu_operations" in info["expected_metrics"]
+        assert "cpu bound" in info["performance_signature"].lower()
+
+    def test_io_test_type_info(self):
+        """Test I/O test type information."""
+        info = LoadTestService.get_test_type_info(LoadTestTypes.IO_SIMULATION)
+
+        assert info["name"] == "I/O Simulation"
+        assert "async" in info["description"].lower()
+        assert "simulated_delay_ms" in info["expected_metrics"]
+        assert "io_operations" in info["expected_metrics"]
+        assert "i/o bound" in info["performance_signature"].lower()
+
+    def test_memory_test_type_info(self):
+        """Test memory test type information."""
+        info = LoadTestService.get_test_type_info(LoadTestTypes.MEMORY_OPERATIONS)
+
+        assert info["name"] == "Memory Operations"
+        assert "allocation" in info["description"].lower()
+        assert "allocation_size" in info["expected_metrics"]
+        assert "list_sum" in info["expected_metrics"]
+        assert "memory bound" in info["performance_signature"].lower()
+
+    def test_failure_test_type_info(self):
+        """Test failure test type information."""
+        info = LoadTestService.get_test_type_info(LoadTestTypes.FAILURE_TESTING)
+
+        assert info["name"] == "Failure Testing"
+        assert "error handling" in info["description"].lower()
+        assert "failure_rate" in info["expected_metrics"]
+        assert "resilience" in info["performance_signature"].lower()
+
+    def test_unknown_test_type(self):
+        """Test handling of unknown test types."""
+        info = LoadTestService.get_test_type_info("unknown_type")
+
+        assert info == {}  # Should return empty dict for unknown types
+
+
+class TestLoadTestServiceAnalysis:
+    """Test LoadTestService analysis methods."""
+
+    def test_analyze_performance_excellent_throughput(self):
+        """Test performance analysis with excellent throughput."""
+        result_data = {
+            "metrics": {
+                "overall_throughput": 60.0,  # Excellent (>= 50)
+                "tasks_sent": 100,
+                "tasks_completed": 100,
+                "total_duration_seconds": 10.0,
+            }
+        }
+
+        analysis = LoadTestService._analyze_performance(result_data)
+
+        assert analysis["throughput_rating"] == "excellent"
+        assert analysis["efficiency_rating"] == "excellent"  # 100% completion
+        assert analysis["queue_pressure"] == "low"  # < 30s duration
+
+    def test_analyze_performance_poor_throughput(self):
+        """Test performance analysis with poor throughput."""
+        result_data = {
+            "metrics": {
+                "overall_throughput": 5.0,  # Poor (< 10)
+                "tasks_sent": 100,
+                "tasks_completed": 50,  # 50% completion
+                "total_duration_seconds": 80.0,  # High queue pressure
+            }
+        }
+
+        analysis = LoadTestService._analyze_performance(result_data)
+
+        assert analysis["throughput_rating"] == "poor"
+        assert analysis["efficiency_rating"] == "poor"  # 50% completion
+        assert analysis["queue_pressure"] == "high"  # > 60s duration
+
+    def test_analyze_performance_pydantic_models(self):
+        """Test Pydantic-based performance analysis."""
+        # Create a proper LoadTestResult
+        from app.services.load_test_models import (
+            LoadTestConfiguration as ConfigModel,
+        )
+        from app.services.load_test_models import (
+            LoadTestMetrics,
+        )
+
+        config = ConfigModel(
+            task_type="cpu_intensive",
+            num_tasks=100,
+            batch_size=10,
+            target_queue="load_test",
+        )
+
+        metrics = LoadTestMetrics(
+            tasks_sent=100,
+            tasks_completed=95,
+            tasks_failed=5,
+            total_duration_seconds=25.0,
+            overall_throughput=25.0,  # Good throughput
+            failure_rate_percent=5.0,
+        )
+
+        result = LoadTestResult(
+            status="completed",
+            test_id="test-123",
+            configuration=config,
+            metrics=metrics,
+        )
+
+        analysis = LoadTestService._analyze_performance_pydantic(result)
+
+        assert isinstance(analysis, PerformanceAnalysis)
+        assert analysis.throughput_rating == "good"  # 20 <= 25 < 50
+        assert analysis.efficiency_rating == "excellent"  # 95% completion
+        assert analysis.queue_pressure == "low"  # < 30s
+
+    def test_generate_recommendations_low_throughput(self):
+        """Test recommendations for low throughput."""
+        result_data = {
+            "metrics": {
+                "overall_throughput": 5.0,  # Low
+                "failure_rate_percent": 2.0,  # Acceptable
+                "total_duration_seconds": 20.0,
+                "tasks_sent": 100,
+            }
+        }
+
+        recommendations = LoadTestService._generate_recommendations(result_data)
+
+        assert len(recommendations) == 1
+        assert "low throughput" in recommendations[0].lower()
+        assert "worker concurrency" in recommendations[0].lower()
+
+    def test_generate_recommendations_high_failure_rate(self):
+        """Test recommendations for high failure rate."""
+        result_data = {
+            "metrics": {
+                "overall_throughput": 20.0,  # Good
+                "failure_rate_percent": 15.0,  # High
+                "total_duration_seconds": 25.0,
+                "tasks_sent": 100,
+            }
+        }
+
+        recommendations = LoadTestService._generate_recommendations(result_data)
+
+        assert len(recommendations) == 1
+        assert "high failure rate" in recommendations[0].lower()
+        assert "15.0%" in recommendations[0]
+        assert "worker logs" in recommendations[0].lower()
+
+    def test_generate_recommendations_queue_saturation(self):
+        """Test recommendations for queue saturation."""
+        result_data = {
+            "metrics": {
+                "overall_throughput": 15.0,  # Fair
+                "failure_rate_percent": 2.0,  # Good
+                "total_duration_seconds": 90.0,  # Long
+                "tasks_sent": 50,  # Few tasks for the duration
+            }
+        }
+
+        recommendations = LoadTestService._generate_recommendations(result_data)
+
+        assert len(recommendations) == 1
+        assert "queue saturation" in recommendations[0].lower()
+        assert "smaller batches" in recommendations[0].lower()
+
+
+@pytest.mark.asyncio
+class TestLoadTestServiceIntegration:
+    """Test LoadTestService integration with mocked dependencies."""
+
+    @patch("app.components.worker.pools.create_pool")
+    async def test_enqueue_load_test_success(self, mock_create_pool):
+        """Test successful load test enqueueing."""
+        # Mock pool and job
+        mock_pool = AsyncMock()
+        mock_job = MagicMock()
+        mock_job.job_id = "test-job-123"
+        mock_pool.enqueue_job.return_value = mock_job
+        mock_pool.ping.return_value = True  # For cache validation
+        mock_create_pool.return_value = mock_pool
+
+        # Create configuration
+        config = LoadTestConfiguration(
+            num_tasks=50,
+            task_type="cpu_intensive",
+            batch_size=10,
+            target_queue="load_test",
+        )
+
+        # Test enqueueing
+        task_id = await LoadTestService.enqueue_load_test(config)
+
+        # Verify results
+        assert task_id == "test-job-123"
+        mock_pool.enqueue_job.assert_called_once_with(
+            "load_test_orchestrator",
+            _queue_name="arq:queue:load_test",
+            num_tasks=50,
+            task_type="cpu_intensive",
+            batch_size=10,
+            delay_ms=0,
+            target_queue="load_test",
+        )
+        mock_pool.aclose.assert_called_once()
+
+    @patch("app.components.worker.pools.create_pool")
+    async def test_enqueue_load_test_failure(self, mock_create_pool):
+        """Test load test enqueueing failure."""
+        # Clear cache to ensure fresh mock
+        from app.components.worker.pools import clear_pool_cache
+        await clear_pool_cache()
+
+        # Mock create_pool to raise an exception
+        mock_create_pool.side_effect = Exception("Redis connection failed")
+
+        config = LoadTestConfiguration()
+
+        # Should raise the exception since pool creation fails
+        with pytest.raises(Exception, match="Redis connection failed"):
+            await LoadTestService.enqueue_load_test(config)
+
+        # No pool cleanup needed since create_pool failed
+
+    @patch("app.components.worker.pools.create_pool")
+    async def test_get_load_test_result_success(
+        self, mock_create_pool
+    ):  # noqa
+        """Test successful result retrieval with Pydantic validation."""
+        # Clear cache to ensure fresh mock
+        from app.components.worker.pools import clear_pool_cache
+        await clear_pool_cache()
+
+        # Mock Redis data (realistic orchestrator result)
+        raw_result_data = {
+            "test_id": "test-123",
+            "task_type": "io_simulation",
+            "tasks_sent": 10,
+            "tasks_completed": 10,
+            "tasks_failed": 0,
+            "total_duration_seconds": 2.5,
+            "overall_throughput_per_second": 4.0,
+            "failure_rate_percent": 0.0,
+            "completion_percentage": 100.0,
+            "average_throughput_per_second": 4.0,
+            "monitor_duration_seconds": 2.5,
+            "batch_size": 10,
+            "delay_ms": 0,
+            "target_queue": "load_test",
+            "start_time": "2023-01-01T10:00:00",
+            "end_time": "2023-01-01T10:00:02.5",
+        }
+
+        # Mock arq result format: {"r": actual_result, "t": 1, "s": true, ...}
+        arq_result = {"r": raw_result_data, "t": 1, "s": True}
+        pickled_result = pickle.dumps(arq_result)
+
+        # Mock pool
+        mock_pool = AsyncMock()
+        mock_pool.exists.return_value = True
+        mock_pool.get.return_value = pickled_result
+        mock_pool.ping.return_value = True  # For cache validation
+        mock_pool.aclose.return_value = None  # Mock cleanup
+        mock_create_pool.return_value = mock_pool
+
+        # Test result retrieval
+        result = await LoadTestService.get_load_test_result("test-123", "load_test")
+
+        # Verify result structure
+        assert result is not None
+        assert result["status"] == "completed"
+        assert result["test_id"] == "test-123"
+        assert result["metrics"]["tasks_completed"] == 10
+        assert result["metrics"]["overall_throughput"] == 4.0
+
+        # Verify analysis was added
+        assert "analysis" in result
+        assert "performance_analysis" in result["analysis"]
+        assert "recommendations" in result["analysis"]
+
+        mock_pool.aclose.assert_called_once()
+
+    @patch("app.components.worker.pools.create_pool")
+    async def test_get_load_test_result_not_found(
+        self, mock_create_pool
+    ):  # noqa
+        """Test result retrieval when task doesn't exist."""
+        # Clear cache to ensure fresh mock
+        from app.components.worker.pools import clear_pool_cache
+        await clear_pool_cache()
+
+        # Mock pool with no results
+        mock_pool = AsyncMock()
+        mock_pool.exists.return_value = False
+        mock_pool.ping.return_value = True  # For cache validation
+        mock_pool.aclose.return_value = None  # Mock cleanup
+        mock_create_pool.return_value = mock_pool
+
+        result = await LoadTestService.get_load_test_result("nonexistent", "load_test")
+
+        assert result is None
+        mock_pool.aclose.assert_called_once()
+
+    @patch("app.components.worker.pools.create_pool")
+    async def test_get_load_test_result_validation_error_fallback(
+        self, mock_create_pool
+    ):
+        """Test fallback when Pydantic validation fails."""
+        # Clear cache to ensure fresh mock
+        from app.components.worker.pools import clear_pool_cache
+        await clear_pool_cache()
+
+        # Create invalid data that will fail validation
+        invalid_result_data = {
+            "test_id": "test-123",
+            "task_type": "io_simulation",
+            "tasks_sent": -10,  # Invalid - negative value
+            "tasks_completed": 20,  # Invalid - more than sent
+            "total_duration_seconds": -5.0,  # Invalid - negative
+            "batch_size": 10,
+            "target_queue": "load_test",
+        }
+
+        arq_result = {"r": invalid_result_data}
+        pickled_result = pickle.dumps(arq_result)
+
+        mock_pool = AsyncMock()
+        mock_pool.exists.return_value = True
+        mock_pool.get.return_value = pickled_result
+        mock_pool.ping.return_value = True  # For cache validation
+        mock_pool.aclose.return_value = None  # Mock cleanup
+        mock_create_pool.return_value = mock_pool
+
+        # Should fall back to manual transformation when Pydantic validation fails
+        result = await LoadTestService.get_load_test_result("test-123", "load_test")
+
+        # Should still get a result (via fallback)
+        assert result is not None
+        mock_pool.aclose.assert_called_once()
+
+    @patch("app.components.worker.pools.create_pool")
+    async def test_get_load_test_result_exception_handling(self, mock_create_pool):
+        """Test exception handling during result retrieval."""
+        # Clear cache to ensure fresh mock
+        from app.components.worker.pools import clear_pool_cache
+        await clear_pool_cache()
+
+        # Mock create_pool to raise exception
+        mock_create_pool.side_effect = Exception("Redis connection lost")
+
+        result = await LoadTestService.get_load_test_result("test-123", "load_test")
+
+        assert result is None  # Should return None on exception
+        # No aclose to assert since create_pool raised exception
+
+
+class TestTransformOrchestratorResult:
+    """Test the orchestrator result transformation logic."""
+
+    def test_transform_complete_result(self):
+        """Test transformation with all fields present."""
+        orchestrator_result = {
+            "test_id": "transform-test",
+            "task_type": "memory_operations",
+            "tasks_sent": 100,
+            "tasks_completed": 95,
+            "tasks_failed": 5,
+            "total_duration_seconds": 45.5,
+            "overall_throughput_per_second": 2.1,
+            "failure_rate_percent": 5.0,
+            "completion_percentage": 95.0,
+            "average_throughput_per_second": 2.1,
+            "monitor_duration_seconds": 45.0,
+            "batch_size": 20,
+            "delay_ms": 100,
+            "target_queue": "system",
+            "start_time": "2023-01-01T12:00:00",
+            "end_time": "2023-01-01T12:00:45",
+            "task_ids": ["id1", "id2", "id3"],
+        }
+
+        transformed = LoadTestService._transform_orchestrator_result(
+            orchestrator_result
+        )
+
+        # Check basic structure
+        assert transformed["task"] == "load_test_orchestrator"
+        assert transformed["status"] == "completed"
+        assert transformed["test_id"] == "transform-test"
+
+        # Check configuration mapping
+        config = transformed["configuration"]
+        assert config["task_type"] == "memory_operations"
+        assert config["num_tasks"] == 100
+        assert config["batch_size"] == 20
+        assert config["delay_ms"] == 100
+        assert config["target_queue"] == "system"
+
+        # Check metrics mapping
+        metrics = transformed["metrics"]
+        assert metrics["tasks_sent"] == 100
+        assert metrics["tasks_completed"] == 95
+        assert metrics["tasks_failed"] == 5
+        assert metrics["total_duration_seconds"] == 45.5
+        assert metrics["overall_throughput"] == 2.1
+        assert metrics["failure_rate_percent"] == 5.0
+
+        # Check optional fields
+        assert transformed["start_time"] == "2023-01-01T12:00:00"
+        assert transformed["end_time"] == "2023-01-01T12:00:45"
+        assert transformed["task_ids"] == ["id1", "id2", "id3"]
+
+    def test_transform_minimal_result(self):
+        """Test transformation with minimal required fields."""
+        minimal_result = {
+            "test_id": "minimal",
+            "task_type": "cpu_intensive",
+            "tasks_sent": 10,
+            "tasks_completed": 10,
+            "total_duration_seconds": 5.0,
+            "batch_size": 10,
+            "target_queue": "load_test",
+        }
+
+        transformed = LoadTestService._transform_orchestrator_result(minimal_result)
+
+        # Should handle missing optional fields gracefully
+        assert transformed["test_id"] == "minimal"
+        assert transformed["configuration"]["task_type"] == "cpu_intensive"
+        assert transformed["metrics"]["tasks_sent"] == 10
+        assert transformed["metrics"]["tasks_failed"] == 0  # Default
+        assert transformed["metrics"]["overall_throughput"] == 0  # Default
+        assert transformed["start_time"] is None
+        assert transformed["task_ids"] == []
+
+
+# Performance and stress tests
+class TestLoadTestServicePerformance:
+    """Test performance characteristics of the service."""
+
+    def test_test_type_info_caching_behavior(self):
+        """Test that test type info doesn't have unexpected side effects."""
+        # Call multiple times to ensure no state leakage
+        info1 = LoadTestService.get_test_type_info(LoadTestTypes.CPU_INTENSIVE)
+        info2 = LoadTestService.get_test_type_info(LoadTestTypes.CPU_INTENSIVE)
+
+        # Should return same data
+        assert info1 == info2
+
+        # Modifying one shouldn't affect the other (defensive copy)
+        info1["name"] = "Modified"
+        info3 = LoadTestService.get_test_type_info(LoadTestTypes.CPU_INTENSIVE)
+        assert info3["name"] == "CPU Intensive"  # Should be unmodified
+
+    def test_analysis_with_edge_case_values(self):
+        """Test analysis functions with edge case values."""
+        # Zero duration
+        result_data = {
+            "metrics": {
+                "overall_throughput": 0.0,
+                "tasks_sent": 0,
+                "tasks_completed": 0,
+                "total_duration_seconds": 0.0,
+            }
+        }
+
+        analysis = LoadTestService._analyze_performance(result_data)
+        assert analysis["throughput_rating"] == "poor"
+        assert analysis["queue_pressure"] == "low"
+
+        # Very high values
+        result_data = {
+            "metrics": {
+                "overall_throughput": 10000.0,
+                "tasks_sent": 100000,
+                "tasks_completed": 100000,
+                "total_duration_seconds": 10.0,
+            }
+        }
+
+        analysis = LoadTestService._analyze_performance(result_data)
+        assert analysis["throughput_rating"] == "excellent"
+        assert analysis["efficiency_rating"] == "excellent"
+
+
+# Error conditions and boundary testing
+class TestLoadTestServiceErrorHandling:
+    """Test error handling in LoadTestService."""
+
+    @patch("app.core.config.get_load_test_queue")
+    def test_analyze_load_test_result_missing_configuration(self, mock_get_queue):
+        """Test analysis with missing configuration."""
+        # Mock the queue function to return a valid default
+        mock_get_queue.return_value = "load_test"
+
+        incomplete_result = {
+            "test_id": "incomplete",
+            "status": "completed",
+            # Missing configuration and metrics
+        }
+
+        # Should return a fallback LoadTestResult when validation fails
+        result = LoadTestService._analyze_load_test_result(incomplete_result)
+        assert isinstance(result, LoadTestResult)
+        assert result.status == "failed"
+        assert result.test_id == "incomplete"
+
+    def test_validate_test_execution_with_edge_cases(self):
+        """Test validation with edge case conditions."""
+        result_data = {"status": "unknown"}
+        test_info = {"validation_keys": ["some_key"]}
+
+        validation = LoadTestService._validate_test_execution(result_data, test_info)
+
+        assert validation["test_type_verified"] is False
+        assert "unknown" in validation["issues"][0]
+
+    def test_recommendations_empty_metrics(self):
+        """Test recommendations generation with empty metrics."""
+        empty_result = {"metrics": {}}
+
+        recommendations = LoadTestService._generate_recommendations(empty_result)
+
+        # Should handle missing metrics gracefully
+        assert isinstance(recommendations, list)
+        # Should still generate relevant recommendations based on defaults
+        # (likely low throughput)
+        assert len(recommendations) >= 1
aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_system_service.py (new file)
@@ -0,0 +1,96 @@
+"""Test system monitoring functions."""
+
+import pytest
+
+from app.services.system import (
+    ComponentStatus,
+    get_system_status,
+    is_system_healthy,
+    register_health_check,
+)
+
+
+class TestSystemService:
+    """Test the system monitoring functions."""
+
+    @pytest.mark.asyncio
+    async def test_component_status_creation(self) -> None:
+        """Test component status Pydantic model."""
+        status = ComponentStatus(
+            name="test_component",
+            message="All good",
+            response_time_ms=100.0,
+            metadata={"version": "1.0"},
+        )
+
+        assert status.name == "test_component"
+        assert status.healthy is True
+        assert status.message == "All good"
+        assert status.response_time_ms == 100.0
+        assert status.metadata == {"version": "1.0"}
+
+    @pytest.mark.asyncio
+    async def test_system_status_properties(self) -> None:
+        """Test system status Pydantic model properties."""
+        status = await get_system_status()
+
+        assert isinstance(status.overall_healthy, bool)
+        assert len(status.components) >= 1
+        assert isinstance(status.healthy_components, list)
+        assert isinstance(status.unhealthy_components, list)
+        assert isinstance(status.health_percentage, float)
+
+    @pytest.mark.asyncio
+    async def test_system_health_checks(self) -> None:
+        """Test basic health checks functionality."""
+        status = await get_system_status()
+
+        # Test that we get valid system information
+        assert hasattr(status, "components")
+        assert hasattr(status, "overall_healthy")
+        assert hasattr(status, "timestamp")
+        assert hasattr(status, "system_info")
+
+        # Verify components exist (at least core system checks)
+        assert len(status.components) > 0
+
+        # Check that each component has required fields
+        for component_name, component_status in status.components.items():
+            assert isinstance(component_name, str)
+            assert isinstance(component_status.healthy, bool)
+            assert isinstance(component_status.message, str)
+            assert isinstance(component_status.name, str)
+
+    @pytest.mark.asyncio
+    async def test_is_system_healthy(self) -> None:
+        """Test quick health check function."""
+        healthy = await is_system_healthy()
+        assert isinstance(healthy, bool)
+
+    @pytest.mark.asyncio
+    async def test_custom_health_check_registration(self) -> None:
+        """Test custom health check registration."""
+
+        async def custom_check() -> ComponentStatus:
+            return ComponentStatus(
+                name="custom_test",
+                message="Custom check passed",
+                response_time_ms=None,
+            )
+
+        # Register custom check
+        register_health_check("custom_test", custom_check)
+
+        try:
+            # Get status and verify custom check is included under aegis component
+            status = await get_system_status()
+            assert "aegis" in status.components
+            aegis_component = status.components["aegis"]
+            assert "custom_test" in aegis_component.sub_components
+            assert aegis_component.sub_components["custom_test"].name == "custom_test"
+            assert aegis_component.sub_components["custom_test"].healthy is True
+        finally:
+            # Clean up the custom health check registration
+            from app.services.system.health import _health_checks
+            if "custom_test" in _health_checks:
+                del _health_checks["custom_test"]