aegis-stack 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aegis-stack might be problematic. Click here for more details.
- aegis/__init__.py +5 -0
- aegis/__main__.py +374 -0
- aegis/core/CLAUDE.md +365 -0
- aegis/core/__init__.py +6 -0
- aegis/core/components.py +115 -0
- aegis/core/dependency_resolver.py +119 -0
- aegis/core/template_generator.py +163 -0
- aegis/templates/CLAUDE.md +306 -0
- aegis/templates/cookiecutter-aegis-project/cookiecutter.json +27 -0
- aegis/templates/cookiecutter-aegis-project/hooks/post_gen_project.py +172 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.dockerignore +71 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.env.example.j2 +70 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.gitignore +127 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/Dockerfile +53 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/Makefile +211 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/README.md.j2 +196 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/__init__.py +5 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/__init__.py +6 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/health.py +321 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/load_test.py +638 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/main.py +41 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/__init__.py +0 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/__init__.py +0 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/health.py +134 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/models.py.j2 +247 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/routing.py.j2 +14 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/tasks.py.j2 +596 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/hooks.py +133 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/main.py +16 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/middleware/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/middleware/cors.py +20 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/shutdown/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/shutdown/cleanup.py +14 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/startup/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/startup/component_health.py.j2 +190 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/__init__.py +0 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/core/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/core/theme.py +46 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/main.py +687 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/scheduler/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/scheduler/main.py +138 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/CLAUDE.md +213 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/__init__.py +6 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/constants.py.j2 +30 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/pools.py +78 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/load_test.py +48 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/media.py +41 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/system.py +36 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/registry.py +139 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/__init__.py +119 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/load_tasks.py +526 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/simple_system_tasks.py +32 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/system_tasks.py +279 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/config.py.j2 +119 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/constants.py +60 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/db.py +67 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/log.py +85 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/webserver.py +40 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/{% if cookiecutter.include_scheduler == "yes" %}scheduler.py{% endif %} +21 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/integrations/__init__.py +0 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/integrations/main.py +61 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/py.typed +0 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test.py +661 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test_models.py +269 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/shared/__init__.py +15 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/shared/models.py +26 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/__init__.py +52 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/alerts.py +94 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/health.py.j2 +1105 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/models.py +169 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/ui.py +52 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docker-compose.yml.j2 +195 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/api.md +191 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/components/scheduler.md +414 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/development.md +215 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/health.md +240 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/javascripts/mermaid-config.js +62 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/stylesheets/mermaid.css +95 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/mkdocs.yml.j2 +62 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/pyproject.toml.j2 +156 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/entrypoint.sh +87 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/entrypoint.sh.j2 +104 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/gen_docs.py +16 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/api/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/api/test_health_endpoints.py.j2 +239 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/components/test_scheduler.py +76 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/conftest.py.j2 +81 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_component_integration.py.j2 +376 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_health_logic.py.j2 +633 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_models.py +665 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_service.py +602 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_system_service.py +96 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_worker_health_registration.py.j2 +224 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/test_core.py +50 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/uv.lock +1673 -0
- aegis_stack-0.1.0.dist-info/METADATA +114 -0
- aegis_stack-0.1.0.dist-info/RECORD +103 -0
- aegis_stack-0.1.0.dist-info/WHEEL +4 -0
- aegis_stack-0.1.0.dist-info/entry_points.txt +2 -0
|
@@ -0,0 +1,665 @@
|
|
|
"""
Unit tests for load test Pydantic models.

Tests validation, constraints, and data transformation for all load test models.
"""

from itertools import product

import pytest
from pydantic import ValidationError

from app.components.worker.constants import LoadTestTypes
from app.services.load_test_models import (
    LoadTestConfiguration,
    LoadTestErrorModel as LoadTestError,
    LoadTestMetrics,
    LoadTestResult,
    OrchestratorRawResult,
    PerformanceAnalysis,
    TestTypeInfo,
    ValidationStatus,
)
class TestLoadTestConfiguration:
    """Exercise the field constraints on the LoadTestConfiguration model."""

    @staticmethod
    def _build(**overrides):
        """Return a configuration built from known-good defaults plus *overrides*."""
        params = {
            "task_type": "cpu_intensive",
            "num_tasks": 100,
            "batch_size": 10,
            "target_queue": "load_test",
        }
        params.update(overrides)
        return LoadTestConfiguration(**params)

    def test_valid_configuration(self):
        """A fully specified configuration is accepted and stored correctly."""
        cfg = self._build(delay_ms=50)

        # The task_type string is coerced to the LoadTestTypes enum.
        assert cfg.task_type == LoadTestTypes.CPU_INTENSIVE
        assert cfg.num_tasks == 100
        assert cfg.batch_size == 10
        assert cfg.delay_ms == 50
        assert cfg.target_queue == "load_test"

    def test_num_tasks_validation(self):
        """num_tasks must lie in the inclusive range [10, 10000]."""
        assert self._build(num_tasks=50).num_tasks == 50

        # Below the lower bound.
        with pytest.raises(
            ValidationError, match="greater than or equal to 10"
        ):
            self._build(num_tasks=5)

        # Above the upper bound.
        with pytest.raises(
            ValidationError, match="less than or equal to 10000"
        ):
            self._build(num_tasks=20000)

    def test_batch_size_validation(self):
        """batch_size must lie in the inclusive range [1, 100]."""
        assert self._build(batch_size=25).batch_size == 25

        # Below the lower bound.
        with pytest.raises(
            ValidationError, match="greater than or equal to 1"
        ):
            self._build(batch_size=0)

        # Above the upper bound.
        with pytest.raises(
            ValidationError, match="less than or equal to 100"
        ):
            self._build(batch_size=150)

    def test_delay_ms_validation(self):
        """delay_ms must lie in the inclusive range [0, 5000]."""
        assert self._build(delay_ms=1000).delay_ms == 1000

        # Negative delays are rejected.
        with pytest.raises(
            ValidationError, match="greater than or equal to 0"
        ):
            self._build(delay_ms=-100)

        # Delays above the cap are rejected.
        with pytest.raises(
            ValidationError, match="less than or equal to 5000"
        ):
            self._build(delay_ms=10000)
class TestLoadTestMetrics:
    """Exercise field constraints and cross-field validators on LoadTestMetrics."""

    def test_valid_metrics(self):
        """A consistent set of values round-trips unchanged."""
        m = LoadTestMetrics(
            tasks_sent=100,
            tasks_completed=95,
            tasks_failed=5,
            total_duration_seconds=30.5,
            overall_throughput=3.1,
            failure_rate_percent=5.0,
        )

        assert m.tasks_sent == 100
        assert m.tasks_completed == 95
        assert m.tasks_failed == 5
        assert m.total_duration_seconds == 30.5
        assert m.overall_throughput == 3.1
        assert m.failure_rate_percent == 5.0

    def test_completed_not_exceed_sent_validator(self):
        """tasks_completed may never be greater than tasks_sent."""
        ok = LoadTestMetrics(
            tasks_sent=100,
            tasks_completed=90,
            total_duration_seconds=30.0,
        )
        assert ok.tasks_completed == 90

        # More completed than sent is inconsistent and must raise.
        with pytest.raises(
            ValidationError,
            match="Completed tasks \\(150\\) cannot exceed sent tasks \\(100\\)",
        ):
            LoadTestMetrics(
                tasks_sent=100,
                tasks_completed=150,
                total_duration_seconds=30.0,
            )

    def test_failure_rate_consistency_validator(self):
        """failure_rate_percent must agree with the failed/sent ratio."""
        # 10 failures out of 100 sent is exactly 10%.
        m = LoadTestMetrics(
            tasks_sent=100,
            tasks_completed=90,
            tasks_failed=10,
            total_duration_seconds=30.0,
            failure_rate_percent=10.0,
        )
        assert m.failure_rate_percent == 10.0

        # Claiming 50% with only 10 of 100 failed must be rejected.
        with pytest.raises(
            ValidationError, match="Failure rate 50.0% doesn't match task counts"
        ):
            LoadTestMetrics(
                tasks_sent=100,
                tasks_completed=90,
                tasks_failed=10,
                total_duration_seconds=30.0,
                failure_rate_percent=50.0,
            )

    def test_negative_values_rejected(self):
        """Task counters are non-negative; negative inputs raise."""
        for counters in (
            {"tasks_sent": -10, "tasks_completed": 0},
            {"tasks_sent": 100, "tasks_completed": -5},
        ):
            with pytest.raises(
                ValidationError, match="greater than or equal to 0"
            ):
                LoadTestMetrics(total_duration_seconds=30.0, **counters)

    def test_percentage_bounds(self):
        """Percentage fields accept [0, 100] and reject values above 100."""
        m = LoadTestMetrics(
            tasks_sent=100,
            tasks_completed=100,
            total_duration_seconds=30.0,
            failure_rate_percent=0.0,
            completion_percentage=100.0,
        )
        assert m.failure_rate_percent == 0.0
        assert m.completion_percentage == 100.0

        # Over 100% is out of range.
        with pytest.raises(
            ValidationError, match="less than or equal to 100"
        ):
            LoadTestMetrics(
                tasks_sent=100,
                tasks_completed=100,
                total_duration_seconds=30.0,
                failure_rate_percent=150.0,
            )
class TestPerformanceAnalysis:
    """Test PerformanceAnalysis model validation.

    The rating fields are pattern-constrained strings; these tests cover both
    accepted and rejected values.
    """

    def test_valid_ratings(self):
        """Valid rating values are stored as given."""
        analysis = PerformanceAnalysis(
            throughput_rating="excellent",
            efficiency_rating="good",
            queue_pressure="low",
        )

        assert analysis.throughput_rating == "excellent"
        assert analysis.efficiency_rating == "good"
        assert analysis.queue_pressure == "low"

    def test_invalid_rating_values(self):
        """Values outside the allowed pattern are rejected."""
        with pytest.raises(
            ValidationError, match="String should match pattern"
        ):
            PerformanceAnalysis(
                throughput_rating="amazing",  # Not in allowed values
                efficiency_rating="good",
                queue_pressure="low",
            )

        with pytest.raises(
            ValidationError, match="String should match pattern"
        ):
            PerformanceAnalysis(
                throughput_rating="excellent",
                efficiency_rating="terrible",  # Not in allowed values
                queue_pressure="low",
            )

    def test_all_valid_rating_combinations(self):
        """Every valid rating/pressure combination is accepted."""
        valid_ratings = ["unknown", "poor", "fair", "good", "excellent"]
        valid_pressures = ["unknown", "low", "moderate", "high"]

        # itertools.product replaces the original triple-nested loop while
        # keeping the full cartesian coverage (5 * 5 * 4 = 100 combinations).
        for throughput, efficiency, pressure in product(
            valid_ratings, valid_ratings, valid_pressures
        ):
            analysis = PerformanceAnalysis(
                throughput_rating=throughput,
                efficiency_rating=efficiency,
                queue_pressure=pressure,
            )
            assert analysis.throughput_rating == throughput
            assert analysis.efficiency_rating == efficiency
            assert analysis.queue_pressure == pressure
class TestValidationStatus:
    """Behaviour of the ValidationStatus model, including its defaults."""

    def test_valid_status(self):
        """Explicitly supplied values are stored as given."""
        vs = ValidationStatus(
            test_type_verified=True,
            expected_metrics_present=True,
            performance_signature_match="verified",
            issues=["Some issue"],
        )

        assert vs.test_type_verified is True
        assert vs.expected_metrics_present is True
        assert vs.performance_signature_match == "verified"
        assert vs.issues == ["Some issue"]

    def test_default_values(self):
        """With no arguments, every field falls back to its default."""
        vs = ValidationStatus()

        assert vs.test_type_verified is False
        assert vs.expected_metrics_present is False
        assert vs.performance_signature_match == "unknown"
        assert vs.issues == []

    def test_invalid_signature_match_values(self):
        """performance_signature_match rejects values outside its pattern."""
        with pytest.raises(
            ValidationError, match="String should match pattern"
        ):
            ValidationStatus(
                performance_signature_match="definitely"  # Not in allowed values
            )
class TestLoadTestResult:
    """Validation of the composite LoadTestResult model."""

    @staticmethod
    def _config():
        """A known-good configuration shared by the tests below."""
        return LoadTestConfiguration(
            task_type="cpu_intensive",
            num_tasks=100,
            batch_size=10,
            target_queue="load_test",
        )

    @staticmethod
    def _metrics(completed):
        """Metrics for 100 sent tasks with *completed* of them finished."""
        return LoadTestMetrics(
            tasks_sent=100,
            tasks_completed=completed,
            total_duration_seconds=30.0,
        )

    def test_valid_result(self):
        """A completed result stores its parts and applies field defaults."""
        result = LoadTestResult(
            status="completed",
            test_id="test-123",
            configuration=self._config(),
            metrics=self._metrics(100),
        )

        assert result.status == "completed"
        assert result.test_id == "test-123"
        assert result.task == "load_test_orchestrator"  # Default value
        assert result.configuration.task_type == LoadTestTypes.CPU_INTENSIVE
        assert result.metrics.tasks_sent == 100

    def test_invalid_status_values(self):
        """Statuses outside the allowed pattern are rejected."""
        with pytest.raises(
            ValidationError, match="String should match pattern"
        ):
            LoadTestResult(
                status="running",  # Not in allowed values
                test_id="test-123",
                configuration=self._config(),
                metrics=self._metrics(100),
            )

    def test_status_consistency_validator(self):
        """A failed status must be accompanied by an error message."""
        with pytest.raises(
            ValidationError, match="Failed status requires error message"
        ):
            LoadTestResult(
                status="failed",
                test_id="test-123",
                configuration=self._config(),
                metrics=self._metrics(0),
                # Missing error field
            )
class TestOrchestratorRawResult:
    """Test OrchestratorRawResult model and transformation."""

    def test_valid_raw_result(self):
        """Test creating valid orchestrator raw result."""
        raw_result = OrchestratorRawResult(
            test_id="test-123",
            task_type="cpu_intensive",
            tasks_sent=100,
            tasks_completed=95,
            tasks_failed=5,
            total_duration_seconds=30.5,
            overall_throughput_per_second=3.1,
            failure_rate_percent=5.0,
            completion_percentage=95.0,
            average_throughput_per_second=3.1,
            monitor_duration_seconds=30.0,
            batch_size=10,
            target_queue="load_test"
        )

        assert raw_result.test_id == "test-123"
        # Raw results keep the string value, not the enum member.
        assert raw_result.task_type == LoadTestTypes.CPU_INTENSIVE.value
        assert raw_result.tasks_sent == 100
        assert raw_result.tasks_completed == 95

    def test_to_load_test_result_transformation(self):
        """Test transformation from raw result to LoadTestResult."""
        raw_result = OrchestratorRawResult(
            test_id="test-123",
            task_type="io_simulation",
            tasks_sent=50,
            tasks_completed=48,
            tasks_failed=2,
            total_duration_seconds=15.5,
            overall_throughput_per_second=3.1,
            failure_rate_percent=4.0,
            completion_percentage=96.0,
            average_throughput_per_second=3.1,
            monitor_duration_seconds=15.0,
            batch_size=5,
            target_queue="load_test",
            start_time="2023-01-01T10:00:00",
            end_time="2023-01-01T10:00:15",
            task_ids=["task1", "task2", "task3"]
        )

        result = raw_result.to_load_test_result()

        # Check main fields
        assert result.status == "completed"
        assert result.test_id == "test-123"
        assert result.task == "load_test_orchestrator"

        # Check configuration transformation
        # (tasks_sent maps onto configuration.num_tasks)
        assert result.configuration.task_type == LoadTestTypes.IO_SIMULATION
        assert result.configuration.num_tasks == 50
        assert result.configuration.batch_size == 5
        assert result.configuration.target_queue == "load_test"

        # Check metrics transformation
        # (overall_throughput_per_second maps onto metrics.overall_throughput)
        assert result.metrics.tasks_sent == 50
        assert result.metrics.tasks_completed == 48
        assert result.metrics.tasks_failed == 2
        assert result.metrics.total_duration_seconds == 15.5
        assert result.metrics.overall_throughput == 3.1
        assert result.metrics.failure_rate_percent == 4.0

        # Check optional fields
        assert result.start_time == "2023-01-01T10:00:00"
        assert result.end_time == "2023-01-01T10:00:15"
        assert result.task_ids == ["task1", "task2", "task3"]

    def test_transformation_with_minimal_data(self):
        """Test transformation with only required fields."""
        raw_result = OrchestratorRawResult(
            test_id="minimal-test",
            task_type="memory_operations",
            tasks_sent=10,
            tasks_completed=10,
            total_duration_seconds=5.0,
            batch_size=10,
            target_queue="system"
        )

        result = raw_result.to_load_test_result()

        assert result.test_id == "minimal-test"
        assert result.configuration.task_type == LoadTestTypes.MEMORY_OPERATIONS
        assert result.metrics.tasks_sent == 10
        assert result.metrics.tasks_completed == 10
        assert result.metrics.tasks_failed == 0  # Default value
        # Optional fields default to None / empty when absent from the raw data.
        assert result.start_time is None
        assert result.end_time is None
        assert result.task_ids == []
class TestLoadTestError:
    """Validation of the LoadTestError (LoadTestErrorModel) payload."""

    def test_valid_error(self):
        """A fully populated error payload is accepted."""
        err = LoadTestError(
            status="failed",
            test_id="error-test-123",
            error="Task execution timeout",
            partial_info="Some tasks may have completed",
            tasks_sent=100,
        )

        assert err.task == "load_test_orchestrator"  # Default
        assert err.status == "failed"
        assert err.test_id == "error-test-123"
        assert err.error == "Task execution timeout"
        assert err.partial_info == "Some tasks may have completed"
        assert err.tasks_sent == 100

    def test_invalid_status_values(self):
        """Only failure-like statuses are allowed on an error payload."""
        with pytest.raises(ValidationError, match="String should match pattern"):
            LoadTestError(
                status="completed",  # Should only be failed or timed_out
                test_id="error-test-123",
                error="Some error",
            )

    def test_required_fields(self):
        """Both test_id and error are mandatory fields."""
        # Missing test_id
        with pytest.raises(ValidationError, match="Field required"):
            LoadTestError(status="failed", error="Some error")

        # Missing error
        with pytest.raises(ValidationError, match="Field required"):
            LoadTestError(status="timed_out", test_id="error-test-123")
# Integration test for real-world data shapes
class TestRealWorldDataShapes:
    """Test models with real-world data patterns."""

    def test_typical_successful_load_test_flow(self):
        """Test the complete flow with typical successful data."""
        # Raw orchestrator result (what comes from Redis)
        raw_data = {
            "test_id": "6273dc3c0a87424e93318244e1baf73b",
            "task_type": "io_simulation",
            "tasks_sent": 10,
            "tasks_completed": 10,
            "tasks_failed": 0,
            "total_duration_seconds": 2.02,
            "overall_throughput_per_second": 4.96,
            "failure_rate_percent": 0.0,
            "completion_percentage": 100.0,
            "average_throughput_per_second": 4.98,
            "monitor_duration_seconds": 2.01,
            "batch_size": 10,
            "delay_ms": 0,
            "target_queue": "load_test",
            "start_time": "2025-08-16T16:07:46.080128",
            "end_time": "2025-08-16T16:07:48.097005",
            "task_ids": [
                "ba4c043531c645f8956616eb60df1cc4",
                "8669123b761c4284a0423ccaa362e0b8"
            ]
        }

        # Validate raw result
        orchestrator_result = OrchestratorRawResult(**raw_data)
        assert orchestrator_result.test_id == "6273dc3c0a87424e93318244e1baf73b"

        # Transform to standard result
        load_test_result = orchestrator_result.to_load_test_result()
        assert load_test_result.status == "completed"
        assert load_test_result.metrics.tasks_completed == 10
        assert load_test_result.metrics.failure_rate_percent == 0.0

        # This validates that our Pydantic models handle real Redis data correctly

    def test_partial_failure_scenario(self):
        """Test handling of partial failures."""
        # 15 of 100 tasks failed; models should accept the payload anyway.
        raw_data = {
            "test_id": "partial-fail-test",
            "task_type": "cpu_intensive",
            "tasks_sent": 100,
            "tasks_completed": 85,
            "tasks_failed": 15,
            "total_duration_seconds": 45.0,
            "overall_throughput_per_second": 1.89,
            "failure_rate_percent": 15.0,
            "completion_percentage": 85.0,
            "average_throughput_per_second": 1.89,
            "monitor_duration_seconds": 45.0,
            "batch_size": 20,
            "delay_ms": 100,
            "target_queue": "system"
        }

        # Should validate successfully despite failures
        orchestrator_result = OrchestratorRawResult(**raw_data)
        load_test_result = orchestrator_result.to_load_test_result()

        assert load_test_result.metrics.tasks_completed == 85
        assert load_test_result.metrics.tasks_failed == 15
        assert load_test_result.metrics.failure_rate_percent == 15.0

    def test_edge_case_minimum_values(self):
        """Test edge cases with minimum allowed values."""
        raw_data = {
            "test_id": "minimal",
            "task_type": "cpu_intensive",
            "tasks_sent": 10,  # Minimum allowed
            "tasks_completed": 1,
            "tasks_failed": 9,
            "total_duration_seconds": 0.1,
            "overall_throughput_per_second": 10.0,
            "failure_rate_percent": 90.0,
            "completion_percentage": 10.0,
            "average_throughput_per_second": 10.0,
            "monitor_duration_seconds": 0.1,
            "batch_size": 1,  # Minimum allowed
            "delay_ms": 0,
            "target_queue": "load_test"
        }

        orchestrator_result = OrchestratorRawResult(**raw_data)
        load_test_result = orchestrator_result.to_load_test_result()

        # Boundary values survive the transformation unchanged.
        assert load_test_result.configuration.num_tasks == 10
        assert load_test_result.configuration.batch_size == 1
        assert load_test_result.metrics.failure_rate_percent == 90.0