aegis-stack 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aegis-stack might be problematic. Click here for more details.
- aegis/__init__.py +5 -0
- aegis/__main__.py +374 -0
- aegis/core/CLAUDE.md +365 -0
- aegis/core/__init__.py +6 -0
- aegis/core/components.py +115 -0
- aegis/core/dependency_resolver.py +119 -0
- aegis/core/template_generator.py +163 -0
- aegis/templates/CLAUDE.md +306 -0
- aegis/templates/cookiecutter-aegis-project/cookiecutter.json +27 -0
- aegis/templates/cookiecutter-aegis-project/hooks/post_gen_project.py +172 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.dockerignore +71 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.env.example.j2 +70 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/.gitignore +127 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/Dockerfile +53 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/Makefile +211 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/README.md.j2 +196 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/__init__.py +5 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/__init__.py +6 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/health.py +321 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/load_test.py +638 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/cli/main.py +41 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/__init__.py +0 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/__init__.py +0 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/health.py +134 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/models.py.j2 +247 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/routing.py.j2 +14 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/api/tasks.py.j2 +596 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/hooks.py +133 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/main.py +16 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/middleware/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/middleware/cors.py +20 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/shutdown/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/shutdown/cleanup.py +14 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/startup/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/backend/startup/component_health.py.j2 +190 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/__init__.py +0 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/core/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/core/theme.py +46 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/frontend/main.py +687 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/scheduler/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/scheduler/main.py +138 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/CLAUDE.md +213 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/__init__.py +6 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/constants.py.j2 +30 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/pools.py +78 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/load_test.py +48 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/media.py +41 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/queues/system.py +36 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/registry.py +139 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/__init__.py +119 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/load_tasks.py +526 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/simple_system_tasks.py +32 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/components/worker/tasks/system_tasks.py +279 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/config.py.j2 +119 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/constants.py +60 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/db.py +67 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/core/log.py +85 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/webserver.py +40 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/entrypoints/{% if cookiecutter.include_scheduler == "yes" %}scheduler.py{% endif %} +21 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/integrations/__init__.py +0 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/integrations/main.py +61 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/py.typed +0 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test.py +661 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/load_test_models.py +269 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/shared/__init__.py +15 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/shared/models.py +26 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/__init__.py +52 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/alerts.py +94 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/health.py.j2 +1105 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/models.py +169 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/app/services/system/ui.py +52 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docker-compose.yml.j2 +195 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/api.md +191 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/components/scheduler.md +414 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/development.md +215 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/health.md +240 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/javascripts/mermaid-config.js +62 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/docs/stylesheets/mermaid.css +95 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/mkdocs.yml.j2 +62 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/pyproject.toml.j2 +156 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/entrypoint.sh +87 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/entrypoint.sh.j2 +104 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/scripts/gen_docs.py +16 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/api/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/api/test_health_endpoints.py.j2 +239 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/components/test_scheduler.py +76 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/conftest.py.j2 +81 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/__init__.py +1 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_component_integration.py.j2 +376 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_health_logic.py.j2 +633 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_models.py +665 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_load_test_service.py +602 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_system_service.py +96 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/services/test_worker_health_registration.py.j2 +224 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/tests/test_core.py +50 -0
- aegis/templates/cookiecutter-aegis-project/{{cookiecutter.project_slug}}/uv.lock +1673 -0
- aegis_stack-0.1.0.dist-info/METADATA +114 -0
- aegis_stack-0.1.0.dist-info/RECORD +103 -0
- aegis_stack-0.1.0.dist-info/WHEEL +4 -0
- aegis_stack-0.1.0.dist-info/entry_points.txt +2 -0
|
@@ -0,0 +1,638 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Load testing CLI commands.
|
|
3
|
+
|
|
4
|
+
Provides command-line interface for running and managing load tests,
|
|
5
|
+
with full parameter configuration and result analysis.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
from enum import Enum
|
|
10
|
+
import json
|
|
11
|
+
import time
|
|
12
|
+
from typing import Any
|
|
13
|
+
|
|
14
|
+
from rich import print as rprint
|
|
15
|
+
from rich.console import Console
|
|
16
|
+
from rich.panel import Panel
|
|
17
|
+
from rich.progress import Progress, SpinnerColumn, TextColumn
|
|
18
|
+
from rich.table import Table
|
|
19
|
+
import typer
|
|
20
|
+
|
|
21
|
+
from app.components.worker.constants import LoadTestTypes
|
|
22
|
+
from app.core.config import get_load_test_queue
|
|
23
|
+
from app.core.log import logger
|
|
24
|
+
from app.services.load_test import (
|
|
25
|
+
LoadTestConfiguration,
|
|
26
|
+
LoadTestService,
|
|
27
|
+
quick_cpu_test,
|
|
28
|
+
quick_io_test,
|
|
29
|
+
quick_memory_test,
|
|
30
|
+
)
|
|
31
|
+
|
|
32
|
+
# Typer sub-application: mounted by the main CLI as the `load-test` command group.
app = typer.Typer(
    name="load-test",
    help="Load testing commands for worker performance analysis",
    no_args_is_help=True,  # invoking bare `load-test` shows help instead of erroring
)

# Shared Rich console used by all commands for tables and panels.
console = Console()
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class QueueChoice(str, Enum):
    """Available queue types for load testing.

    Inherits from ``str`` so Typer can parse and display the raw queue
    name directly from the command line.
    """

    load_test = "load_test"  # dedicated load-test queue (preferred target)
    system = "system"  # Legacy option
    media = "media"  # Legacy option

    @classmethod
    def get_default(cls) -> str:
        """Get the default queue from config.

        Returns the configured queue name as a plain string (not a
        ``QueueChoice`` member).
        """
        return get_load_test_queue()
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
@app.command("run")
|
|
54
|
+
def run_load_test(
|
|
55
|
+
num_tasks: int = typer.Option(
|
|
56
|
+
100, "--tasks", "-n", help="Number of tasks to spawn", min=10, max=10000
|
|
57
|
+
),
|
|
58
|
+
task_type: LoadTestTypes = typer.Option(
|
|
59
|
+
LoadTestTypes.CPU_INTENSIVE, "--type", "-t", help="Type of load test to run"
|
|
60
|
+
),
|
|
61
|
+
batch_size: int = typer.Option(
|
|
62
|
+
10, "--batch", "-b", help="Tasks per batch", min=1, max=100
|
|
63
|
+
),
|
|
64
|
+
delay_ms: int = typer.Option(
|
|
65
|
+
0, "--delay", "-d", help="Delay between batches (ms)", min=0, max=5000
|
|
66
|
+
),
|
|
67
|
+
target_queue: QueueChoice = typer.Option(
|
|
68
|
+
QueueChoice.load_test, "--queue", "-q", help="Target queue for testing"
|
|
69
|
+
),
|
|
70
|
+
wait: bool = typer.Option(
|
|
71
|
+
True, "--wait/--no-wait", help="Wait for test completion and show results"
|
|
72
|
+
),
|
|
73
|
+
timeout: int = typer.Option(
|
|
74
|
+
600, "--timeout", help="Timeout for waiting (seconds)", min=10, max=3600
|
|
75
|
+
),
|
|
76
|
+
) -> None:
|
|
77
|
+
"""
|
|
78
|
+
Run a customizable load test with specified parameters.
|
|
79
|
+
|
|
80
|
+
This command allows full control over load test configuration including
|
|
81
|
+
task count, type, batching, and queue targeting.
|
|
82
|
+
"""
|
|
83
|
+
|
|
84
|
+
config = LoadTestConfiguration(
|
|
85
|
+
num_tasks=num_tasks,
|
|
86
|
+
task_type=task_type,
|
|
87
|
+
batch_size=batch_size,
|
|
88
|
+
delay_ms=delay_ms,
|
|
89
|
+
target_queue=target_queue.value
|
|
90
|
+
)
|
|
91
|
+
|
|
92
|
+
# Display test configuration
|
|
93
|
+
_display_test_configuration(config, task_type.value)
|
|
94
|
+
|
|
95
|
+
# Enqueue the load test
|
|
96
|
+
rprint("š [bold blue]Starting load test...[/bold blue]")
|
|
97
|
+
|
|
98
|
+
try:
|
|
99
|
+
task_id = asyncio.run(LoadTestService.enqueue_load_test(config))
|
|
100
|
+
rprint("ā
[green]Load test enqueued successfully![/green]")
|
|
101
|
+
rprint(f"š [cyan]Task ID:[/cyan] {task_id}")
|
|
102
|
+
|
|
103
|
+
if wait:
|
|
104
|
+
_wait_for_completion_and_display_results(
|
|
105
|
+
task_id, target_queue.value, timeout
|
|
106
|
+
)
|
|
107
|
+
else:
|
|
108
|
+
rprint("\nš” [yellow]Use this command to check results later:[/yellow]")
|
|
109
|
+
rprint(f" [bold]full-stack load-test results {task_id}[/bold]")
|
|
110
|
+
|
|
111
|
+
except Exception as e:
|
|
112
|
+
rprint(f"ā [red]Failed to start load test:[/red] {e}")
|
|
113
|
+
raise typer.Exit(1)
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
@app.command("cpu")
|
|
117
|
+
def quick_cpu_test_cmd(
|
|
118
|
+
num_tasks: int = typer.Option(
|
|
119
|
+
50, "--tasks", "-n", help="Number of CPU tasks", min=10, max=1000
|
|
120
|
+
),
|
|
121
|
+
wait: bool = typer.Option(True, "--wait/--no-wait", help="Wait for completion"),
|
|
122
|
+
) -> None:
|
|
123
|
+
"""
|
|
124
|
+
Quick CPU-intensive load test with optimized defaults.
|
|
125
|
+
|
|
126
|
+
Tests computational performance with fibonacci calculations,
|
|
127
|
+
mathematical operations, and prime number checking.
|
|
128
|
+
"""
|
|
129
|
+
|
|
130
|
+
rprint("š„ļø [bold blue]Quick CPU Load Test[/bold blue]")
|
|
131
|
+
rprint(f"š [cyan]Tasks:[/cyan] {num_tasks} CPU-intensive tasks")
|
|
132
|
+
rprint("š¢ [cyan]Work type:[/cyan] Fibonacci + math operations + prime checking")
|
|
133
|
+
|
|
134
|
+
try:
|
|
135
|
+
task_id = asyncio.run(quick_cpu_test(num_tasks))
|
|
136
|
+
rprint(f"ā
[green]CPU test started![/green] Task ID: {task_id}")
|
|
137
|
+
|
|
138
|
+
if wait:
|
|
139
|
+
_wait_for_completion_and_display_results(
|
|
140
|
+
task_id, get_load_test_queue(), 600
|
|
141
|
+
)
|
|
142
|
+
else:
|
|
143
|
+
rprint(
|
|
144
|
+
f"š” [yellow]Check results:[/yellow] full-stack load-test results "
|
|
145
|
+
f"{task_id}"
|
|
146
|
+
)
|
|
147
|
+
|
|
148
|
+
except Exception as e:
|
|
149
|
+
rprint(f"ā [red]CPU test failed:[/red] {e}")
|
|
150
|
+
raise typer.Exit(1)
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
@app.command("io")
|
|
154
|
+
def quick_io_test_cmd(
|
|
155
|
+
num_tasks: int = typer.Option(
|
|
156
|
+
100, "--tasks", "-n", help="Number of I/O tasks", min=10, max=1000
|
|
157
|
+
),
|
|
158
|
+
wait: bool = typer.Option(True, "--wait/--no-wait", help="Wait for completion"),
|
|
159
|
+
) -> None:
|
|
160
|
+
"""
|
|
161
|
+
Quick I/O simulation load test with optimized defaults.
|
|
162
|
+
|
|
163
|
+
Tests async I/O performance with simulated network delays,
|
|
164
|
+
concurrent operations, and file I/O patterns.
|
|
165
|
+
"""
|
|
166
|
+
|
|
167
|
+
rprint("š¾ [bold blue]Quick I/O Load Test[/bold blue]")
|
|
168
|
+
rprint(f"š [cyan]Tasks:[/cyan] {num_tasks} I/O simulation tasks")
|
|
169
|
+
rprint(
|
|
170
|
+
"š [cyan]Work type:[/cyan] Network delays + concurrent async + file operations"
|
|
171
|
+
)
|
|
172
|
+
|
|
173
|
+
try:
|
|
174
|
+
task_id = asyncio.run(quick_io_test(num_tasks))
|
|
175
|
+
rprint(f"ā
[green]I/O test started![/green] Task ID: {task_id}")
|
|
176
|
+
|
|
177
|
+
if wait:
|
|
178
|
+
_wait_for_completion_and_display_results(
|
|
179
|
+
task_id, get_load_test_queue(), 600
|
|
180
|
+
)
|
|
181
|
+
else:
|
|
182
|
+
rprint(
|
|
183
|
+
f"š” [yellow]Check results:[/yellow] full-stack load-test results "
|
|
184
|
+
f"{task_id}"
|
|
185
|
+
)
|
|
186
|
+
|
|
187
|
+
except Exception as e:
|
|
188
|
+
rprint(f"ā [red]I/O test failed:[/red] {e}")
|
|
189
|
+
raise typer.Exit(1)
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
@app.command("memory")
|
|
193
|
+
def quick_memory_test_cmd(
|
|
194
|
+
num_tasks: int = typer.Option(
|
|
195
|
+
200, "--tasks", "-n", help="Number of memory tasks", min=10, max=1000
|
|
196
|
+
),
|
|
197
|
+
wait: bool = typer.Option(True, "--wait/--no-wait", help="Wait for completion"),
|
|
198
|
+
) -> None:
|
|
199
|
+
"""
|
|
200
|
+
Quick memory allocation load test with optimized defaults.
|
|
201
|
+
|
|
202
|
+
Tests memory performance with data structure allocation,
|
|
203
|
+
manipulation, and garbage collection patterns.
|
|
204
|
+
"""
|
|
205
|
+
|
|
206
|
+
rprint("š§ [bold blue]Quick Memory Load Test[/bold blue]")
|
|
207
|
+
rprint(f"š [cyan]Tasks:[/cyan] {num_tasks} memory allocation tasks")
|
|
208
|
+
rprint(
|
|
209
|
+
"š [cyan]Work type:[/cyan] Data structures + allocation patterns + GC testing"
|
|
210
|
+
)
|
|
211
|
+
|
|
212
|
+
try:
|
|
213
|
+
task_id = asyncio.run(quick_memory_test(num_tasks))
|
|
214
|
+
rprint(f"ā
[green]Memory test started![/green] Task ID: {task_id}")
|
|
215
|
+
|
|
216
|
+
if wait:
|
|
217
|
+
_wait_for_completion_and_display_results(
|
|
218
|
+
task_id, get_load_test_queue(), 600
|
|
219
|
+
)
|
|
220
|
+
else:
|
|
221
|
+
rprint(
|
|
222
|
+
f"š” [yellow]Check results:[/yellow] full-stack load-test results "
|
|
223
|
+
f"{task_id}"
|
|
224
|
+
)
|
|
225
|
+
|
|
226
|
+
except Exception as e:
|
|
227
|
+
rprint(f"ā [red]Memory test failed:[/red] {e}")
|
|
228
|
+
raise typer.Exit(1)
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
@app.command("results")
|
|
232
|
+
def show_results(
|
|
233
|
+
task_id: str = typer.Argument(..., help="Load test task ID"),
|
|
234
|
+
target_queue: QueueChoice = typer.Option(
|
|
235
|
+
QueueChoice.system, "--queue", "-q", help="Queue where test was run"
|
|
236
|
+
),
|
|
237
|
+
detailed: bool = typer.Option(
|
|
238
|
+
False, "--detailed", "-d", help="Show detailed analysis and metrics"
|
|
239
|
+
),
|
|
240
|
+
json_output: bool = typer.Option(
|
|
241
|
+
False, "--json", help="Output results in JSON format"
|
|
242
|
+
),
|
|
243
|
+
) -> None:
|
|
244
|
+
"""
|
|
245
|
+
Display results and analysis for a completed load test.
|
|
246
|
+
|
|
247
|
+
Retrieves and analyzes load test results, showing performance metrics,
|
|
248
|
+
test type verification, and recommendations.
|
|
249
|
+
"""
|
|
250
|
+
|
|
251
|
+
try:
|
|
252
|
+
result = asyncio.run(
|
|
253
|
+
LoadTestService.get_load_test_result(task_id, target_queue.value)
|
|
254
|
+
)
|
|
255
|
+
|
|
256
|
+
if not result:
|
|
257
|
+
rprint(f"ā [red]No results found for task ID:[/red] {task_id}")
|
|
258
|
+
rprint(
|
|
259
|
+
"š” [yellow]Check that the task ID is correct and the test has "
|
|
260
|
+
"completed[/yellow]"
|
|
261
|
+
)
|
|
262
|
+
raise typer.Exit(1)
|
|
263
|
+
|
|
264
|
+
if json_output:
|
|
265
|
+
print(json.dumps(result, indent=2, default=str))
|
|
266
|
+
return
|
|
267
|
+
|
|
268
|
+
_display_load_test_results(result, detailed)
|
|
269
|
+
|
|
270
|
+
except Exception as e:
|
|
271
|
+
rprint(f"ā [red]Failed to retrieve results:[/red] {e}")
|
|
272
|
+
raise typer.Exit(1)
|
|
273
|
+
|
|
274
|
+
|
|
275
|
+
@app.command("info")
|
|
276
|
+
def show_test_type_info(
|
|
277
|
+
test_type: str | None = typer.Argument(
|
|
278
|
+
None, help="Specific test type to show info for"
|
|
279
|
+
),
|
|
280
|
+
) -> None:
|
|
281
|
+
"""
|
|
282
|
+
Display information about available load test types and their characteristics.
|
|
283
|
+
|
|
284
|
+
Shows what each test type does, expected performance signatures,
|
|
285
|
+
and guidance for choosing the right test for your needs.
|
|
286
|
+
"""
|
|
287
|
+
|
|
288
|
+
if test_type:
|
|
289
|
+
# Show detailed info for specific test type
|
|
290
|
+
try:
|
|
291
|
+
test_type_enum = LoadTestTypes(test_type)
|
|
292
|
+
info = LoadTestService.get_test_type_info(test_type_enum)
|
|
293
|
+
_display_test_type_info(test_type, info)
|
|
294
|
+
except ValueError:
|
|
295
|
+
rprint(f"ā [red]Unknown test type:[/red] {test_type}")
|
|
296
|
+
rprint(
|
|
297
|
+
"š” [yellow]Available types:[/yellow] cpu_intensive, io_simulation, "
|
|
298
|
+
"memory_operations, failure_testing"
|
|
299
|
+
)
|
|
300
|
+
raise typer.Exit(1)
|
|
301
|
+
else:
|
|
302
|
+
# Show overview of all test types
|
|
303
|
+
rprint("š§Ŗ [bold blue]Available Load Test Types[/bold blue]\n")
|
|
304
|
+
|
|
305
|
+
for load_test_type in [
|
|
306
|
+
LoadTestTypes.CPU_INTENSIVE,
|
|
307
|
+
LoadTestTypes.IO_SIMULATION,
|
|
308
|
+
LoadTestTypes.MEMORY_OPERATIONS,
|
|
309
|
+
LoadTestTypes.FAILURE_TESTING,
|
|
310
|
+
]:
|
|
311
|
+
info = LoadTestService.get_test_type_info(load_test_type)
|
|
312
|
+
rprint(
|
|
313
|
+
f"š¹ [bold cyan]{load_test_type}[/bold cyan] - "
|
|
314
|
+
f"{info.get('name', 'Unknown')}"
|
|
315
|
+
)
|
|
316
|
+
rprint(f" {info.get('description', 'No description available')}")
|
|
317
|
+
rprint(
|
|
318
|
+
f" [dim]Typical duration: "
|
|
319
|
+
f"{info.get('typical_duration_ms', 'Unknown')}[/dim]\n"
|
|
320
|
+
)
|
|
321
|
+
|
|
322
|
+
rprint("š” [yellow]Use --help with specific commands for more details[/yellow]")
|
|
323
|
+
rprint("š [cyan]Examples:[/cyan]")
|
|
324
|
+
rprint(" full-stack load-test info cpu_intensive")
|
|
325
|
+
rprint(" full-stack load-test info io_simulation")
|
|
326
|
+
rprint(" full-stack load-test info memory_operations")
|
|
327
|
+
|
|
328
|
+
|
|
329
|
+
def _display_test_configuration(config: LoadTestConfiguration, test_type: str) -> None:
    """Display load test configuration in a formatted panel.

    Args:
        config: The fully-populated test configuration to render.
        test_type: Raw test-type string; must be a valid ``LoadTestTypes`` value
            (``LoadTestTypes(test_type)`` below raises ``ValueError`` otherwise).
    """
    # NOTE(review): emoji literals appear mojibake-garbled in this view;
    # verify against the original file's UTF-8 source.

    test_info = LoadTestService.get_test_type_info(LoadTestTypes(test_type))

    # Rich markup inside an f-string; .strip() removes the leading/trailing
    # newlines introduced by the triple-quoted literal.
    config_text = f"""
[bold cyan]Test Configuration[/bold cyan]

š¢ [cyan]Tasks:[/cyan] {config.num_tasks} {test_type} tasks
š¦ [cyan]Batching:[/cyan] {config.batch_size} tasks per batch
ā±ļø [cyan]Delay:[/cyan] {config.delay_ms}ms between batches
šÆ [cyan]Queue:[/cyan] {config.target_queue}

[bold yellow]Test Type Details[/bold yellow]
š [yellow]Name:[/yellow] {test_info.get('name', 'Unknown')}
š [yellow]Description:[/yellow] {test_info.get('description', 'No description')}
ā” [yellow]Performance:[/yellow] {test_info.get('performance_signature', 'Unknown')}
š [yellow]Concurrency:[/yellow] {test_info.get('concurrency_impact', 'Unknown')}
""".strip()

    console.print(Panel(config_text, title="š Load Test Setup", border_style="blue"))
|
|
350
|
+
|
|
351
|
+
|
|
352
|
+
def _display_test_type_info(test_type: str, info: dict[str, Any]) -> None:
    """Display detailed information about a specific test type.

    Args:
        test_type: Test-type name used only in the panel title.
        info: Metadata dict from ``LoadTestService.get_test_type_info``; every
            key is read with ``.get`` so missing entries degrade to placeholders.
    """
    # NOTE(review): emoji literals appear mojibake-garbled in this view;
    # verify against the original file's UTF-8 source.

    info_text = f"""
[bold cyan]{info.get('name', 'Unknown Test Type')}[/bold cyan]

[bold yellow]Description[/bold yellow]
{info.get('description', 'No description available')}

[bold yellow]Performance Characteristics[/bold yellow]
š [yellow]Signature:[/yellow] {info.get('performance_signature', 'Unknown')}
ā±ļø [yellow]Duration:[/yellow] {info.get('typical_duration_ms', 'Unknown')}
š [yellow]Concurrency:[/yellow] {info.get('concurrency_impact', 'Unknown')}

[bold yellow]Expected Metrics[/bold yellow]
š {', '.join(info.get('expected_metrics', ['No metrics specified']))}

[bold yellow]Validation Keys[/bold yellow]
š {', '.join(info.get('validation_keys', ['No validation keys']))}
""".strip()

    console.print(
        Panel(info_text, title=f"š§Ŗ {test_type} Test Type", border_style="green")
    )
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
def _wait_for_completion_and_display_results(
    task_id: str, target_queue: str, timeout: int
) -> None:
    """Wait for load test completion and display results.

    Polls ``LoadTestService.get_load_test_result`` every ~2 seconds under a
    Rich spinner until a result appears or ``timeout`` seconds elapse.

    Args:
        task_id: Orchestrator task to poll for.
        target_queue: Queue name the test was enqueued on.
        timeout: Maximum seconds to wait; on expiry the function prints a hint
            and returns without displaying results.
    """
    # NOTE(review): emoji literals appear mojibake-garbled in this view;
    # verify against the original file's UTF-8 source.

    rprint(
        f"\nā³ [yellow]Waiting for load test completion (timeout: {timeout}s)..."
        f"[/yellow]"
    )

    start_time = time.time()

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:

        # total=None renders an indeterminate spinner rather than a bar.
        wait_task = progress.add_task(
            "Waiting for load test to complete...", total=None
        )

        while True:
            elapsed = time.time() - start_time

            if elapsed > timeout:
                # Give up: the orchestrator may still finish later, so point the
                # user at the manual results command and return early.
                progress.update(wait_task, description="ā Timeout reached")
                rprint(f"\nā° [red]Timeout reached after {timeout}s[/red]")
                rprint(
                    f"š” [yellow]Check results manually:[/yellow] "
                    f"full-stack load-test results {task_id}"
                )
                return

            try:
                # Each poll spins up a fresh event loop via asyncio.run.
                result = asyncio.run(
                    LoadTestService.get_load_test_result(task_id, target_queue)
                )

                if result:
                    # `result` stays bound for use after the loop below.
                    progress.update(wait_task, description="ā Load test completed!")
                    break

            except Exception as e:
                # Transient lookup errors are expected while the test runs;
                # log at debug and keep polling.
                logger.debug(f"Error checking results: {e}")

            # Update progress description with elapsed time
            progress.update(
                wait_task, description=f"Waiting for completion... ({elapsed:.1f}s)"
            )
            time.sleep(2)

    rprint("\nš [bold green]Load test completed![/bold green]")
    _display_load_test_results(result, detailed=True)
|
|
432
|
+
|
|
433
|
+
|
|
434
|
+
def _display_load_test_results(result: dict[str, Any], detailed: bool = False) -> None:
    """Display formatted load test results.

    Routes ``timed_out``/``failed`` results to :func:`_display_error_result`;
    otherwise renders a summary table, an optional detailed analysis, the
    performance summary, and any recommendations.

    Args:
        result: Result dict from ``LoadTestService.get_load_test_result``.
            Expected keys (all optional): ``status``, ``metrics``, ``analysis``,
            ``performance_summary``.
        detailed: When True and an ``analysis`` dict is present, also show the
            per-metric performance analysis.
    """
    # NOTE(review): emoji literals appear mojibake-garbled in this view;
    # verify against the original file's UTF-8 source.

    # Basic results
    status = result.get("status", "unknown")

    # Handle timeout and failure cases
    if status in ["timed_out", "failed"]:
        _display_error_result(result, status)
        return

    # Handle successful results
    metrics = result.get("metrics", {})

    # Create summary table for successful results
    table = Table(
        title="š Load Test Results Summary",
        show_header=True,
        header_style="bold magenta",
    )
    table.add_column("Metric", style="cyan", no_wrap=True)
    table.add_column("Value", style="green")
    table.add_column("Details", style="dim")

    # Add basic metrics
    table.add_row("Status", status, "Test execution status")
    table.add_row(
        "Tasks Sent", str(metrics.get("tasks_sent", "unknown")), "Total tasks enqueued"
    )
    table.add_row(
        "Tasks Completed",
        str(metrics.get("tasks_completed", "unknown")),
        "Successfully completed",
    )
    table.add_row(
        "Tasks Failed",
        str(metrics.get("tasks_failed", "unknown")),
        "Failed during execution",
    )
    table.add_row(
        "Duration",
        f"{metrics.get('total_duration_seconds', 0):.2f}s",
        "Total test duration",
    )
    table.add_row(
        "Throughput",
        f"{metrics.get('overall_throughput', 0):.2f} tasks/sec",
        "Average task completion rate",
    )
    table.add_row(
        "Failure Rate",
        f"{metrics.get('failure_rate_percent', 0):.1f}%",
        "Percentage of failed tasks",
    )

    console.print(table)

    # Show performance analysis if available
    analysis = result.get("analysis", {})
    if analysis and detailed:
        _display_performance_analysis(analysis)

    # Show performance summary
    perf_summary = result.get("performance_summary", "No summary available")
    console.print("\nš” [bold yellow]Performance Summary[/bold yellow]")
    console.print(f"   {perf_summary}")

    # Show recommendations if available
    recommendations = analysis.get("recommendations", []) if analysis else []
    if recommendations:
        console.print("\nš§ [bold blue]Recommendations[/bold blue]")
        for i, rec in enumerate(recommendations, 1):
            console.print(f"   {i}. {rec}")
|
|
507
|
+
|
|
508
|
+
|
|
509
|
+
def _display_error_result(result: dict[str, Any], status: str) -> None:
    """Display results for failed or timed out load tests.

    Renders a red rich Panel explaining the failure mode (timeout vs.
    hard failure) followed by generic troubleshooting tips.

    Args:
        result: Load-test result payload; ``test_id``, ``error`` and
            ``partial_info`` keys are read when present.
        status: Either ``"timed_out"`` or ``"failed"``; any other value
            skips the panel and prints only the troubleshooting tips.
    """
    # NOTE(review): the emoji in these strings appear mojibake-garbled in
    # the published artifact — verify the source file's encoding.
    test_id = result.get("test_id", "unknown")
    error = result.get("error", "Unknown error")
    partial_info = result.get("partial_info", "")

    if status == "timed_out":
        timeout_body = (
            f"[bold red]ā° Load Test Timed Out[/bold red]\n\n"
            f"[cyan]Test ID:[/cyan] {test_id}\n"
            f"[cyan]Error:[/cyan] {error}\n\n"
            "[yellow]What this means:[/yellow]\n"
            "The orchestrator task timed out, but individual worker tasks "
            "may have completed successfully.\n"
            "This often happens with large load tests that exceed the "
            "queue timeout.\n\n"
            "[blue]To investigate:[/blue]\n"
            "⢠Check worker logs for individual task completion\n"
            "⢠Consider using smaller batch sizes for large tests\n"
            "⢠Check queue metrics for actual task completion counts"
        )
        console.print(
            Panel(timeout_body, title="š Load Test Analysis", border_style="red")
        )
        # Any partial progress captured before the timeout.
        if partial_info:
            console.print(f"\nš [dim]{partial_info}[/dim]")

    elif status == "failed":
        failure_body = (
            f"[bold red]ā Load Test Failed[/bold red]\n\n"
            f"[cyan]Test ID:[/cyan] {test_id}\n"
            f"[cyan]Error:[/cyan] {error}\n\n"
            "[blue]Next steps:[/blue]\n"
            "⢠Check worker logs for detailed error information\n"
            "⢠Verify queue connectivity and worker status\n"
            "⢠Try a smaller test to isolate the issue"
        )
        console.print(
            Panel(failure_body, title="š Load Test Analysis", border_style="red")
        )

    # Show basic troubleshooting info regardless of the failure mode.
    console.print("\nš” [bold yellow]Troubleshooting Tips[/bold yellow]")
    for tip in (
        " 1. Check worker container logs: docker compose logs worker",
        " 2. Verify system health: full-stack health check",
        " 3. Try a smaller load test first: full-stack load-test cpu --tasks 10",
    ):
        console.print(tip)
|
|
557
|
+
|
|
558
|
+
|
|
559
|
+
def _display_performance_analysis(analysis: dict[str, Any]) -> None:
    """Display detailed performance analysis.

    Renders two sections from *analysis*:
      * a rich table of rating rows from ``analysis["performance_analysis"]``
      * a validation summary from ``analysis["validation_status"]`` (printed
        only when that mapping is non-empty)
    """
    # NOTE(review): emoji in these strings appear mojibake-garbled in the
    # published artifact — verify the source file's encoding.
    perf_analysis = analysis.get("performance_analysis", {})
    validation = analysis.get("validation_status", {})

    # Performance ratings table.
    perf_table = Table(
        title="šÆ Performance Analysis",
        show_header=True,
        header_style="bold blue",
    )
    for heading, column_style in (
        ("Aspect", "cyan"),
        ("Rating", "bold"),
        ("Description", "dim"),
    ):
        perf_table.add_column(heading, style=column_style)

    # Rows whose rating uses the shared color mapping.
    for aspect, key, description in (
        ("Throughput", "throughput_rating", "Task processing speed"),
        ("Efficiency", "efficiency_rating", "Task completion rate"),
    ):
        rating = perf_analysis.get(key, "unknown")
        color = _get_rating_color(rating)
        perf_table.add_row(
            aspect,
            f"[{color}]{rating}[/{color}]",
            description,
        )

    # Queue pressure has its own high/medium/other color scheme.
    queue_pressure = perf_analysis.get("queue_pressure", "unknown")
    pressure_color = {"high": "red", "medium": "yellow"}.get(queue_pressure, "green")
    perf_table.add_row(
        "Queue Pressure",
        f"[{pressure_color}]{queue_pressure}[/{pressure_color}]",
        "Queue saturation level",
    )

    console.print(perf_table)

    # Validation status (only when the analysis carried one).
    if validation:
        console.print("\nā [bold green]Test Validation[/bold green]")
        for label, key in (
            ("Test type verified", "test_type_verified"),
            ("Expected metrics present", "expected_metrics_present"),
            ("Performance signature match", "performance_signature_match"),
        ):
            console.print(f" š {label}: {validation.get(key, 'unknown')}")
|
|
623
|
+
|
|
624
|
+
|
|
625
|
+
def _get_rating_color(rating: str) -> str:
|
|
626
|
+
"""Get color for performance rating."""
|
|
627
|
+
color_map = {
|
|
628
|
+
"excellent": "green",
|
|
629
|
+
"good": "blue",
|
|
630
|
+
"fair": "yellow",
|
|
631
|
+
"poor": "red",
|
|
632
|
+
"unknown": "dim"
|
|
633
|
+
}
|
|
634
|
+
return color_map.get(rating, "dim")
|
|
635
|
+
|
|
636
|
+
|
|
637
|
+
# Allow running this module directly (e.g. `python -m app.cli.load_test`).
if __name__ == "__main__":
    app()
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Main CLI application entry point.
|
|
3
|
+
|
|
4
|
+
Command-line interface for full-stack management tasks.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import importlib
|
|
8
|
+
from typing import TYPE_CHECKING
|
|
9
|
+
|
|
10
|
+
import typer
|
|
11
|
+
|
|
12
|
+
from app.cli import health
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from app.cli import load_test
|
|
16
|
+
|
|
17
|
+
# Root Typer application for the `full-stack` CLI; invoking it with no
# arguments prints the help text instead of erroring.
app = typer.Typer(
    name="full-stack",
    help="full-stack management CLI",
    no_args_is_help=True,
)

# Register sub-commands
app.add_typer(health.app, name="health")

# Conditionally register load-test command if worker components are available
try:
    # importlib.import_module is used (rather than a top-level import) so a
    # missing optional dependency surfaces as ImportError right here.
    load_test_module = importlib.import_module("app.cli.load_test")
    app.add_typer(load_test_module.app, name="load-test")
except ImportError:
    # Worker components not available, skip load-test commands
    pass
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def main() -> None:
    """Entry point for the CLI application."""
    # Delegates straight to the Typer app; presumably referenced by the
    # package's console-script entry point — confirm against pyproject.
    app()
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
# Allow running this module directly (e.g. `python -m app.cli.main`).
if __name__ == "__main__":
    main()
|
|
File without changes
|