ai-testing-swarm 0.1.12__tar.gz → 0.1.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ai_testing_swarm-0.1.12/src/ai_testing_swarm.egg-info → ai_testing_swarm-0.1.14}/PKG-INFO +20 -4
- ai_testing_swarm-0.1.12/PKG-INFO → ai_testing_swarm-0.1.14/README.md +17 -12
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/pyproject.toml +6 -1
- ai_testing_swarm-0.1.14/src/ai_testing_swarm/__init__.py +1 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/execution_agent.py +23 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/llm_reasoning_agent.py +31 -1
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/release_gate_agent.py +5 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/cli.py +17 -2
- ai_testing_swarm-0.1.14/src/ai_testing_swarm/core/openapi_validator.py +157 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/orchestrator.py +16 -5
- ai_testing_swarm-0.1.14/src/ai_testing_swarm/reporting/report_writer.py +327 -0
- ai_testing_swarm-0.1.12/README.md → ai_testing_swarm-0.1.14/src/ai_testing_swarm.egg-info/PKG-INFO +28 -3
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm.egg-info/SOURCES.txt +3 -0
- ai_testing_swarm-0.1.14/src/ai_testing_swarm.egg-info/requires.txt +3 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/tests/test_openapi_loader.py +1 -0
- ai_testing_swarm-0.1.14/tests/test_openapi_validator.py +77 -0
- ai_testing_swarm-0.1.12/src/ai_testing_swarm/__init__.py +0 -1
- ai_testing_swarm-0.1.12/src/ai_testing_swarm/reporting/report_writer.py +0 -147
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/setup.cfg +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/__init__.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/learning_agent.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/test_planner_agent.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/test_writer_agent.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/ui_agent.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/core/__init__.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/core/api_client.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/core/config.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/core/curl_parser.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/core/openai_client.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/core/openapi_loader.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/core/safety.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/reporting/__init__.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm.egg-info/dependency_links.txt +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm.egg-info/entry_points.txt +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm.egg-info/top_level.txt +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/tests/test_policy_expected_negatives.py +0 -0
- {ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/tests/test_swarm_api.py +0 -0
{ai_testing_swarm-0.1.12/src/ai_testing_swarm.egg-info → ai_testing_swarm-0.1.14}/PKG-INFO
RENAMED

@@ -1,17 +1,19 @@
 Metadata-Version: 2.4
 Name: ai-testing-swarm
-Version: 0.1.12
+Version: 0.1.14
 Summary: AI-powered testing swarm
 Author-email: Arif Shah <ashah7775@gmail.com>
 License: MIT
 Requires-Python: >=3.9
 Description-Content-Type: text/markdown
+Provides-Extra: openapi
+Requires-Dist: jsonschema>=4.0; extra == "openapi"
 
 # AI Testing Swarm
 
 AI Testing Swarm is a **super-advanced, mutation-driven API testing framework** (with optional OpenAPI + OpenAI augmentation) built on top of **pytest**.
 
-It generates a large set of deterministic negative/edge/security test cases for an API request, executes them (optionally in parallel, with retries/throttling), and produces a JSON
+It generates a large set of deterministic negative/edge/security test cases for an API request, executes them (optionally in parallel, with retries/throttling), and produces a report (JSON/Markdown/HTML) with summaries.
 
 > Notes:
 > - UI testing is not the focus of the current releases.
@@ -25,6 +27,12 @@ It generates a large set of deterministic negative/edge/security test cases for
 pip install ai-testing-swarm
 ```
 
+Optional (OpenAPI JSON schema validation for responses):
+
+```bash
+pip install "ai-testing-swarm[openapi]"
+```
+
 CLI entrypoint:
 
 ```bash
@@ -49,9 +57,15 @@ Run:
 ai-test --input request.json
 ```
 
-
+Choose a report format:
+
+```bash
+ai-test --input request.json --report-format html
+```
+
+A report is written under:
 
-- `./ai_swarm_reports/<METHOD>_<endpoint>/<METHOD>_<endpoint>_<timestamp
+- `./ai_swarm_reports/<METHOD>_<endpoint>/<METHOD>_<endpoint>_<timestamp>.<json|md|html>`
 
 Reports include:
 - per-test results
@@ -97,6 +111,8 @@ Reports include:
 - OpenAPI **JSON** works by default.
 - OpenAPI **YAML** requires `PyYAML` installed.
 - Base URL is read from `spec.servers[0].url`.
+- When using OpenAPI input, the swarm will also *optionally* validate response status codes against `operation.responses`.
+- If `jsonschema` is installed (via `ai-testing-swarm[openapi]`) and the response is JSON, response bodies are validated against the OpenAPI `application/json` schema.
 - Override with `AI_SWARM_OPENAPI_BASE_URL` if your spec doesn’t include servers.
 
 ---
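
Note (editor): a minimal programmatic sketch of the new `report_format` option described above. The request dict and URL below are placeholders, not part of this release; only `method` and `url` are required by `normalize_request`, and `SwarmOrchestrator.run()` returns `(decision, results)` as shown later in this diff.

```python
# Hedged sketch: drive the swarm from Python instead of the CLI and ask for an
# HTML report. The endpoint below is a placeholder public test host.
from ai_testing_swarm.orchestrator import SwarmOrchestrator

request = {
    "method": "GET",
    "url": "https://httpbin.org/get",           # placeholder
    "headers": {"Accept": "application/json"},  # optional extras are assumptions
}

decision, results = SwarmOrchestrator().run(request, report_format="html")
print(decision, len(results))
```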
@@ -1,17 +1,8 @@
|
|
|
1
|
-
Metadata-Version: 2.4
|
|
2
|
-
Name: ai-testing-swarm
|
|
3
|
-
Version: 0.1.12
|
|
4
|
-
Summary: AI-powered testing swarm
|
|
5
|
-
Author-email: Arif Shah <ashah7775@gmail.com>
|
|
6
|
-
License: MIT
|
|
7
|
-
Requires-Python: >=3.9
|
|
8
|
-
Description-Content-Type: text/markdown
|
|
9
|
-
|
|
10
1
|
# AI Testing Swarm
|
|
11
2
|
|
|
12
3
|
AI Testing Swarm is a **super-advanced, mutation-driven API testing framework** (with optional OpenAPI + OpenAI augmentation) built on top of **pytest**.
|
|
13
4
|
|
|
14
|
-
It generates a large set of deterministic negative/edge/security test cases for an API request, executes them (optionally in parallel, with retries/throttling), and produces a JSON
|
|
5
|
+
It generates a large set of deterministic negative/edge/security test cases for an API request, executes them (optionally in parallel, with retries/throttling), and produces a report (JSON/Markdown/HTML) with summaries.
|
|
15
6
|
|
|
16
7
|
> Notes:
|
|
17
8
|
> - UI testing is not the focus of the current releases.
|
|
@@ -25,6 +16,12 @@ It generates a large set of deterministic negative/edge/security test cases for
|
|
|
25
16
|
pip install ai-testing-swarm
|
|
26
17
|
```
|
|
27
18
|
|
|
19
|
+
Optional (OpenAPI JSON schema validation for responses):
|
|
20
|
+
|
|
21
|
+
```bash
|
|
22
|
+
pip install "ai-testing-swarm[openapi]"
|
|
23
|
+
```
|
|
24
|
+
|
|
28
25
|
CLI entrypoint:
|
|
29
26
|
|
|
30
27
|
```bash
|
|
@@ -49,9 +46,15 @@ Run:
|
|
|
49
46
|
ai-test --input request.json
|
|
50
47
|
```
|
|
51
48
|
|
|
52
|
-
|
|
49
|
+
Choose a report format:
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
ai-test --input request.json --report-format html
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
A report is written under:
|
|
53
56
|
|
|
54
|
-
- `./ai_swarm_reports/<METHOD>_<endpoint>/<METHOD>_<endpoint>_<timestamp
|
|
57
|
+
- `./ai_swarm_reports/<METHOD>_<endpoint>/<METHOD>_<endpoint>_<timestamp>.<json|md|html>`
|
|
55
58
|
|
|
56
59
|
Reports include:
|
|
57
60
|
- per-test results
|
|
@@ -97,6 +100,8 @@ Reports include:
|
|
|
97
100
|
- OpenAPI **JSON** works by default.
|
|
98
101
|
- OpenAPI **YAML** requires `PyYAML` installed.
|
|
99
102
|
- Base URL is read from `spec.servers[0].url`.
|
|
103
|
+
- When using OpenAPI input, the swarm will also *optionally* validate response status codes against `operation.responses`.
|
|
104
|
+
- If `jsonschema` is installed (via `ai-testing-swarm[openapi]`) and the response is JSON, response bodies are validated against the OpenAPI `application/json` schema.
|
|
100
105
|
- Override with `AI_SWARM_OPENAPI_BASE_URL` if your spec doesn’t include servers.
|
|
101
106
|
|
|
102
107
|
---
|
{ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/pyproject.toml
RENAMED

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "ai-testing-swarm"
-version = "0.1.12"
+version = "0.1.14"
 description = "AI-powered testing swarm"
 readme = "README.md"
 requires-python = ">=3.9"
@@ -13,6 +13,11 @@ authors = [
 ]
 license = { text = "MIT" }
 
+[project.optional-dependencies]
+openapi = [
+    "jsonschema>=4.0",
+]
+
 [project.scripts]
 ai-test = "ai_testing_swarm.cli:main"
 
ai_testing_swarm-0.1.14/src/ai_testing_swarm/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.1.14"
{ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/execution_agent.py
RENAMED

@@ -137,6 +137,27 @@ class ExecutionAgent:
         except Exception:
             body_snippet = resp.text
 
+        openapi_validation = []
+        try:
+            ctx = request.get("_openapi") or {}
+            if isinstance(ctx, dict) and ctx.get("spec") and ctx.get("path") and ctx.get("method"):
+                # If method was mutated to something else, we can't reliably map to an OpenAPI op.
+                if str(ctx.get("method")).upper() == str(method).upper():
+                    from ai_testing_swarm.core.openapi_validator import validate_openapi_response
+
+                    issues = validate_openapi_response(
+                        spec=ctx["spec"],
+                        path=ctx["path"],
+                        method=ctx["method"],
+                        status_code=resp.status_code,
+                        response_headers=dict(resp.headers),
+                        response_json=body_snippet if isinstance(body_snippet, (dict, list)) else None,
+                    )
+                    openapi_validation = [i.__dict__ for i in issues]
+        except Exception:
+            # Best-effort only; never fail execution on validator issues.
+            openapi_validation = []
+
         return {
             "name": test["name"],
             "mutation": mutation,
@@ -151,7 +172,9 @@ class ExecutionAgent:
                 "status_code": resp.status_code,
                 "elapsed_ms": elapsed_ms,
                 "attempt": attempt,
+                "headers": dict(resp.headers),
                 "body_snippet": body_snippet,
+                "openapi_validation": openapi_validation,
             },
         }
 
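
Note (editor): each entry appended to `openapi_validation` above is the `__dict__` of an `OpenAPIValidationIssue` (see the new `openapi_validator.py` later in this diff), so a single result's `response` block ends up shaped roughly like this sketch; all values are invented for illustration.

```python
# Illustrative shape of one result's "response" block after this change.
response_block = {
    "status_code": 200,
    "elapsed_ms": 132,
    "attempt": 1,
    "headers": {"Content-Type": "application/json"},
    "body_snippet": {"id": "not-an-int"},
    "openapi_validation": [
        {
            "type": "openapi_schema",
            "message": "Response JSON does not match OpenAPI schema",
            "details": {
                "error_count": 1,
                "errors": [{"message": "'not-an-int' is not of type 'integer'",
                            "path": ["id"],
                            "schema_path": ["properties", "id", "type"]}],
            },
        }
    ],
}
```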
{ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/llm_reasoning_agent.py
RENAMED

@@ -120,7 +120,7 @@ class LLMReasoningAgent:
         if mutation:
             strategy = mutation.get("strategy")
 
-        #
+        # Header tampering / content negotiation tests
         if status_code == 406 and strategy == "headers":
             return {
                 "type": "content_negotiation",
@@ -128,6 +128,14 @@ class LLMReasoningAgent:
                 "explanation": "406 Not Acceptable after Accept/header mutation (expected)"
             }
 
+        # Header tampering accepted (2xx) can be risky depending on the mutation
+        if 200 <= status_code < 300 and strategy == "headers":
+            return {
+                "type": "headers_accepted",
+                "confidence": 0.8,
+                "explanation": "Header mutation still returned 2xx (review if expected)"
+            }
+
         # Wrong-method negative test accepted => risk
         if 200 <= status_code < 300 and strategy == "method_misuse":
             return {
@@ -143,6 +151,14 @@ class LLMReasoningAgent:
                 "explanation": "400 response after required parameter removal"
             }
 
+        # Missing param accepted (2xx) is often weak validation (risky)
+        if 200 <= status_code < 300 and strategy == "missing_param":
+            return {
+                "type": "missing_param_accepted",
+                "confidence": 1.0,
+                "explanation": "Parameter removal still returned 2xx (potential missing validation)"
+            }
+
         if status_code == 400 and strategy == "null_param":
             return {
                 "type": "missing_param",
@@ -150,6 +166,13 @@ class LLMReasoningAgent:
                 "explanation": "400 response after nullifying parameter"
             }
 
+        if 200 <= status_code < 300 and strategy == "null_param":
+            return {
+                "type": "null_param_accepted",
+                "confidence": 1.0,
+                "explanation": "Nullified parameter still returned 2xx (potential weak validation)"
+            }
+
         if status_code == 400 and strategy == "invalid_param":
             return {
                 "type": "invalid_param",
@@ -157,6 +180,13 @@ class LLMReasoningAgent:
                 "explanation": "400 response after invalid parameter mutation"
             }
 
+        if 200 <= status_code < 300 and strategy == "invalid_param":
+            return {
+                "type": "invalid_param_accepted",
+                "confidence": 1.0,
+                "explanation": "Invalid parameter still returned 2xx (potential weak validation)"
+            }
+
         # ✅ FIX: security payload blocked → SAFE
         if status_code >= 400 and status_code < 500 and strategy == "security":
             return {
{ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/agents/release_gate_agent.py
RENAMED

@@ -96,6 +96,11 @@ class ReleaseGateAgent:
     # ⚠️ Ambiguous behavior (release with caution)
     RISKY_FAILURES = {
         "unknown",
+        "missing_param_accepted",
+        "null_param_accepted",
+        "invalid_param_accepted",
+        "headers_accepted",
+        "method_risk",
     }
 
     # ✅ EXPECTED & HEALTHY system behavior
{ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/cli.py
RENAMED

@@ -31,7 +31,7 @@ def normalize_request(payload: dict) -> dict:
         from ai_testing_swarm.core.openapi_loader import load_openapi, build_request_from_openapi
 
         spec = load_openapi(payload["openapi"])
-
+        req = build_request_from_openapi(
             spec,
             path=payload["path"],
             method=payload["method"],
@@ -40,6 +40,14 @@ def normalize_request(payload: dict) -> dict:
             query_params=payload.get("query_params") or {},
             body=payload.get("body"),
         )
+        # Attach OpenAPI context for optional response validation/reporting.
+        req["_openapi"] = {
+            "source": payload["openapi"],
+            "path": payload["path"],
+            "method": str(payload["method"]).upper(),
+            "spec": spec,
+        }
+        return req
 
     # Case 2: already normalized
     required_keys = {"method", "url"}
@@ -92,6 +100,13 @@ def main():
         help="Safety: allow only public test hosts (httpbin/postman-echo/reqres) for this run",
     )
 
+    parser.add_argument(
+        "--report-format",
+        default="json",
+        choices=["json", "md", "html"],
+        help="Report format to write (default: json)",
+    )
+
     args = parser.parse_args()
 
     # ------------------------------------------------------------
@@ -112,7 +127,7 @@ def main():
         import os
         os.environ["AI_SWARM_PUBLIC_ONLY"] = "1"
 
-    decision, results = SwarmOrchestrator().run(request)
+    decision, results = SwarmOrchestrator().run(request, report_format=args.report_format)
 
     # ------------------------------------------------------------
     # Console output
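
Note (editor): for readers of `normalize_request` above, a hedged sketch of the OpenAPI-style input payload it accepts (the spec location and endpoint are placeholders). The written file can then be passed to `ai-test --input request.json --report-format md`.

```python
# Sketch of an OpenAPI-driven input file for the CLI; values are placeholders.
import json

payload = {
    "openapi": "./openapi.json",    # location understood by load_openapi()
    "path": "/pets",                # must exist under spec["paths"]
    "method": "GET",
    "query_params": {"limit": 10},  # optional
    "body": None,                   # optional
}

with open("request.json", "w", encoding="utf-8") as f:
    json.dump(payload, f, indent=2)
```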
ai_testing_swarm-0.1.14/src/ai_testing_swarm/core/openapi_validator.py
@@ -0,0 +1,157 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+
+@dataclass
+class OpenAPIValidationIssue:
+    type: str
+    message: str
+    details: dict | None = None
+
+
+def _matches_status_key(status_code: int, key: str) -> bool:
+    """OpenAPI response keys can be explicit ("200"), wildcard ("2XX"), or "default"."""
+    key = str(key).strip().upper()
+    if key == "DEFAULT":
+        return True
+    if len(key) == 3 and key.endswith("XX") and key[0].isdigit():
+        return int(key[0]) == int(status_code / 100)
+    return key == str(status_code)
+
+
+def _get_operation(spec: dict, *, path: str, method: str) -> dict | None:
+    paths = spec.get("paths") or {}
+    op = (paths.get(path) or {}).get(str(method).lower())
+    if isinstance(op, dict):
+        return op
+    return None
+
+
+def validate_openapi_response(
+    *,
+    spec: dict,
+    path: str,
+    method: str,
+    status_code: int | None,
+    response_headers: dict | None = None,
+    response_json=None,
+) -> list[OpenAPIValidationIssue]:
+    """Validate a response against an OpenAPI operation.
+
+    - Status code must be declared in operation.responses (supports 2XX and default)
+    - If response appears to be JSON and jsonschema is installed, validate body
+
+    Returns a list of issues (empty => OK or validation skipped).
+    """
+
+    issues: list[OpenAPIValidationIssue] = []
+
+    op = _get_operation(spec, path=path, method=method)
+    if not op:
+        return [
+            OpenAPIValidationIssue(
+                type="openapi_operation_missing",
+                message=f"Operation not found in spec: {str(method).upper()} {path}",
+            )
+        ]
+
+    responses = op.get("responses") or {}
+    if status_code is None:
+        # Network errors etc. Not an OpenAPI mismatch.
+        return issues
+
+    # --------------------
+    # Status validation
+    # --------------------
+    declared_keys = [str(k) for k in responses.keys()]
+    if declared_keys:
+        if not any(_matches_status_key(int(status_code), k) for k in declared_keys):
+            issues.append(
+                OpenAPIValidationIssue(
+                    type="openapi_status",
+                    message=f"Status {status_code} not declared in OpenAPI responses: {declared_keys}",
+                    details={"status_code": status_code, "declared": declared_keys},
+                )
+            )
+
+    # --------------------
+    # JSON schema validation (optional)
+    # --------------------
+    ct = ""
+    if response_headers and isinstance(response_headers, dict):
+        for k, v in response_headers.items():
+            if str(k).lower() == "content-type" and v:
+                ct = str(v).lower()
+                break
+
+    is_json = isinstance(response_json, (dict, list))
+    if not is_json:
+        # if body wasn't parsed as JSON, skip schema validation
+        return issues
+
+    if ct and "json" not in ct:
+        # Respect explicit non-JSON content-type
+        return issues
+
+    # Find best matching response schema: exact > 2XX > default
+    chosen_resp: dict | None = None
+    for k in (str(status_code), f"{int(status_code/100)}XX", "default"):
+        if k in responses:
+            chosen_resp = responses.get(k)
+            break
+
+    if not isinstance(chosen_resp, dict):
+        return issues
+
+    content = chosen_resp.get("content") or {}
+    schema = None
+    if isinstance(content, dict):
+        app_json = content.get("application/json") or content.get("application/*+json")
+        if isinstance(app_json, dict):
+            schema = app_json.get("schema")
+
+    if not schema:
+        return issues
+
+    try:
+        import jsonschema  # type: ignore
+
+        # Resolve in-document refs like #/components/schemas/X
+        resolver = jsonschema.RefResolver.from_schema(spec)  # type: ignore[attr-defined]
+        validator_cls = jsonschema.validators.validator_for(schema)  # type: ignore[attr-defined]
+        validator_cls.check_schema(schema)
+        validator = validator_cls(schema, resolver=resolver)
+        errors = sorted(validator.iter_errors(response_json), key=lambda e: list(e.path))
+        if errors:
+            # Keep just the first few errors for signal.
+            details = {
+                "error_count": len(errors),
+                "errors": [
+                    {
+                        "message": e.message,
+                        "path": list(e.path),
+                        "schema_path": list(e.schema_path),
+                    }
+                    for e in errors[:5]
+                ],
+            }
+            issues.append(
+                OpenAPIValidationIssue(
+                    type="openapi_schema",
+                    message="Response JSON does not match OpenAPI schema",
+                    details=details,
+                )
+            )
+    except ImportError:
+        # Optional dependency not installed: status validation still works.
+        return issues
+    except Exception as e:
+        issues.append(
+            OpenAPIValidationIssue(
+                type="openapi_schema_error",
+                message=f"OpenAPI schema validation failed: {e}",
+            )
+        )
+
+    return issues
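
Note (editor): a quick usage sketch of the validator above with a made-up spec. An empty list means the response is either conformant or validation was skipped; "default" matches any status code, and an operation without a `content` schema skips body validation.

```python
from ai_testing_swarm.core.openapi_validator import validate_openapi_response

spec = {
    "openapi": "3.0.0",
    "info": {"title": "demo", "version": "1"},
    "paths": {"/ping": {"get": {"responses": {"default": {"description": "anything"}}}}},
}

issues = validate_openapi_response(
    spec=spec,
    path="/ping",
    method="GET",
    status_code=503,  # "default" still matches, so no openapi_status issue
    response_headers={"content-type": "application/json"},
    response_json={"ok": False},
)
assert issues == []  # no declared schema either, so body validation is skipped
```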
{ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm/orchestrator.py
RENAMED

@@ -19,6 +19,15 @@ EXPECTED_FAILURES = {
     "content_negotiation",
 }
 
+RISKY_FAILURES = {
+    "unknown",
+    "missing_param_accepted",
+    "null_param_accepted",
+    "invalid_param_accepted",
+    "headers_accepted",
+    "method_risk",
+}
+
 class SwarmOrchestrator:
     """
     Central brain of the AI Testing Swarm.
@@ -31,7 +40,7 @@ class SwarmOrchestrator:
         self.learner = LearningAgent()
         self.release_gate = ReleaseGateAgent()
 
-    def run(self, request: dict):
+    def run(self, request: dict, *, report_format: str = "json"):
         """Runs the full AI testing swarm and returns (decision, results)."""
 
         # Safety hook (currently no-op; kept for backward compatibility)
@@ -64,7 +73,9 @@ class SwarmOrchestrator:
                 "confidence": classification.get("confidence", 1.0),
                 "failure_type": classification.get("type"),
                 "status": (
-                    "PASSED" if classification.get("type") in EXPECTED_FAILURES
+                    "PASSED" if classification.get("type") in EXPECTED_FAILURES
+                    else "RISK" if classification.get("type") in RISKY_FAILURES
+                    else "FAILED"
                 ),
             })
             # Optional learning step
@@ -111,8 +122,8 @@ class SwarmOrchestrator:
         except Exception as e:
             meta["ai_summary_error"] = str(e)
 
-        report_path = write_report(request, results, meta=meta)
-        logger.info("📄 Swarm
-        print(f"📄 Swarm
+        report_path = write_report(request, results, meta=meta, report_format=report_format)
+        logger.info("📄 Swarm report written to: %s", report_path)
+        print(f"📄 Swarm report written to: {report_path}")
 
         return decision, results
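
Note (editor): the PASSED/RISK/FAILED expression added above can be read as the small decision rule sketched here (not package code; `EXPECTED_FAILURES` is only partially visible in this diff, so it is passed in as a parameter).

```python
# Sketch of the status bucketing introduced in SwarmOrchestrator.run().
RISKY = {
    "unknown", "missing_param_accepted", "null_param_accepted",
    "invalid_param_accepted", "headers_accepted", "method_risk",
}

def bucket(failure_type: str, expected: set, risky: set = RISKY) -> str:
    if failure_type in expected:
        return "PASSED"  # negative test behaved as the API contract expects
    if failure_type in risky:
        return "RISK"    # ambiguous / weak-validation signal, release with caution
    return "FAILED"

print(bucket("content_negotiation", expected={"content_negotiation"}))    # PASSED
print(bucket("missing_param_accepted", expected={"content_negotiation"}))  # RISK
```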
ai_testing_swarm-0.1.14/src/ai_testing_swarm/reporting/report_writer.py
@@ -0,0 +1,327 @@
+import json
+import os
+import re
+from datetime import datetime
+from pathlib import Path
+from urllib.parse import urlparse
+
+
+# ============================================================
+# 🔍 FIND CALLER PROJECT ROOT (NOT PACKAGE ROOT)
+# ============================================================
+def find_execution_root() -> Path:
+    """
+    Resolve project root based on WHERE tests are executed from,
+    not where this package lives.
+    """
+    current = Path.cwd().resolve()
+
+    while current != current.parent:
+        if any(
+            (current / marker).exists()
+            for marker in (
+                "pyproject.toml",
+                "setup.py",
+                "requirements.txt",
+                ".git",
+            )
+        ):
+            return current
+        current = current.parent
+
+    # Fallback: use cwd directly
+    return Path.cwd().resolve()
+
+
+PROJECT_ROOT = find_execution_root()
+REPORTS_DIR = PROJECT_ROOT / "ai_swarm_reports"
+
+
+# ============================================================
+# 🧹 UTILS
+# ============================================================
+def extract_endpoint_name(method: str, url: str) -> str:
+    """
+    POST https://preprod-api.getepichome.in/api/validate-gst/
+    -> POST_validate-gst
+    """
+    parsed = urlparse(url)
+    parts = [p for p in parsed.path.split("/") if p]
+
+    endpoint = parts[-1] if parts else "root"
+    endpoint = re.sub(r"[^a-zA-Z0-9_-]", "-", endpoint)
+
+    return f"{method}_{endpoint}"
+
+
+# ============================================================
+# 📝 REPORT WRITER
+# ============================================================
+from ai_testing_swarm.core.config import AI_SWARM_SLA_MS
+
+SENSITIVE_HEADER_KEYS = {
+    "authorization",
+    "cookie",
+    "set-cookie",
+    "api-token",
+    "api-secret-key",
+    "x-api-key",
+}
+
+
+def _redact_headers(headers: dict) -> dict:
+    if not isinstance(headers, dict):
+        return headers
+    out = {}
+    for k, v in headers.items():
+        if str(k).lower() in SENSITIVE_HEADER_KEYS and v is not None:
+            out[k] = "***REDACTED***"
+        else:
+            out[k] = v
+    return out
+
+
+def _redact_results(results: list) -> list:
+    redacted = []
+    for r in results:
+        r = dict(r)
+        req = dict(r.get("request") or {})
+        req["headers"] = _redact_headers(req.get("headers") or {})
+        r["request"] = req
+        redacted.append(r)
+    return redacted
+
+
+def _markdown_escape(s: str) -> str:
+    return str(s).replace("`", "\\`")
+
+
+def _render_markdown(report: dict) -> str:
+    lines: list[str] = []
+    lines.append(f"# AI Testing Swarm Report")
+    lines.append("")
+    lines.append(f"**Endpoint:** `{_markdown_escape(report.get('endpoint'))}` ")
+    lines.append(f"**Run time:** `{_markdown_escape(report.get('run_time'))}` ")
+    lines.append(f"**Total tests:** `{report.get('total_tests')}`")
+    lines.append("")
+
+    summary = report.get("summary") or {}
+    counts_ft = summary.get("counts_by_failure_type") or {}
+    counts_sc = summary.get("counts_by_status_code") or {}
+
+    lines.append("## Summary")
+    lines.append("")
+    lines.append("### Counts by failure type")
+    for k, v in sorted(counts_ft.items(), key=lambda kv: (-kv[1], kv[0])):
+        lines.append(f"- **{_markdown_escape(k)}**: {v}")
+
+    lines.append("")
+    lines.append("### Counts by status code")
+    for k, v in sorted(counts_sc.items(), key=lambda kv: (-kv[1], kv[0])):
+        lines.append(f"- **{_markdown_escape(k)}**: {v}")
+
+    # Risky findings
+    results = report.get("results") or []
+    risky = [r for r in results if str(r.get("status")) == "RISK"]
+    failed = [r for r in results if str(r.get("status")) == "FAILED"]
+
+    lines.append("")
+    lines.append("## Top risky findings")
+    if not risky and not failed:
+        lines.append("- (none)")
+    else:
+        top = (risky + failed)[:10]
+        for r in top:
+            resp = r.get("response") or {}
+            sc = resp.get("status_code")
+            lines.append(
+                f"- **{_markdown_escape(r.get('status'))}** `{_markdown_escape(r.get('name'))}` "
+                f"(status={sc}, failure_type={_markdown_escape(r.get('failure_type'))})"
+            )
+
+    return "\n".join(lines) + "\n"
+
+
+def _html_escape(s: str) -> str:
+    return (
+        str(s)
+        .replace("&", "&amp;")
+        .replace("<", "&lt;")
+        .replace(">", "&gt;")
+        .replace('"', "&quot;")
+        .replace("'", "&#x27;")
+    )
+
+
+def _render_html(report: dict) -> str:
+    endpoint = _html_escape(report.get("endpoint"))
+    run_time = _html_escape(report.get("run_time"))
+    total_tests = report.get("total_tests")
+    summary = report.get("summary") or {}
+
+    results = report.get("results") or []
+    risky = [r for r in results if str(r.get("status")) == "RISK"]
+    failed = [r for r in results if str(r.get("status")) == "FAILED"]
+    top_risky = (risky + failed)[:10]
+
+    def _kv_list(d: dict) -> str:
+        items = sorted((d or {}).items(), key=lambda kv: (-kv[1], kv[0]))
+        return "".join(f"<li><b>{_html_escape(k)}</b>: {v}</li>" for k, v in items) or "<li>(none)</li>"
+
+    def _pre(obj) -> str:
+        try:
+            txt = json.dumps(obj, indent=2, ensure_ascii=False)
+        except Exception:
+            txt = str(obj)
+        return f"<pre>{_html_escape(txt)}</pre>"
+
+    rows = []
+    for r in results:
+        resp = r.get("response") or {}
+        issues = resp.get("openapi_validation") or []
+        status = _html_escape(r.get("status"))
+        name = _html_escape(r.get("name"))
+        failure_type = _html_escape(r.get("failure_type"))
+        sc = _html_escape(resp.get("status_code"))
+        elapsed = _html_escape(resp.get("elapsed_ms"))
+        badge = {
+            "PASSED": "badge passed",
+            "FAILED": "badge failed",
+            "RISK": "badge risk",
+        }.get(r.get("status"), "badge")
+
+        rows.append(
+            "<details class='case'>"
+            f"<summary><span class='{badge}'>{status}</span> "
+            f"<b>{name}</b> — status={sc}, elapsed_ms={elapsed}, failure_type={failure_type}"
+            f"{(' — openapi_issues=' + str(len(issues))) if issues else ''}"
+            "</summary>"
+            "<div class='grid'>"
+            f"<div><h4>Request</h4>{_pre(r.get('request'))}</div>"
+            f"<div><h4>Response</h4>{_pre(resp)}</div>"
+            f"<div><h4>Reasoning</h4>{_pre({k: r.get(k) for k in ('reason','confidence','failure_type','status')})}</div>"
+            "</div>"
+            "</details>"
+        )
+
+    top_list = "".join(
+        f"<li><b>{_html_escape(r.get('status'))}</b> {_html_escape(r.get('name'))}"
+        f" (failure_type={_html_escape(r.get('failure_type'))})</li>"
+        for r in top_risky
+    ) or "<li>(none)</li>"
+
+    css = """
+    body{font-family:ui-sans-serif,system-ui,Segoe UI,Roboto,Arial; margin:20px;}
+    .meta{color:#444;margin-bottom:16px}
+    .grid{display:grid; grid-template-columns: 1fr 1fr 1fr; gap:12px; margin-top:10px}
+    pre{background:#0b1020;color:#e6e6e6;padding:10px;border-radius:8px;overflow:auto;max-height:360px}
+    details.case{border:1px solid #ddd; border-radius:10px; padding:10px; margin:10px 0}
+    summary{cursor:pointer}
+    .badge{display:inline-block; padding:2px 8px; border-radius:999px; font-size:12px; margin-right:8px; border:1px solid #aaa}
+    .badge.passed{background:#e6ffed;border-color:#36b37e}
+    .badge.failed{background:#ffebe6;border-color:#ff5630}
+    .badge.risk{background:#fff7e6;border-color:#ffab00}
+    """
+
+    return f"""<!doctype html>
+<html>
+<head>
+<meta charset='utf-8'/>
+<title>AI Testing Swarm Report</title>
+<style>{css}</style>
+</head>
+<body>
+<h1>AI Testing Swarm Report</h1>
+<div class='meta'>
+<div><b>Endpoint:</b> <code>{endpoint}</code></div>
+<div><b>Run time:</b> <code>{run_time}</code></div>
+<div><b>Total tests:</b> <code>{total_tests}</code></div>
+</div>
+
+<h2>Summary</h2>
+<div class='grid'>
+<div><h3>Counts by failure type</h3><ul>{_kv_list(summary.get('counts_by_failure_type') or {})}</ul></div>
+<div><h3>Counts by status code</h3><ul>{_kv_list(summary.get('counts_by_status_code') or {})}</ul></div>
+<div><h3>Top risky findings</h3><ul>{top_list}</ul></div>
+</div>
+
+<h2>Results</h2>
+{''.join(rows)}
+
+</body>
+</html>"""
+
+
+def write_report(
+    request: dict,
+    results: list,
+    *,
+    meta: dict | None = None,
+    report_format: str = "json",
+) -> str:
+    """Write a swarm report.
+
+    report_format:
+    - json (default): full machine-readable report
+    - md: human-readable markdown summary
+    - html: single-file HTML report with collapsible sections
+    """
+
+    REPORTS_DIR.mkdir(exist_ok=True)
+
+    method = request.get("method", "UNKNOWN")
+    url = request.get("url", "")
+
+    endpoint_name = extract_endpoint_name(method, url)
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+    endpoint_dir = REPORTS_DIR / endpoint_name
+    endpoint_dir.mkdir(parents=True, exist_ok=True)
+
+    safe_results = _redact_results(results)
+
+    summary = {
+        "counts_by_failure_type": {},
+        "counts_by_status_code": {},
+        "slow_tests": [],
+    }
+
+    for r in safe_results:
+        ft = r.get("failure_type") or "unknown"
+        summary["counts_by_failure_type"][ft] = summary["counts_by_failure_type"].get(ft, 0) + 1
+
+        sc = (r.get("response") or {}).get("status_code")
+        sc_key = str(sc)
+        summary["counts_by_status_code"][sc_key] = summary["counts_by_status_code"].get(sc_key, 0) + 1
+
+        elapsed = (r.get("response") or {}).get("elapsed_ms")
+        if isinstance(elapsed, int) and elapsed > AI_SWARM_SLA_MS:
+            summary["slow_tests"].append({"name": r.get("name"), "elapsed_ms": elapsed})
+
+    report = {
+        "endpoint": f"{method} {url}",
+        "run_time": timestamp,
+        "total_tests": len(safe_results),
+        "summary": summary,
+        "meta": meta or {},
+        "results": safe_results,
+    }
+
+    report_format = (report_format or "json").lower().strip()
+    if report_format not in {"json", "md", "html"}:
+        report_format = "json"
+
+    ext = {"json": "json", "md": "md", "html": "html"}[report_format]
+    report_path = endpoint_dir / f"{endpoint_name}_{timestamp}.{ext}"
+
+    if report_format == "json":
+        with open(report_path, "w", encoding="utf-8") as f:
+            json.dump(report, f, indent=2)
+    elif report_format == "md":
+        with open(report_path, "w", encoding="utf-8") as f:
+            f.write(_render_markdown(report))
+    else:
+        with open(report_path, "w", encoding="utf-8") as f:
+            f.write(_render_html(report))
+
+    return str(report_path)
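
Note (editor): a usage sketch for the rewritten report writer; the single result below is invented. The `Authorization` header is redacted in the written file, and the output path follows the `<METHOD>_<endpoint>/<METHOD>_<endpoint>_<timestamp>.<ext>` pattern documented in the README changes.

```python
from ai_testing_swarm.reporting.report_writer import write_report

# Invented one-result run, just to show the call shape and header redaction.
request = {"method": "GET", "url": "https://httpbin.org/get"}  # placeholder
results = [{
    "name": "missing_param_q",
    "status": "RISK",
    "failure_type": "missing_param_accepted",
    "request": {"method": "GET", "url": "https://httpbin.org/get",
                "headers": {"Authorization": "Bearer secret"}},  # redacted on write
    "response": {"status_code": 200, "elapsed_ms": 80, "openapi_validation": []},
}]

path = write_report(request, results, meta={"note": "demo"}, report_format="md")
print(path)  # ./ai_swarm_reports/GET_get/GET_get_<timestamp>.md
```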
ai_testing_swarm-0.1.12/README.md → ai_testing_swarm-0.1.14/src/ai_testing_swarm.egg-info/PKG-INFO
RENAMED

@@ -1,8 +1,19 @@
+Metadata-Version: 2.4
+Name: ai-testing-swarm
+Version: 0.1.14
+Summary: AI-powered testing swarm
+Author-email: Arif Shah <ashah7775@gmail.com>
+License: MIT
+Requires-Python: >=3.9
+Description-Content-Type: text/markdown
+Provides-Extra: openapi
+Requires-Dist: jsonschema>=4.0; extra == "openapi"
+
 # AI Testing Swarm
 
 AI Testing Swarm is a **super-advanced, mutation-driven API testing framework** (with optional OpenAPI + OpenAI augmentation) built on top of **pytest**.
 
-It generates a large set of deterministic negative/edge/security test cases for an API request, executes them (optionally in parallel, with retries/throttling), and produces a JSON
+It generates a large set of deterministic negative/edge/security test cases for an API request, executes them (optionally in parallel, with retries/throttling), and produces a report (JSON/Markdown/HTML) with summaries.
 
 > Notes:
 > - UI testing is not the focus of the current releases.
@@ -16,6 +27,12 @@ It generates a large set of deterministic negative/edge/security test cases for
 pip install ai-testing-swarm
 ```
 
+Optional (OpenAPI JSON schema validation for responses):
+
+```bash
+pip install "ai-testing-swarm[openapi]"
+```
+
 CLI entrypoint:
 
 ```bash
@@ -40,9 +57,15 @@ Run:
 ai-test --input request.json
 ```
 
-
+Choose a report format:
+
+```bash
+ai-test --input request.json --report-format html
+```
+
+A report is written under:
 
-- `./ai_swarm_reports/<METHOD>_<endpoint>/<METHOD>_<endpoint>_<timestamp
+- `./ai_swarm_reports/<METHOD>_<endpoint>/<METHOD>_<endpoint>_<timestamp>.<json|md|html>`
 
 Reports include:
 - per-test results
@@ -88,6 +111,8 @@ Reports include:
 - OpenAPI **JSON** works by default.
 - OpenAPI **YAML** requires `PyYAML` installed.
 - Base URL is read from `spec.servers[0].url`.
+- When using OpenAPI input, the swarm will also *optionally* validate response status codes against `operation.responses`.
+- If `jsonschema` is installed (via `ai-testing-swarm[openapi]`) and the response is JSON, response bodies are validated against the OpenAPI `application/json` schema.
 - Override with `AI_SWARM_OPENAPI_BASE_URL` if your spec doesn’t include servers.
 
 ---
{ai_testing_swarm-0.1.12 → ai_testing_swarm-0.1.14}/src/ai_testing_swarm.egg-info/SOURCES.txt
RENAMED

@@ -7,6 +7,7 @@ src/ai_testing_swarm.egg-info/PKG-INFO
 src/ai_testing_swarm.egg-info/SOURCES.txt
 src/ai_testing_swarm.egg-info/dependency_links.txt
 src/ai_testing_swarm.egg-info/entry_points.txt
+src/ai_testing_swarm.egg-info/requires.txt
 src/ai_testing_swarm.egg-info/top_level.txt
 src/ai_testing_swarm/agents/__init__.py
 src/ai_testing_swarm/agents/execution_agent.py
@@ -22,9 +23,11 @@ src/ai_testing_swarm/core/config.py
 src/ai_testing_swarm/core/curl_parser.py
 src/ai_testing_swarm/core/openai_client.py
 src/ai_testing_swarm/core/openapi_loader.py
+src/ai_testing_swarm/core/openapi_validator.py
 src/ai_testing_swarm/core/safety.py
 src/ai_testing_swarm/reporting/__init__.py
 src/ai_testing_swarm/reporting/report_writer.py
 tests/test_openapi_loader.py
+tests/test_openapi_validator.py
 tests/test_policy_expected_negatives.py
 tests/test_swarm_api.py
ai_testing_swarm-0.1.14/tests/test_openapi_validator.py
@@ -0,0 +1,77 @@
+import pytest
+
+from ai_testing_swarm.core.openapi_validator import validate_openapi_response
+
+
+def _spec_with_schema():
+    return {
+        "openapi": "3.0.0",
+        "info": {"title": "t", "version": "1"},
+        "paths": {
+            "/pets": {
+                "get": {
+                    "responses": {
+                        "200": {
+                            "description": "ok",
+                            "content": {
+                                "application/json": {
+                                    "schema": {
+                                        "type": "object",
+                                        "properties": {"id": {"type": "integer"}},
+                                        "required": ["id"],
+                                    }
+                                }
+                            },
+                        },
+                        "400": {"description": "bad"},
+                    }
+                }
+            }
+        },
+    }
+
+
+def test_openapi_status_validation_fails_on_undeclared_status():
+    spec = _spec_with_schema()
+    issues = validate_openapi_response(
+        spec=spec,
+        path="/pets",
+        method="GET",
+        status_code=418,
+        response_headers={"content-type": "application/json"},
+        response_json={"id": 1},
+    )
+    assert any(i.type == "openapi_status" for i in issues)
+
+
+def test_openapi_status_validation_supports_wildcard_2xx():
+    spec = {
+        "openapi": "3.0.0",
+        "info": {"title": "t", "version": "1"},
+        "paths": {"/x": {"get": {"responses": {"2XX": {"description": "ok"}}}}},
+    }
+    issues = validate_openapi_response(
+        spec=spec,
+        path="/x",
+        method="GET",
+        status_code=201,
+        response_headers={"content-type": "application/json"},
+        response_json={"ok": True},
+    )
+    assert not any(i.type == "openapi_status" for i in issues)
+
+
+def test_openapi_schema_validation_flags_mismatch_when_jsonschema_installed():
+    pytest.importorskip("jsonschema")
+    spec = _spec_with_schema()
+
+    issues = validate_openapi_response(
+        spec=spec,
+        path="/pets",
+        method="GET",
+        status_code=200,
+        response_headers={"content-type": "application/json"},
+        response_json={"id": "not-an-int"},
+    )
+
+    assert any(i.type == "openapi_schema" for i in issues)
ai_testing_swarm-0.1.12/src/ai_testing_swarm/__init__.py
@@ -1 +0,0 @@
-__version__ = "0.1.12"
ai_testing_swarm-0.1.12/src/ai_testing_swarm/reporting/report_writer.py
@@ -1,147 +0,0 @@
-import json
-import os
-import re
-from datetime import datetime
-from pathlib import Path
-from urllib.parse import urlparse
-
-
-# ============================================================
-# 🔍 FIND CALLER PROJECT ROOT (NOT PACKAGE ROOT)
-# ============================================================
-def find_execution_root() -> Path:
-    """
-    Resolve project root based on WHERE tests are executed from,
-    not where this package lives.
-    """
-    current = Path.cwd().resolve()
-
-    while current != current.parent:
-        if any(
-            (current / marker).exists()
-            for marker in (
-                "pyproject.toml",
-                "setup.py",
-                "requirements.txt",
-                ".git",
-            )
-        ):
-            return current
-        current = current.parent
-
-    # Fallback: use cwd directly
-    return Path.cwd().resolve()
-
-
-PROJECT_ROOT = find_execution_root()
-REPORTS_DIR = PROJECT_ROOT / "ai_swarm_reports"
-
-
-# ============================================================
-# 🧹 UTILS
-# ============================================================
-def extract_endpoint_name(method: str, url: str) -> str:
-    """
-    POST https://preprod-api.getepichome.in/api/validate-gst/
-    -> POST_validate-gst
-    """
-    parsed = urlparse(url)
-    parts = [p for p in parsed.path.split("/") if p]
-
-    endpoint = parts[-1] if parts else "root"
-    endpoint = re.sub(r"[^a-zA-Z0-9_-]", "-", endpoint)
-
-    return f"{method}_{endpoint}"
-
-
-# ============================================================
-# 📝 REPORT WRITER
-# ============================================================
-from ai_testing_swarm.core.config import AI_SWARM_SLA_MS
-
-SENSITIVE_HEADER_KEYS = {
-    "authorization",
-    "cookie",
-    "set-cookie",
-    "api-token",
-    "api-secret-key",
-    "x-api-key",
-}
-
-
-def _redact_headers(headers: dict) -> dict:
-    if not isinstance(headers, dict):
-        return headers
-    out = {}
-    for k, v in headers.items():
-        if str(k).lower() in SENSITIVE_HEADER_KEYS and v is not None:
-            out[k] = "***REDACTED***"
-        else:
-            out[k] = v
-    return out
-
-
-def _redact_results(results: list) -> list:
-    redacted = []
-    for r in results:
-        r = dict(r)
-        req = dict(r.get("request") or {})
-        req["headers"] = _redact_headers(req.get("headers") or {})
-        r["request"] = req
-        redacted.append(r)
-    return redacted
-
-
-def write_report(request: dict, results: list, *, meta: dict | None = None) -> str:
-    REPORTS_DIR.mkdir(exist_ok=True)
-
-    method = request.get("method", "UNKNOWN")
-    url = request.get("url", "")
-
-    endpoint_name = extract_endpoint_name(method, url)
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-
-    # report_path = REPORTS_DIR / f"{timestamp}_{endpoint_name}.json"
-    # 🔥 PER-ENDPOINT FOLDER
-    endpoint_dir = REPORTS_DIR / endpoint_name
-    endpoint_dir.mkdir(parents=True, exist_ok=True)
-
-    # 🔥 FILE NAME FORMAT
-    report_path = endpoint_dir / f"{endpoint_name}_{timestamp}.json"
-
-    safe_results = _redact_results(results)
-
-    # Summary (counts + performance)
-    summary = {
-        "counts_by_failure_type": {},
-        "counts_by_status_code": {},
-        "slow_tests": [],
-    }
-
-    for r in safe_results:
-        ft = r.get("failure_type") or "unknown"
-        summary["counts_by_failure_type"][ft] = summary["counts_by_failure_type"].get(ft, 0) + 1
-
-        sc = (r.get("response") or {}).get("status_code")
-        sc_key = str(sc)
-        summary["counts_by_status_code"][sc_key] = summary["counts_by_status_code"].get(sc_key, 0) + 1
-
-        elapsed = (r.get("response") or {}).get("elapsed_ms")
-        if isinstance(elapsed, int) and elapsed > AI_SWARM_SLA_MS:
-            summary["slow_tests"].append({"name": r.get("name"), "elapsed_ms": elapsed})
-
-    report = {
-        "endpoint": f"{method} {url}",
-        "run_time": timestamp,
-        "total_tests": len(safe_results),
-        "summary": summary,
-        "meta": meta or {},
-        "results": safe_results,
-    }
-
-    with open(report_path, "w") as f:
-        json.dump(report, f, indent=2)
-
-    print(f"📄 Swarm JSON report written to: {report_path}")
-
-    return str(report_path)
All remaining files are unchanged between 0.1.12 and 0.1.14 (only the versioned directory prefix differs); see the +0 -0 entries in the file list at the top of this diff.