@kirrosh/zond 0.16.0 → 0.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +132 -112
- package/README.md +3 -10
- package/package.json +2 -3
- package/src/cli/commands/export.ts +144 -0
- package/src/cli/commands/generate.ts +31 -0
- package/src/cli/commands/run.ts +22 -5
- package/src/cli/commands/sync.ts +240 -0
- package/src/cli/index.ts +54 -10
- package/src/core/diagnostics/db-analysis.ts +79 -7
- package/src/core/diagnostics/failure-hints.ts +39 -0
- package/src/core/exporter/postman.ts +963 -0
- package/src/core/generator/data-factory.ts +38 -3
- package/src/core/generator/index.ts +1 -1
- package/src/core/generator/openapi-reader.ts +6 -0
- package/src/core/generator/serializer.ts +17 -2
- package/src/core/generator/suite-generator.ts +163 -14
- package/src/core/generator/types.ts +1 -0
- package/src/core/meta/meta-store.ts +78 -0
- package/src/core/meta/types.ts +21 -0
- package/src/core/parser/schema.ts +12 -2
- package/src/core/parser/types.ts +12 -1
- package/src/core/parser/variables.ts +3 -0
- package/src/core/parser/yaml-parser.ts +2 -1
- package/src/core/runner/assertions.ts +44 -20
- package/src/core/runner/execute-run.ts +31 -8
- package/src/core/runner/executor.ts +34 -8
- package/src/core/runner/http-client.ts +1 -1
- package/src/core/runner/types.ts +1 -0
- package/src/core/sync/spec-differ.ts +38 -0
- package/src/cli/commands/mcp.ts +0 -16
- package/src/mcp/descriptions.ts +0 -47
- package/src/mcp/server.ts +0 -38
- package/src/mcp/tools/ci-init.ts +0 -54
- package/src/mcp/tools/coverage-analysis.ts +0 -141
- package/src/mcp/tools/describe-endpoint.ts +0 -27
- package/src/mcp/tools/manage-server.ts +0 -86
- package/src/mcp/tools/query-db.ts +0 -84
- package/src/mcp/tools/run-tests.ts +0 -116
- package/src/mcp/tools/send-request.ts +0 -51
- package/src/mcp/tools/setup-api.ts +0 -88
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
import { join } from "path";
|
|
2
|
+
import { mkdir } from "fs/promises";
|
|
3
|
+
import {
|
|
4
|
+
readOpenApiSpec,
|
|
5
|
+
extractEndpoints,
|
|
6
|
+
extractSecuritySchemes,
|
|
7
|
+
serializeSuite,
|
|
8
|
+
} from "../../core/generator/index.ts";
|
|
9
|
+
import { generateSuites } from "../../core/generator/suite-generator.ts";
|
|
10
|
+
import { filterByTag } from "../../core/generator/chunker.ts";
|
|
11
|
+
import { readMeta, writeMeta, hashSpec, buildFileMeta } from "../../core/meta/meta-store.ts";
|
|
12
|
+
import { diffEndpoints } from "../../core/sync/spec-differ.ts";
|
|
13
|
+
import { printError, printSuccess, printWarning } from "../output.ts";
|
|
14
|
+
import { jsonOk, jsonError, printJson } from "../json-envelope.ts";
|
|
15
|
+
import { version as ZOND_VERSION } from "../../../package.json";
|
|
16
|
+
import { getDb } from "../../db/schema.ts";
|
|
17
|
+
import { findCollectionByTestPath, updateCollection } from "../../db/queries.ts";
|
|
18
|
+
|
|
19
|
+
/** Options accepted by the `zond sync` command. */
export interface SyncOptions {
  /** Path of the OpenAPI spec to sync against (recorded as `specUrl` in metadata). */
  specPath: string;
  /** Directory holding the generated YAML test suites and the .zond-meta.json file. */
  testsDir: string;
  /** When true, report what would be generated without writing any files. */
  dryRun?: boolean;
  /** Limit sync to endpoints carrying this OpenAPI tag. */
  tag?: string;
  /** Emit results as a JSON envelope instead of human-readable console output. */
  json?: boolean;
}
|
|
26
|
+
|
|
27
|
+
/**
 * Implements `zond sync`: compares the current OpenAPI spec against the
 * metadata recorded at generate time, generates test suites for newly added
 * endpoints only, and warns about endpoints that disappeared from the spec.
 *
 * Existing test files are never overwritten or deleted — conflicting suites
 * are skipped with a warning so manual edits are preserved.
 *
 * @param options - See {@link SyncOptions}.
 * @returns Process exit code: 0 on success (including "nothing to sync"),
 *          2 on missing metadata or any unexpected error.
 */
export async function syncCommand(options: SyncOptions): Promise<number> {
  try {
    // Load existing metadata; sync is meaningless without a prior `generate`.
    const meta = await readMeta(options.testsDir);
    if (!meta) {
      const msg =
        "No .zond-meta.json found. Run `zond generate <spec> --output <dir>` first to initialize metadata.";
      if (options.json) {
        printJson(jsonError("sync", [msg]));
      } else {
        printError(msg);
      }
      return 2;
    }

    // Load current spec
    const doc = await readOpenApiSpec(options.specPath);
    const specContent = JSON.stringify(doc);
    const currentHash = hashSpec(specContent);

    // Fast path: identical hash means the spec has not changed since last sync.
    if (currentHash === meta.specHash) {
      const msg = "Spec unchanged — nothing to sync.";
      if (options.json) {
        printJson(jsonOk("sync", { newEndpoints: [], generatedFiles: [], removedKeys: [], specChanged: false }, [msg]));
      } else {
        console.log(msg);
      }
      return 0;
    }

    // Extract current endpoints
    let currentEndpoints = extractEndpoints(doc);
    const securitySchemes = extractSecuritySchemes(doc);

    // Optional narrowing: only consider endpoints carrying the requested tag.
    if (options.tag) {
      currentEndpoints = filterByTag(currentEndpoints, options.tag);
    }

    // Collect all previously known endpoint keys from meta
    const prevKeys = Object.values(meta.files).flatMap((f) => f.endpoints);

    // Compute diff
    const { newEndpoints, removedKeys } = diffEndpoints(prevKeys, currentEndpoints);

    const warnings: string[] = [];

    // Removed endpoints are reported, never auto-deleted (see warning text).
    if (removedKeys.length > 0) {
      for (const key of removedKeys) {
        warnings.push(`Removed endpoint not deleted from tests (review manually): ${key}`);
      }
    }

    // Spec hash changed but no additions — e.g. schema edits or removals only.
    if (newEndpoints.length === 0) {
      const msg = "Spec changed (hash differs) but no new endpoints detected. Existing tests may need manual review.";
      warnings.push(msg);
      if (options.json) {
        printJson(jsonOk("sync", {
          newEndpoints: [],
          removedKeys,
          generatedFiles: [],
          specChanged: true,
        }, warnings));
      } else {
        console.log(msg);
        for (const w of warnings) {
          printWarning(w);
        }
      }
      return 0;
    }

    // Generate suites for new endpoints only
    const suites = generateSuites({ endpoints: newEndpoints, securitySchemes });

    // Dry-run: print the plan (new endpoints, planned files, removals) and exit
    // without touching the filesystem or metadata.
    if (options.dryRun) {
      const newEndpointKeys = newEndpoints.map((ep) => `${ep.method.toUpperCase()} ${ep.path}`);
      const plannedFiles = suites.map((s) => ({
        file: `${s.fileStem ?? s.name}.yaml`,
        suite: s.name,
        tests: s.tests.length,
      }));

      if (options.json) {
        printJson(jsonOk("sync", {
          dryRun: true,
          newEndpoints: newEndpointKeys,
          removedKeys,
          plannedFiles,
          specChanged: true,
        }, warnings));
      } else {
        console.log(`[dry-run] Detected ${newEndpoints.length} new endpoint(s):`);
        for (const ep of newEndpoints) {
          console.log(`  + ${ep.method.toUpperCase()} ${ep.path}`);
        }
        console.log(`\nWould generate ${suites.length} new suite file(s):`);
        for (const f of plannedFiles) {
          console.log(`  ${f.file} (${f.tests} tests)`);
        }
        if (removedKeys.length > 0) {
          console.log("\nRemoved endpoints (not deleted — review tests):");
          for (const key of removedKeys) {
            console.log(`  - ${key}`);
          }
        }
        console.log("\nNo files written (dry-run).");
      }
      return 0;
    }

    // Write new files (skip if file already exists)
    await mkdir(options.testsDir, { recursive: true });

    const generatedFiles: Array<{ file: string; suite: string; tests: number }> = [];
    const skippedFiles: string[] = [];
    const updatedMetaFiles: Record<string, import("../../core/meta/types.ts").FileMeta> = {};

    for (const suite of suites) {
      const fileName = `${suite.fileStem ?? suite.name}.yaml`;
      const filePath = join(options.testsDir, fileName);
      const existing = Bun.file(filePath);

      // Never clobber a file the user may have edited; warn and move on.
      if (await existing.exists()) {
        skippedFiles.push(fileName);
        warnings.push(`Skipped ${fileName} (already exists — add new endpoints manually)`);
        continue;
      }

      const yaml = serializeSuite(suite);
      await Bun.write(filePath, yaml);
      generatedFiles.push({ file: filePath, suite: suite.name, tests: suite.tests.length });
      // Only files actually written get new metadata entries.
      updatedMetaFiles[fileName] = buildFileMeta(suite, ZOND_VERSION);
    }

    // Update metadata: merge new file entries, update hash and timestamp
    await writeMeta(options.testsDir, {
      zondVersion: ZOND_VERSION,
      lastSyncedAt: new Date().toISOString(),
      specUrl: options.specPath,
      specHash: currentHash,
      files: { ...meta.files, ...updatedMetaFiles },
    });

    // Sync DB collection if one is registered for this tests directory
    try {
      getDb();
      const collection = findCollectionByTestPath(options.testsDir);
      if (collection && collection.openapi_spec !== options.specPath) {
        updateCollection(collection.id, { openapi_spec: options.specPath });
        warnings.push(`Updated collection '${collection.name}' spec reference: ${collection.openapi_spec ?? "(none)"} → ${options.specPath}`);
      }
    } catch {
      // DB unavailable (e.g. no zond.db yet) — not a fatal error for sync
    }

    const newEndpointKeys = newEndpoints.map((ep) => `${ep.method.toUpperCase()} ${ep.path}`);

    // Final report: JSON envelope or human-readable summary of what happened.
    if (options.json) {
      printJson(jsonOk("sync", {
        newEndpoints: newEndpointKeys,
        removedKeys,
        generatedFiles,
        skippedFiles,
        specChanged: true,
      }, warnings));
    } else {
      console.log(`Spec changed. Detected ${newEndpoints.length} new endpoint(s):`);
      for (const ep of newEndpoints) {
        console.log(`  + ${ep.method.toUpperCase()} ${ep.path}`);
      }

      if (generatedFiles.length > 0) {
        console.log(`\nGenerated ${generatedFiles.length} new suite file(s):`);
        for (const f of generatedFiles) {
          console.log(`  ${f.file} (${f.tests} tests)`);
        }
      }

      if (skippedFiles.length > 0) {
        console.log("\nSkipped (file exists, review manually):");
        for (const f of skippedFiles) {
          console.log(`  ${f}`);
        }
      }

      if (removedKeys.length > 0) {
        console.log("\nRemoved endpoints (not deleted — review tests):");
        for (const key of removedKeys) {
          console.log(`  - ${key}`);
        }
      }

      if (generatedFiles.length > 0) {
        printSuccess(`\nSync complete. ${generatedFiles.length} file(s) written.`);
      } else {
        printWarning("No new files written — all target files already exist.");
      }

      for (const w of warnings) {
        printWarning(w);
      }
    }

    return 0;
  } catch (err) {
    // Catch-all: any failure (spec read, file I/O, meta write) maps to exit 2.
    const message = err instanceof Error ? err.message : String(err);
    if (options.json) {
      printJson(jsonError("sync", [message]));
    } else {
      printError(message);
    }
    return 2;
  }
}
|
package/src/cli/index.ts
CHANGED
|
@@ -3,7 +3,6 @@
|
|
|
3
3
|
import { runCommand } from "./commands/run.ts";
|
|
4
4
|
import { validateCommand } from "./commands/validate.ts";
|
|
5
5
|
import { serveCommand } from "./commands/serve.ts";
|
|
6
|
-
import { mcpCommand } from "./commands/mcp.ts";
|
|
7
6
|
import { coverageCommand } from "./commands/coverage.ts";
|
|
8
7
|
import { ciInitCommand } from "./commands/ci-init.ts";
|
|
9
8
|
import { initCommand } from "./commands/init.ts";
|
|
@@ -12,6 +11,8 @@ import { dbCommand } from "./commands/db.ts";
|
|
|
12
11
|
import { requestCommand } from "./commands/request.ts";
|
|
13
12
|
import { guideCommand } from "./commands/guide.ts";
|
|
14
13
|
import { generateCommand } from "./commands/generate.ts";
|
|
14
|
+
import { exportCommand } from "./commands/export.ts";
|
|
15
|
+
import { syncCommand } from "./commands/sync.ts";
|
|
15
16
|
import { printError } from "./output.ts";
|
|
16
17
|
import { getRuntimeInfo } from "./runtime.ts";
|
|
17
18
|
import { getDb } from "../db/schema.ts";
|
|
@@ -103,9 +104,9 @@ Usage:
|
|
|
103
104
|
zond guide <spec> Generate test generation guide from OpenAPI spec
|
|
104
105
|
zond serve Start web dashboard
|
|
105
106
|
zond ui Alias for 'serve --open' (start dashboard & open browser)
|
|
106
|
-
zond mcp Start MCP server (stdio transport for AI agents)
|
|
107
|
-
--dir <path> Set working directory (relative paths resolve here)
|
|
108
107
|
zond ci init Generate CI/CD workflow (GitHub Actions, GitLab CI)
|
|
108
|
+
zond export postman <path> Export YAML tests as Postman Collection v2.1
|
|
109
|
+
zond sync <spec> Detect new/removed endpoints and generate tests for new ones
|
|
109
110
|
|
|
110
111
|
Options for 'run':
|
|
111
112
|
--dry-run Show requests without sending them (exit code always 0)
|
|
@@ -175,6 +176,16 @@ Options for 'ci init':
|
|
|
175
176
|
--dir <path> Project root directory (default: current directory)
|
|
176
177
|
--force Overwrite existing CI config
|
|
177
178
|
|
|
179
|
+
Options for 'export postman':
|
|
180
|
+
--output <file> Output file path (default: collection.postman.json)
|
|
181
|
+
--env <file> Also export .env.yaml as Postman environment
|
|
182
|
+
--collection-name <name> Collection name (default: derived from path)
|
|
183
|
+
|
|
184
|
+
Options for 'sync':
|
|
185
|
+
--tests <dir> Path to test files directory (required)
|
|
186
|
+
--dry-run Show what would be generated without writing files
|
|
187
|
+
--tag <tag> Limit sync to endpoints with this tag
|
|
188
|
+
|
|
178
189
|
General:
|
|
179
190
|
--json Output in JSON envelope format (available for all commands)
|
|
180
191
|
--help, -h Show this help
|
|
@@ -308,13 +319,6 @@ async function main(): Promise<number> {
|
|
|
308
319
|
});
|
|
309
320
|
}
|
|
310
321
|
|
|
311
|
-
case "mcp": {
|
|
312
|
-
return mcpCommand({
|
|
313
|
-
dbPath: typeof flags["db"] === "string" ? flags["db"] : undefined,
|
|
314
|
-
dir: typeof flags["dir"] === "string" ? flags["dir"] : undefined,
|
|
315
|
-
});
|
|
316
|
-
}
|
|
317
|
-
|
|
318
322
|
case "ci": {
|
|
319
323
|
const ciSub = positional[0];
|
|
320
324
|
if (ciSub !== "init") {
|
|
@@ -504,6 +508,46 @@ async function main(): Promise<number> {
|
|
|
504
508
|
});
|
|
505
509
|
}
|
|
506
510
|
|
|
511
|
+
case "export": {
|
|
512
|
+
const subcommand = positional[0];
|
|
513
|
+
if (subcommand !== "postman") {
|
|
514
|
+
printError(`Unknown export subcommand: ${subcommand ?? "(none)"}. Usage: zond export postman <path>`);
|
|
515
|
+
return 2;
|
|
516
|
+
}
|
|
517
|
+
const testsPath = positional[1];
|
|
518
|
+
if (!testsPath) {
|
|
519
|
+
printError("Missing tests path. Usage: zond export postman <path> [--output <file>]");
|
|
520
|
+
return 2;
|
|
521
|
+
}
|
|
522
|
+
return exportCommand({
|
|
523
|
+
testsPath,
|
|
524
|
+
output: typeof flags["output"] === "string" ? flags["output"] : "collection.postman.json",
|
|
525
|
+
env: typeof flags["env"] === "string" ? flags["env"] : undefined,
|
|
526
|
+
collectionName: typeof flags["collection-name"] === "string" ? flags["collection-name"] : undefined,
|
|
527
|
+
json: jsonFlag,
|
|
528
|
+
});
|
|
529
|
+
}
|
|
530
|
+
|
|
531
|
+
case "sync": {
|
|
532
|
+
const specPath = positional[0];
|
|
533
|
+
if (!specPath) {
|
|
534
|
+
printError("Missing spec path. Usage: zond sync <spec> --tests <dir> [--dry-run] [--tag <tag>]");
|
|
535
|
+
return 2;
|
|
536
|
+
}
|
|
537
|
+
const testsDir = typeof flags["tests"] === "string" ? flags["tests"] : undefined;
|
|
538
|
+
if (!testsDir) {
|
|
539
|
+
printError("Missing --tests <dir>. Usage: zond sync <spec> --tests <dir>");
|
|
540
|
+
return 2;
|
|
541
|
+
}
|
|
542
|
+
return syncCommand({
|
|
543
|
+
specPath,
|
|
544
|
+
testsDir,
|
|
545
|
+
dryRun: flags["dry-run"] === true,
|
|
546
|
+
tag: typeof flags["tag"] === "string" ? flags["tag"] : undefined,
|
|
547
|
+
json: jsonFlag,
|
|
548
|
+
});
|
|
549
|
+
}
|
|
550
|
+
|
|
507
551
|
default: {
|
|
508
552
|
printError(`Unknown command: ${command}`);
|
|
509
553
|
printUsage();
|
|
@@ -1,7 +1,8 @@
|
|
|
1
1
|
import { getDb } from "../../db/schema.ts";
|
|
2
2
|
import { listCollections, listRuns, getRunById, getResultsByRunId, getCollectionById } from "../../db/queries.ts";
|
|
3
3
|
import { join } from "node:path";
|
|
4
|
-
import { statusHint, classifyFailure, envHint, envCategory, schemaHint, computeSharedEnvIssue } from "./failure-hints.ts";
|
|
4
|
+
import { statusHint, classifyFailure, envHint, envCategory, schemaHint, computeSharedEnvIssue, recommendedAction, softDeleteHint, type RecommendedAction } from "./failure-hints.ts";
|
|
5
|
+
import { AUTH_PATH_RE } from "../runner/execute-run.ts";
|
|
5
6
|
|
|
6
7
|
export function truncateErrorMessage(raw: string | null | undefined, verbose?: boolean): string | undefined {
|
|
7
8
|
if (!raw) return undefined;
|
|
@@ -116,11 +117,18 @@ export interface FailureGroup {
|
|
|
116
117
|
pattern: string;
|
|
117
118
|
count: number;
|
|
118
119
|
failure_type: string;
|
|
120
|
+
recommended_action: RecommendedAction;
|
|
119
121
|
hint?: string;
|
|
120
122
|
examples: string[];
|
|
121
123
|
response_status: number | null;
|
|
122
124
|
}
|
|
123
125
|
|
|
126
|
+
/** Group of tests skipped because an upstream step never produced a capture variable. */
export interface CascadeSkipGroup {
  /** Name of the missing capture variable the skipped tests depend on. */
  capture_var: string;
  /** Total number of tests skipped for this capture variable. */
  count: number;
  /** Up to three "suite/test" identifiers illustrating the group. */
  examples: string[];
}
|
|
131
|
+
|
|
124
132
|
export interface DiagnoseResult {
|
|
125
133
|
run: {
|
|
126
134
|
id: number;
|
|
@@ -136,13 +144,17 @@ export interface DiagnoseResult {
|
|
|
136
144
|
assertion_failures: number;
|
|
137
145
|
network_errors: number;
|
|
138
146
|
};
|
|
147
|
+
agent_directive?: string;
|
|
139
148
|
env_issue?: string;
|
|
149
|
+
auth_hint?: string;
|
|
150
|
+
cascade_skips?: CascadeSkipGroup[];
|
|
140
151
|
failures: Array<{
|
|
141
152
|
suite_name: string;
|
|
142
153
|
test_name: string;
|
|
143
154
|
suite_file?: string;
|
|
144
155
|
status: string;
|
|
145
156
|
failure_type: string;
|
|
157
|
+
recommended_action: RecommendedAction;
|
|
146
158
|
error_message?: string;
|
|
147
159
|
request_method: string | null;
|
|
148
160
|
request_url: string | null;
|
|
@@ -174,8 +186,12 @@ export function diagnoseRun(runId: number, verbose?: boolean, dbPath?: string):
|
|
|
174
186
|
const failures = allResults
|
|
175
187
|
.filter(r => r.status === "fail" || r.status === "error")
|
|
176
188
|
.map(r => {
|
|
177
|
-
const
|
|
189
|
+
const parsedBody = parseBodySafe(r.response_body);
|
|
190
|
+
const hint = envHint(r.request_url, r.error_message, envFilePath) ??
|
|
191
|
+
softDeleteHint(r.response_status, r.request_method, parsedBody) ??
|
|
192
|
+
statusHint(r.response_status);
|
|
178
193
|
const failure_type = classifyFailure(r.status, r.response_status);
|
|
194
|
+
const rec_action = recommendedAction(failure_type, r.response_status);
|
|
179
195
|
const sHint = schemaHint(failure_type, r.response_status);
|
|
180
196
|
return {
|
|
181
197
|
suite_name: r.suite_name,
|
|
@@ -183,13 +199,14 @@ export function diagnoseRun(runId: number, verbose?: boolean, dbPath?: string):
|
|
|
183
199
|
...(r.suite_file ? { suite_file: r.suite_file } : {}),
|
|
184
200
|
status: r.status,
|
|
185
201
|
failure_type,
|
|
202
|
+
recommended_action: rec_action,
|
|
186
203
|
error_message: truncateErrorMessage(r.error_message, verbose),
|
|
187
204
|
request_method: r.request_method,
|
|
188
205
|
request_url: r.request_url,
|
|
189
206
|
response_status: r.response_status,
|
|
190
207
|
...(hint ? { hint } : {}),
|
|
191
208
|
...(sHint ? { schema_hint: sHint } : {}),
|
|
192
|
-
response_body:
|
|
209
|
+
response_body: parsedBody,
|
|
193
210
|
response_headers: filterHeaders(r.response_headers),
|
|
194
211
|
assertions: r.assertions,
|
|
195
212
|
duration_ms: r.duration_ms,
|
|
@@ -198,9 +215,60 @@ export function diagnoseRun(runId: number, verbose?: boolean, dbPath?: string):
|
|
|
198
215
|
|
|
199
216
|
const sharedEnvHint = computeSharedEnvIssue(failures, envFilePath);
|
|
200
217
|
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
218
|
+
let apiErrors = 0, assertionFailures = 0, networkErrors = 0;
|
|
219
|
+
let authFailureCount = 0;
|
|
220
|
+
for (const f of failures) {
|
|
221
|
+
if (f.failure_type === "api_error") apiErrors++;
|
|
222
|
+
else if (f.failure_type === "assertion_failed") assertionFailures++;
|
|
223
|
+
else if (f.failure_type === "network_error") networkErrors++;
|
|
224
|
+
if (f.response_status === 401 || f.response_status === 403) authFailureCount++;
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
let agent_directive: string | undefined;
|
|
228
|
+
if (apiErrors > 0) {
|
|
229
|
+
const fixable = assertionFailures + networkErrors;
|
|
230
|
+
agent_directive =
|
|
231
|
+
`${apiErrors} test${apiErrors === 1 ? "" : "s"} returned 5xx server errors. ` +
|
|
232
|
+
`Do NOT change test expectations to accept 5xx responses. ` +
|
|
233
|
+
`These are backend bugs, not test logic errors. ` +
|
|
234
|
+
`Stop iterating on these tests and report the failures to the API team.` +
|
|
235
|
+
(fixable > 0
|
|
236
|
+
? ` The remaining ${fixable} failure${fixable === 1 ? "" : "s"} may be fixable in test logic.`
|
|
237
|
+
: "");
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
// Cascade skips: skipped tests due to missing captures from failed create steps
|
|
241
|
+
const CASCADE_RE = /^Depends on missing capture: (.+)$/;
|
|
242
|
+
const groupMap = new Map<string, string[]>();
|
|
243
|
+
for (const r of allResults) {
|
|
244
|
+
if (r.status !== "skip") continue;
|
|
245
|
+
const match = CASCADE_RE.exec(r.error_message ?? "");
|
|
246
|
+
if (!match) continue;
|
|
247
|
+
const captureVar = match[1]!;
|
|
248
|
+
const existing = groupMap.get(captureVar) ?? [];
|
|
249
|
+
existing.push(`${r.suite_name}/${r.test_name}`);
|
|
250
|
+
groupMap.set(captureVar, existing);
|
|
251
|
+
}
|
|
252
|
+
const cascade_skips: CascadeSkipGroup[] | undefined = groupMap.size > 0
|
|
253
|
+
? [...groupMap.entries()].map(([capture_var, examples]) => ({
|
|
254
|
+
capture_var,
|
|
255
|
+
count: examples.length,
|
|
256
|
+
examples: examples.slice(0, 3),
|
|
257
|
+
}))
|
|
258
|
+
: undefined;
|
|
259
|
+
|
|
260
|
+
// Auth hint: when many tests fail with 401/403, suggest auth setup
|
|
261
|
+
let auth_hint: string | undefined;
|
|
262
|
+
if (authFailureCount >= 5 && authFailureCount / diagRun.total >= 0.3) {
|
|
263
|
+
const loginEndpoint = allResults.find(
|
|
264
|
+
r => r.request_method?.toUpperCase() === "POST" && AUTH_PATH_RE.test(r.request_url ?? "")
|
|
265
|
+
);
|
|
266
|
+
if (loginEndpoint) {
|
|
267
|
+
auth_hint = `${authFailureCount} tests failed with 401/403. Found auth endpoint: POST ${loginEndpoint.request_url} — add \`setup: true\` to your auth suite so its captured token is shared with all other suites, or set auth_token manually in .env.yaml`;
|
|
268
|
+
} else {
|
|
269
|
+
auth_hint = `${authFailureCount} tests failed with 401/403 — add \`setup: true\` to your auth suite so its captured token is shared with all other suites, or set auth_token in .env.yaml`;
|
|
270
|
+
}
|
|
271
|
+
}
|
|
204
272
|
|
|
205
273
|
const { grouped_failures, compactFailures } = verbose
|
|
206
274
|
? { grouped_failures: undefined, compactFailures: failures }
|
|
@@ -221,13 +289,16 @@ export function diagnoseRun(runId: number, verbose?: boolean, dbPath?: string):
|
|
|
221
289
|
assertion_failures: assertionFailures,
|
|
222
290
|
network_errors: networkErrors,
|
|
223
291
|
},
|
|
292
|
+
...(agent_directive ? { agent_directive } : {}),
|
|
224
293
|
...(sharedEnvHint ? { env_issue: sharedEnvHint } : {}),
|
|
294
|
+
...(auth_hint ? { auth_hint } : {}),
|
|
295
|
+
...(cascade_skips ? { cascade_skips } : {}),
|
|
225
296
|
failures: compactFailures,
|
|
226
297
|
...(grouped_failures ? { grouped_failures } : {}),
|
|
227
298
|
};
|
|
228
299
|
}
|
|
229
300
|
|
|
230
|
-
type FailureItem = { suite_name: string; test_name: string; failure_type: string; hint?: string; response_status: number | null };
|
|
301
|
+
type FailureItem = { suite_name: string; test_name: string; failure_type: string; recommended_action: RecommendedAction; hint?: string; response_status: number | null };
|
|
231
302
|
|
|
232
303
|
/** Group similar failures for compact output. Exported for testing. */
|
|
233
304
|
export function groupFailures<T extends FailureItem>(failures: T[]): { grouped_failures?: FailureGroup[]; compactFailures: T[] } {
|
|
@@ -268,6 +339,7 @@ export function groupFailures<T extends FailureItem>(failures: T[]): { grouped_f
|
|
|
268
339
|
pattern,
|
|
269
340
|
count: group.items.length,
|
|
270
341
|
failure_type: group.failure_type,
|
|
342
|
+
recommended_action: group.items[0]!.recommended_action,
|
|
271
343
|
hint: group.hint,
|
|
272
344
|
examples: group.items.slice(0, 2).map(f => `${f.suite_name}/${f.test_name}`),
|
|
273
345
|
response_status: group.response_status,
|
|
@@ -37,6 +37,26 @@ export function envHint(url: string | null, errorMessage: string | null, envFile
|
|
|
37
37
|
return null;
|
|
38
38
|
}
|
|
39
39
|
|
|
40
|
+
export type RecommendedAction =
|
|
41
|
+
| "report_backend_bug"
|
|
42
|
+
| "fix_auth_config"
|
|
43
|
+
| "fix_test_logic"
|
|
44
|
+
| "fix_network_config";
|
|
45
|
+
|
|
46
|
+
export function recommendedAction(
|
|
47
|
+
failureType: "api_error" | "assertion_failed" | "network_error",
|
|
48
|
+
responseStatus: number | null,
|
|
49
|
+
): RecommendedAction {
|
|
50
|
+
if (failureType === "api_error") return "report_backend_bug";
|
|
51
|
+
if (failureType === "network_error") {
|
|
52
|
+
if (responseStatus === 401 || responseStatus === 403) return "fix_auth_config";
|
|
53
|
+
return "fix_network_config";
|
|
54
|
+
}
|
|
55
|
+
// assertion_failed
|
|
56
|
+
if (responseStatus === 401 || responseStatus === 403) return "fix_auth_config";
|
|
57
|
+
return "fix_test_logic";
|
|
58
|
+
}
|
|
59
|
+
|
|
40
60
|
export function envCategory(hint: string | undefined): string | null {
|
|
41
61
|
if (!hint) return null;
|
|
42
62
|
if (hint.includes("base_url is not set") || hint.includes("base_url is missing") || hint.includes("base_url is not configured")) return "base_url_missing";
|
|
@@ -55,6 +75,25 @@ export function schemaHint(
|
|
|
55
75
|
return null;
|
|
56
76
|
}
|
|
57
77
|
|
|
78
|
+
export function softDeleteHint(
|
|
79
|
+
actualStatus: number | null | undefined,
|
|
80
|
+
requestMethod: string | null | undefined,
|
|
81
|
+
responseBody: unknown,
|
|
82
|
+
): string | null {
|
|
83
|
+
if (actualStatus !== 200 || requestMethod?.toUpperCase() !== "GET") return null;
|
|
84
|
+
if (responseBody && typeof responseBody === "object") {
|
|
85
|
+
const hasStatusField =
|
|
86
|
+
"status" in (responseBody as object) ||
|
|
87
|
+
"state" in (responseBody as object) ||
|
|
88
|
+
"deleted" in (responseBody as object) ||
|
|
89
|
+
"is_deleted" in (responseBody as object);
|
|
90
|
+
if (hasStatusField) {
|
|
91
|
+
return 'GET returned 200 with a status/state field after DELETE — likely soft delete. Update the test: remove the "Verify deleted → 404" step and instead assert the status field value (e.g. status: "cancelled")';
|
|
92
|
+
}
|
|
93
|
+
}
|
|
94
|
+
return null;
|
|
95
|
+
}
|
|
96
|
+
|
|
58
97
|
export function computeSharedEnvIssue(
|
|
59
98
|
failures: Array<{ hint?: string }>,
|
|
60
99
|
envFilePath?: string,
|