@skyramp/mcp 0.1.5 → 0.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/index.js +6 -5
- package/build/prompts/initialize-workspace/initializeWorkspacePrompt.js +11 -7
- package/build/prompts/personas.js +2 -1
- package/build/prompts/test-maintenance/drift-analysis-prompt.js +2 -1
- package/build/prompts/test-maintenance/drift-analysis-prompt.test.js +28 -0
- package/build/prompts/test-recommendation/analysisOutputPrompt.js +72 -14
- package/build/prompts/test-recommendation/analysisOutputPrompt.test.js +154 -0
- package/build/prompts/test-recommendation/recommendationSections.js +4 -2
- package/build/prompts/test-recommendation/registerRecommendTestsPrompt.js +20 -4
- package/build/prompts/test-recommendation/test-recommendation-prompt.js +11 -8
- package/build/prompts/test-recommendation/test-recommendation-prompt.test.js +6 -6
- package/build/prompts/testbot/testbot-prompts.js +7 -5
- package/build/prompts/testbot/testbot-prompts.test.js +2 -2
- package/build/resources/analysisResources.js +1 -0
- package/build/services/ScenarioGenerationService.js +2 -1
- package/build/tools/code-refactor/enhanceAssertionsTool.js +2 -1
- package/build/tools/generate-tests/generateBatchScenarioRestTool.js +123 -1
- package/build/tools/generate-tests/generateBatchScenarioRestTool.test.js +205 -9
- package/build/tools/generate-tests/generateContractRestTool.js +19 -19
- package/build/tools/generate-tests/generateIntegrationRestTool.js +9 -2
- package/build/tools/generate-tests/generateUIRestTool.js +23 -8
- package/build/tools/test-management/analyzeChangesTool.js +218 -2
- package/build/tools/test-management/analyzeChangesTool.test.js +233 -1
- package/build/utils/featureFlags.js +7 -0
- package/build/utils/featureFlags.test.js +81 -0
- package/build/utils/httpDefaults.js +17 -0
- package/build/utils/httpDefaults.test.js +21 -0
- package/build/utils/scenarioDrafting.js +37 -15
- package/build/utils/scenarioDrafting.test.js +66 -0
- package/build/utils/telemetry.js +2 -1
- package/build/utils/utils.js +23 -0
- package/package.json +1 -1
|
@@ -4,6 +4,7 @@ import { AnalyticsService } from "../../services/AnalyticsService.js";
|
|
|
4
4
|
import { TestGenerationService, } from "../../services/TestGenerationService.js";
|
|
5
5
|
import { normalizeLanguageParams, resolveParamAliases, } from "../../utils/normalizeParams.js";
|
|
6
6
|
import { getPersonaPrefix } from "../../prompts/personas.js";
|
|
7
|
+
import { isTestbotEnabled } from "../../utils/featureFlags.js";
|
|
7
8
|
const TOOL_NAME = "skyramp_ui_test_generation";
|
|
8
9
|
export class UITestService extends TestGenerationService {
|
|
9
10
|
getTestType() {
|
|
@@ -25,25 +26,39 @@ export class UITestService extends TestGenerationService {
|
|
|
25
26
|
const result = await super.generateTest({ ...params, modularizeCode: false });
|
|
26
27
|
if (result.isError)
|
|
27
28
|
return result;
|
|
29
|
+
if (!params.enhanceAssertions && !params.modularizeCode)
|
|
30
|
+
return result;
|
|
28
31
|
const content = [...result.content];
|
|
29
32
|
content.push({
|
|
30
33
|
type: "text",
|
|
31
|
-
text: this.
|
|
34
|
+
text: this.buildUIPostGenInstructions(!!params.enhanceAssertions, !!params.modularizeCode),
|
|
32
35
|
});
|
|
33
36
|
return { ...result, content };
|
|
34
37
|
}
|
|
35
|
-
|
|
36
|
-
const
|
|
37
|
-
|
|
38
|
-
: "";
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
38
|
+
buildUIPostGenInstructions(enhanceAssertions, modularize) {
|
|
39
|
+
const steps = [];
|
|
40
|
+
if (enhanceAssertions) {
|
|
41
|
+
steps.push(`### Step ${steps.length + 1} — Enhance UI assertions [REQUIRED]\nCall \`skyramp_enhance_assertions\` with \`testFile\` set to the absolute path of the generated test file, \`testType: "ui"\`, and \`enhanceType: "generation"\`. Apply every instruction returned to that file.`);
|
|
42
|
+
}
|
|
43
|
+
if (modularize) {
|
|
44
|
+
steps.push(`### Step ${steps.length + 1} — Modularize [REQUIRED]\nCall \`skyramp_modularization\` with \`testFile\` set to the absolute path of the generated test file, \`language\`, \`testType: "ui"\`, and \`isTraceBased: true\`.`);
|
|
45
|
+
}
|
|
46
|
+
const heading = enhanceAssertions && modularize
|
|
47
|
+
? "Enhance UI assertions and modularize the generated test"
|
|
48
|
+
: enhanceAssertions
|
|
49
|
+
? "Enhance UI assertions in the generated test"
|
|
50
|
+
: "Modularize the generated test";
|
|
51
|
+
return `### CRITICAL NEXT STEP — ${heading}\n\n${steps.join("\n\n")}`;
|
|
42
52
|
}
|
|
43
53
|
}
|
|
54
|
+
const ADD_ASSERTIONS_DEFAULT = isTestbotEnabled();
|
|
44
55
|
// Only include the original params in the schema
|
|
45
56
|
const uiTestSchema = {
|
|
46
57
|
...languageSchema.shape,
|
|
58
|
+
enhanceAssertions: z
|
|
59
|
+
.boolean()
|
|
60
|
+
.default(ADD_ASSERTIONS_DEFAULT)
|
|
61
|
+
.describe("When true, calls skyramp_enhance_assertions after test generation to add richer response-body assertions. Disabled by default. Automatically enabled when running as testbot-feature. Do not override the default value of this parameter, unless the user explicitly asks to enable it."),
|
|
47
62
|
playwrightInput: z
|
|
48
63
|
.string()
|
|
49
64
|
.describe("MUST be absolute path to the playwright trace zip file like /path/to/trace.zip. Capture using browser_* tools + skyramp_export_zip, or via manual trace collection with skyramp_start_trace_collection/skyramp_stop_trace_collection."),
|
|
@@ -3,6 +3,7 @@ import * as crypto from "crypto";
|
|
|
3
3
|
import * as fs from "fs";
|
|
4
4
|
import * as os from "os";
|
|
5
5
|
import * as path from "path";
|
|
6
|
+
import yaml from "js-yaml";
|
|
6
7
|
import { simpleGit } from "simple-git";
|
|
7
8
|
import { logger } from "../../utils/logger.js";
|
|
8
9
|
import { parseWorkspaceAuthType, getDefaultAuthHeader, WorkspaceAuthType, readWorkspaceConfigRaw } from "../../utils/workspaceAuth.js";
|
|
@@ -21,6 +22,15 @@ import { buildAnalysisOutputText } from "../../prompts/test-recommendation/analy
|
|
|
21
22
|
import { parseTraceFile, discoverTraceFiles, discoverPlaywrightZips, } from "../../utils/trace-parser.js";
|
|
22
23
|
import { TestSource } from "../../types/TestAnalysis.js";
|
|
23
24
|
import { parsePRComments } from "../../utils/pr-comment-parser.js";
|
|
25
|
+
/** Exported for testing: maps a parsed trace result to a TraceFile. */
|
|
26
|
+
export function buildTraceFileEntry(tracePath, result) {
|
|
27
|
+
return {
|
|
28
|
+
path: tracePath,
|
|
29
|
+
format: result.format,
|
|
30
|
+
analyzed: true,
|
|
31
|
+
userFlows: result.userFlows.map((f) => f.flowId),
|
|
32
|
+
};
|
|
33
|
+
}
|
|
24
34
|
const TOOL_NAME = "skyramp_analyze_changes";
|
|
25
35
|
// Must match testbot/src/constants.ts BOT_EMAIL
|
|
26
36
|
const BOT_EMAIL = "test-bot@skyramp.dev";
|
|
@@ -149,6 +159,52 @@ const NON_APP_PATTERNS = [
|
|
|
149
159
|
/^renovate\.json$/,
|
|
150
160
|
/^\.pre-commit-config/,
|
|
151
161
|
];
|
|
162
|
+
// ── filterEndpointsBySpec ──────────────────────────────────────────────────
|
|
163
|
+
// Pure helper extracted so unit tests can exercise the filtering + merge logic
|
|
164
|
+
// without spinning up the full analyzeChanges handler.
|
|
165
|
+
export function filterEndpointsBySpec(scannedEndpoints, specPaths, specPathItems, diffChangedPaths) {
|
|
166
|
+
if (!specPaths || specPaths.size === 0)
|
|
167
|
+
return scannedEndpoints;
|
|
168
|
+
const filtered = scannedEndpoints.filter(ep => {
|
|
169
|
+
const normalized = ep.path.replace(/:[a-zA-Z_][a-zA-Z0-9_]*/g, m => `{${m.slice(1)}}`);
|
|
170
|
+
if (specPaths.has(ep.path) || specPaths.has(normalized))
|
|
171
|
+
return true;
|
|
172
|
+
if (diffChangedPaths.has(ep.path))
|
|
173
|
+
return true;
|
|
174
|
+
return false;
|
|
175
|
+
});
|
|
176
|
+
const scannedPathSet = new Set(filtered.map(ep => ep.path));
|
|
177
|
+
// Also track normalized (:param → {param}) forms so Express-style scanned paths
|
|
178
|
+
// (e.g. /api/v1/users/:id) don't produce a duplicate when the spec uses /api/v1/users/{id}.
|
|
179
|
+
const scannedNormalizedSet = new Set(filtered.map(ep => ep.path.replace(/:[a-zA-Z_][a-zA-Z0-9_]*/g, m => `{${m.slice(1)}}`)));
|
|
180
|
+
const HTTP_VERBS = new Set(["get", "post", "put", "patch", "delete", "head", "options"]);
|
|
181
|
+
for (const specPath of specPaths) {
|
|
182
|
+
if (!scannedPathSet.has(specPath) && !scannedNormalizedSet.has(specPath)) {
|
|
183
|
+
const pathObj = specPathItems[specPath];
|
|
184
|
+
const specMethods = Object.keys(pathObj ?? {})
|
|
185
|
+
.filter(k => HTTP_VERBS.has(k))
|
|
186
|
+
.map(m => m.toUpperCase());
|
|
187
|
+
if (specMethods.length > 0) {
|
|
188
|
+
filtered.push({ path: specPath, methods: specMethods, sourceFile: "" });
|
|
189
|
+
}
|
|
190
|
+
}
|
|
191
|
+
}
|
|
192
|
+
return filtered;
|
|
193
|
+
}
|
|
194
|
+
const GRAPHQL_EXT = /\.(graphql|gql)$/i;
|
|
195
|
+
const GRAPHQL_CONTENT_PATTERN = /^\s*(type\s+(Query|Mutation|Subscription)\s*\{|schema\s*\{|extend\s+type|directive\s+@)/m;
|
|
196
|
+
export async function isGraphQLFile(filePath, repositoryPath) {
|
|
197
|
+
if (GRAPHQL_EXT.test(filePath))
|
|
198
|
+
return true;
|
|
199
|
+
try {
|
|
200
|
+
const absPath = path.join(repositoryPath, filePath);
|
|
201
|
+
const fileContent = await fs.promises.readFile(absPath, "utf-8");
|
|
202
|
+
return GRAPHQL_CONTENT_PATTERN.test(fileContent);
|
|
203
|
+
}
|
|
204
|
+
catch {
|
|
205
|
+
return false;
|
|
206
|
+
}
|
|
207
|
+
}
|
|
152
208
|
function isNonApplicationFile(filePath) {
|
|
153
209
|
return NON_APP_PATTERNS.some((p) => p.test(filePath));
|
|
154
210
|
}
|
|
@@ -370,6 +426,44 @@ to produce a unified state file for the test health workflow.
|
|
|
370
426
|
unmatched: classifiedEndpoints.unmatchedFiles.length,
|
|
371
427
|
});
|
|
372
428
|
}
|
|
429
|
+
// ── Early return: GraphQL-only diff — REST testing not supported ──
|
|
430
|
+
// Uses (userFiles ?? diffData.changedFiles) to match the existing non-app
|
|
431
|
+
// early-return pattern — bot-committed .graphql files must not trigger this.
|
|
432
|
+
if (analysisScope === AnalysisScope.CurrentBranchDiff &&
|
|
433
|
+
classifiedEndpoints &&
|
|
434
|
+
classifiedEndpoints.changedEndpoints.length === 0 &&
|
|
435
|
+
classifiedEndpoints.newEndpoints.length === 0 &&
|
|
436
|
+
classifiedEndpoints.removedEndpoints.length === 0 &&
|
|
437
|
+
diffData) {
|
|
438
|
+
const userFiles = await getUserChangedFiles(params.repositoryPath);
|
|
439
|
+
const filesToCheck = userFiles ?? diffData.changedFiles;
|
|
440
|
+
// Exclude non-application files (docs, CI, configs) before the graphql check.
|
|
441
|
+
// Non-app files are neutral — a README-only diff must NOT fire this early return.
|
|
442
|
+
const appFilesToCheck = filesToCheck.filter(f => !isNonApplicationFile(f));
|
|
443
|
+
const allGraphQL = appFilesToCheck.length > 0 &&
|
|
444
|
+
(await Promise.all(appFilesToCheck.map(f => isGraphQLFile(f, params.repositoryPath)))).every(Boolean);
|
|
445
|
+
if (allGraphQL) {
|
|
446
|
+
logger.info("GraphQL-only diff detected — REST testing not supported", {
|
|
447
|
+
changedFiles: diffData.changedFiles,
|
|
448
|
+
});
|
|
449
|
+
return {
|
|
450
|
+
content: [{
|
|
451
|
+
type: "text",
|
|
452
|
+
text: [
|
|
453
|
+
"**GraphQL-only diff detected.**",
|
|
454
|
+
"",
|
|
455
|
+
"The changed files appear to be GraphQL schema or resolver definitions.",
|
|
456
|
+
"Skyramp currently supports REST API testing only — GraphQL introspection,",
|
|
457
|
+
"query validation, and type-name grounding are not yet supported.",
|
|
458
|
+
"",
|
|
459
|
+
"No test recommendations can be generated for this diff.",
|
|
460
|
+
"",
|
|
461
|
+
`Changed files: ${diffData.changedFiles.join(", ")}`,
|
|
462
|
+
].join("\n"),
|
|
463
|
+
}],
|
|
464
|
+
};
|
|
465
|
+
}
|
|
466
|
+
}
|
|
373
467
|
await sendProgress(50, 100, "Discovering existing tests...");
|
|
374
468
|
// ── Step 3: Discover existing tests ──
|
|
375
469
|
// Compute changedResources from classified endpoints for test discovery filtering.
|
|
@@ -479,6 +573,90 @@ to produce a unified state file for the test health workflow.
|
|
|
479
573
|
wsAuthMethod = wsAuthType ?? "custom";
|
|
480
574
|
}
|
|
481
575
|
}
|
|
576
|
+
// ── Step 4b: Fetch OpenAPI spec and extract valid paths ──
|
|
577
|
+
// spec and specPaths are hoisted so Change 5 (spec merge) can access spec.paths[specPath]
|
|
578
|
+
// outside this try block without a scope error.
|
|
579
|
+
let specPaths;
|
|
580
|
+
let spec;
|
|
581
|
+
let specFetchSucceeded = false;
|
|
582
|
+
if (wsSchemaPath) {
|
|
583
|
+
try {
|
|
584
|
+
const isUrl = wsSchemaPath.startsWith("http://") || wsSchemaPath.startsWith("https://");
|
|
585
|
+
const SPEC_FETCH_TIMEOUT_MS = 10_000;
|
|
586
|
+
let specText;
|
|
587
|
+
if (isUrl) {
|
|
588
|
+
const specRes = await fetch(wsSchemaPath, { signal: AbortSignal.timeout(SPEC_FETCH_TIMEOUT_MS) });
|
|
589
|
+
if (!specRes.ok) {
|
|
590
|
+
throw new Error(`HTTP ${specRes.status} ${specRes.statusText} fetching spec at ${wsSchemaPath}`);
|
|
591
|
+
}
|
|
592
|
+
specText = await specRes.text();
|
|
593
|
+
}
|
|
594
|
+
else {
|
|
595
|
+
specText = fs.readFileSync(path.resolve(params.repositoryPath, wsSchemaPath), "utf-8");
|
|
596
|
+
}
|
|
597
|
+
// js-yaml handles both JSON and YAML specs (dep: js-yaml ^4.1.1)
|
|
598
|
+
spec = yaml.load(specText);
|
|
599
|
+
specPaths = new Set(Object.keys((spec && typeof spec === "object" ? spec.paths : null) ?? {}));
|
|
600
|
+
// Only treat spec as authoritative when it actually has usable path entries
|
|
601
|
+
specFetchSucceeded = spec && typeof spec === "object" &&
|
|
602
|
+
spec.paths !== null && typeof spec.paths === "object" &&
|
|
603
|
+
specPaths.size > 0;
|
|
604
|
+
logger.info("Loaded OpenAPI spec paths", { count: specPaths.size, source: wsSchemaPath });
|
|
605
|
+
}
|
|
606
|
+
catch (err) {
|
|
607
|
+
logger.warning("Could not load OpenAPI spec — continuing without path validation", {
|
|
608
|
+
schemaPath: wsSchemaPath,
|
|
609
|
+
error: err instanceof Error ? err.message : String(err),
|
|
610
|
+
});
|
|
611
|
+
}
|
|
612
|
+
}
|
|
613
|
+
// ── Step 4c: Filter scanned endpoints against spec ──
|
|
614
|
+
// Sequencing: scannedEndpoints is populated at Step 2 (~line 400),
|
|
615
|
+
// but wsSchemaPath and specPaths aren't known until Step 4b (above).
|
|
616
|
+
// All of 1b, 5, 10a, and 8 must run here — after specPaths is assigned
|
|
617
|
+
// and before Step 7 (~line 604) builds skeletonEndpoints.
|
|
618
|
+
if (specPaths && specPaths.size > 0) {
|
|
619
|
+
// Build a set of paths that the current diff explicitly changed — these
|
|
620
|
+
// are preserved even if missing from spec (spec may lag the code on new PRs).
|
|
621
|
+
// removedEndpoints intentionally excluded: deleted paths won't appear in
|
|
622
|
+
// scannedEndpoints (they no longer exist in code), so there is nothing to
|
|
623
|
+
// preserve — including them would only produce spurious spec-lag warnings.
|
|
624
|
+
const diffChangedPaths = new Set([
|
|
625
|
+
...(classifiedEndpoints?.changedEndpoints ?? []).map(ep => ep.path),
|
|
626
|
+
...(classifiedEndpoints?.newEndpoints ?? []).map(ep => ep.path),
|
|
627
|
+
]);
|
|
628
|
+
const beforeCount = scannedEndpoints.length;
|
|
629
|
+
// ── Steps 4c + 4c-merge: filter against spec, merge spec-only paths ──
|
|
630
|
+
scannedEndpoints = filterEndpointsBySpec(scannedEndpoints, specPaths, spec.paths ?? {}, diffChangedPaths);
|
|
631
|
+
logger.info("Filtered scanned endpoints against OpenAPI spec", {
|
|
632
|
+
before: beforeCount, after: scannedEndpoints.length,
|
|
633
|
+
delta: scannedEndpoints.length - beforeCount, // positive = net added (spec merge), negative = net removed
|
|
634
|
+
});
|
|
635
|
+
// Warn when diff-changed endpoints were missing from spec — indicates spec lag
|
|
636
|
+
const specLagPaths = [...diffChangedPaths].filter(p => !specPaths.has(p) &&
|
|
637
|
+
!specPaths.has(p.replace(/:[a-zA-Z_][a-zA-Z0-9_]*/g, m => `{${m.slice(1)}}`)));
|
|
638
|
+
if (specLagPaths.length > 0) {
|
|
639
|
+
logger.warning("Spec may be lagging code — diff-changed paths missing from spec (kept in catalog)", {
|
|
640
|
+
paths: specLagPaths, schemaPath: wsSchemaPath,
|
|
641
|
+
});
|
|
642
|
+
}
|
|
643
|
+
}
|
|
644
|
+
// ── Step 4d: Filter unsupported protocol endpoints (GraphQL) ──
|
|
645
|
+
// Must run AFTER spec-merge above — Directus spec includes /graphql and
|
|
646
|
+
// the merge step would re-add it if this ran earlier.
|
|
647
|
+
// Use segment check to catch /api/graphql, /v1/graphql, etc.
|
|
648
|
+
{
|
|
649
|
+
const beforeUnsupported = scannedEndpoints.length;
|
|
650
|
+
scannedEndpoints = scannedEndpoints.filter(ep => {
|
|
651
|
+
const normalized = ep.path.replace(/\/+$/, "").toLowerCase();
|
|
652
|
+
return !normalized.split("/").some(seg => seg === "graphql");
|
|
653
|
+
});
|
|
654
|
+
if (scannedEndpoints.length < beforeUnsupported) {
|
|
655
|
+
logger.info("Filtered unsupported protocol endpoints (GraphQL)", {
|
|
656
|
+
removed: beforeUnsupported - scannedEndpoints.length,
|
|
657
|
+
});
|
|
658
|
+
}
|
|
659
|
+
}
|
|
482
660
|
// ── Step 5: Detect project metadata ──
|
|
483
661
|
const projectMeta = detectProjectMetadata(params.repositoryPath);
|
|
484
662
|
// ── Step 6: Trace files ──
|
|
@@ -583,7 +761,21 @@ to produce a unified state file for the test health workflow.
|
|
|
583
761
|
path: ep.path,
|
|
584
762
|
sourceFile: ep.sourceFile,
|
|
585
763
|
}))) ?? [];
|
|
586
|
-
|
|
764
|
+
// Full-repo mode: no diff context, so seed scenario drafting from the entire
|
|
765
|
+
// skeletonEndpoints catalog. We gate on analysisScope (not just array length)
|
|
766
|
+
// to avoid drafting catalog-wide scenarios for PR-mode diffs that happened to
|
|
767
|
+
// add zero new endpoints (only changed or removed existing ones).
|
|
768
|
+
const fullRepoMode = analysisScope !== AnalysisScope.CurrentBranchDiff;
|
|
769
|
+
const scenarioDraftSeed = newEndpointsForDrafting.length > 0
|
|
770
|
+
? newEndpointsForDrafting
|
|
771
|
+
: fullRepoMode
|
|
772
|
+
? skeletonEndpoints.flatMap(ep => ep.methods.map(m => ({
|
|
773
|
+
method: typeof m === "string" ? m : m.method,
|
|
774
|
+
path: ep.path,
|
|
775
|
+
sourceFile: m.sourceFile ?? "",
|
|
776
|
+
})))
|
|
777
|
+
: [];
|
|
778
|
+
const codeInferredScenarios = draftScenariosFromEndpoints(skeletonEndpoints, scenarioDraftSeed);
|
|
587
779
|
let allDraftedScenarios = codeInferredScenarios;
|
|
588
780
|
if (traceResult && traceResult.userFlows.length > 0) {
|
|
589
781
|
const traceScenarios = traceResult.userFlows
|
|
@@ -742,7 +934,7 @@ to produce a unified state file for the test health workflow.
|
|
|
742
934
|
artifacts: {
|
|
743
935
|
openApiSpecs: wsSchemaPath ? [{ path: wsSchemaPath, version: "from-workspace-config", endpointCount: 0, baseUrl: wsBaseUrl, authType: wsAuthMethod }] : [],
|
|
744
936
|
playwrightRecordings: discoverPlaywrightZips(params.repositoryPath).map(p => ({ path: p, description: "" })),
|
|
745
|
-
traceFiles: traceResult ? [traceResult] : [],
|
|
937
|
+
traceFiles: traceResult ? [buildTraceFileEntry(traceFiles[0], traceResult)] : [],
|
|
746
938
|
notFound: [],
|
|
747
939
|
},
|
|
748
940
|
apiEndpoints: {
|
|
@@ -790,6 +982,25 @@ to produce a unified state file for the test health workflow.
|
|
|
790
982
|
const candidateRouteFiles = analysisScope !== AnalysisScope.CurrentBranchDiff
|
|
791
983
|
? findCandidateRouteFiles(params.repositoryPath)
|
|
792
984
|
: undefined;
|
|
985
|
+
// Read router mount files server-side (size-capped) so the LLM has them
|
|
986
|
+
// inline and doesn't need an extra read step when no spec is available.
|
|
987
|
+
const ROUTER_INLINE_LIMIT = 4096; // bytes — skip files larger than ~4 KB
|
|
988
|
+
const ROUTER_INLINE_MAX_FILES = 3;
|
|
989
|
+
const routerFileContents = routerMountContext
|
|
990
|
+
.slice(0, ROUTER_INLINE_MAX_FILES)
|
|
991
|
+
.flatMap((f) => {
|
|
992
|
+
try {
|
|
993
|
+
const absPath = path.isAbsolute(f) ? f : path.join(params.repositoryPath, f);
|
|
994
|
+
const stat = fs.statSync(absPath);
|
|
995
|
+
if (stat.size > ROUTER_INLINE_LIMIT)
|
|
996
|
+
return [];
|
|
997
|
+
const content = fs.readFileSync(absPath, "utf-8").trimEnd();
|
|
998
|
+
return [{ file: f, content }];
|
|
999
|
+
}
|
|
1000
|
+
catch {
|
|
1001
|
+
return [];
|
|
1002
|
+
}
|
|
1003
|
+
});
|
|
793
1004
|
const unifiedState = {
|
|
794
1005
|
existingTests,
|
|
795
1006
|
newEndpoints: newEndpointsForDrafting,
|
|
@@ -803,7 +1014,9 @@ to produce a unified state file for the test health workflow.
|
|
|
803
1014
|
wsAuthScheme,
|
|
804
1015
|
wsSchemaPath,
|
|
805
1016
|
wsAuthMethod,
|
|
1017
|
+
specFetchSucceeded,
|
|
806
1018
|
scenarios: allDraftedScenarios,
|
|
1019
|
+
testLocations: testLocationsByType,
|
|
807
1020
|
diff: classifiedEndpoints
|
|
808
1021
|
? {
|
|
809
1022
|
currentBranch: classifiedEndpoints.currentBranch,
|
|
@@ -980,7 +1193,10 @@ to produce a unified state file for the test health workflow.
|
|
|
980
1193
|
wsAuthHeader: wsAuthHeader ?? "",
|
|
981
1194
|
wsAuthType: wsAuthType ?? "",
|
|
982
1195
|
wsSchemaPath,
|
|
1196
|
+
specFetchSucceeded,
|
|
983
1197
|
routerMountContext,
|
|
1198
|
+
routerFileContents,
|
|
1199
|
+
unmatchedFiles: classifiedEndpoints?.unmatchedFiles,
|
|
984
1200
|
nextTool: "skyramp_analyze_test_health",
|
|
985
1201
|
});
|
|
986
1202
|
return {
|
|
@@ -56,7 +56,7 @@ jest.mock("../../utils/workspaceAuth.js", () => ({
|
|
|
56
56
|
parseWorkspaceAuthType: jest.fn(),
|
|
57
57
|
}));
|
|
58
58
|
jest.mock("../../utils/logger.js", () => ({
|
|
59
|
-
logger: { info: jest.fn(), debug: jest.fn(), error: jest.fn(), warn: jest.fn() },
|
|
59
|
+
logger: { info: jest.fn(), debug: jest.fn(), error: jest.fn(), warn: jest.fn(), warning: jest.fn() },
|
|
60
60
|
}));
|
|
61
61
|
jest.mock("@modelcontextprotocol/sdk/server/mcp.js", () => ({
|
|
62
62
|
McpServer: jest.fn(),
|
|
@@ -118,3 +118,235 @@ describe("automatic state file cleanup", () => {
|
|
|
118
118
|
})).resolves.toBeUndefined();
|
|
119
119
|
});
|
|
120
120
|
});
|
|
121
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
122
|
+
// filterEndpointsBySpec — spec filtering + merge logic (Step 4c / 4c-merge)
|
|
123
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
124
|
+
import { filterEndpointsBySpec } from "./analyzeChangesTool.js";
|
|
125
|
+
describe("filterEndpointsBySpec", () => {
|
|
126
|
+
const makeEp = (path, methods = ["GET"]) => ({ path, methods, sourceFile: "" });
|
|
127
|
+
it("removes scanned endpoints that are not in the spec and not diff-changed", () => {
|
|
128
|
+
const scanned = [makeEp("/api/v1/users"), makeEp("/api/v1/phantom")];
|
|
129
|
+
const specPaths = new Set(["/api/v1/users"]);
|
|
130
|
+
const specItems = { "/api/v1/users": { get: {} } };
|
|
131
|
+
const diffChanged = new Set();
|
|
132
|
+
const result = filterEndpointsBySpec(scanned, specPaths, specItems, diffChanged);
|
|
133
|
+
expect(result.map(e => e.path)).toContain("/api/v1/users");
|
|
134
|
+
expect(result.map(e => e.path)).not.toContain("/api/v1/phantom");
|
|
135
|
+
});
|
|
136
|
+
it("returns the endpoint list unchanged when specPaths is empty (early-return guard)", () => {
|
|
137
|
+
const scanned = [makeEp("/api/v1/new-feature")];
|
|
138
|
+
const specPaths = new Set(); // spec lags — new endpoint not there yet
|
|
139
|
+
const specItems = {};
|
|
140
|
+
const diffChanged = new Set(["/api/v1/new-feature"]);
|
|
141
|
+
const result = filterEndpointsBySpec(scanned, specPaths, specItems, diffChanged);
|
|
142
|
+
// specPaths is empty → early-return guard triggers → input returned as-is without filtering
|
|
143
|
+
expect(result.map(e => e.path)).toContain("/api/v1/new-feature");
|
|
144
|
+
});
|
|
145
|
+
it("preserves a diff-changed endpoint that is absent from a non-empty spec (spec-lag path)", () => {
|
|
146
|
+
// This is the real spec-lag scenario: spec exists (non-empty) but the new endpoint
|
|
147
|
+
// added in this PR hasn't been documented yet. filterEndpointsBySpec must keep it.
|
|
148
|
+
const scanned = [makeEp("/api/v1/new-feature")];
|
|
149
|
+
const specPaths = new Set(["/api/v1/users"]); // spec has other paths but not new-feature
|
|
150
|
+
const specItems = { "/api/v1/users": { get: {} } };
|
|
151
|
+
const diffChanged = new Set(["/api/v1/new-feature"]);
|
|
152
|
+
const result = filterEndpointsBySpec(scanned, specPaths, specItems, diffChanged);
|
|
153
|
+
expect(result.map(e => e.path)).toContain("/api/v1/new-feature");
|
|
154
|
+
});
|
|
155
|
+
it("preserves spec-lagging diff-changed path when spec has other paths", () => {
|
|
156
|
+
const scanned = [makeEp("/api/v1/users"), makeEp("/api/v1/new-feature")];
|
|
157
|
+
const specPaths = new Set(["/api/v1/users"]); // spec has users but not new-feature
|
|
158
|
+
const specItems = { "/api/v1/users": { get: {} } };
|
|
159
|
+
const diffChanged = new Set(["/api/v1/new-feature"]);
|
|
160
|
+
const result = filterEndpointsBySpec(scanned, specPaths, specItems, diffChanged);
|
|
161
|
+
expect(result.map(e => e.path)).toContain("/api/v1/users");
|
|
162
|
+
expect(result.map(e => e.path)).toContain("/api/v1/new-feature");
|
|
163
|
+
});
|
|
164
|
+
it("normalises Express :param paths to {param} when matching OpenAPI spec paths", () => {
|
|
165
|
+
const scanned = [makeEp("/api/v1/users/:id")];
|
|
166
|
+
const specPaths = new Set(["/api/v1/users/{id}"]);
|
|
167
|
+
const specItems = { "/api/v1/users/{id}": { get: {} } };
|
|
168
|
+
const diffChanged = new Set();
|
|
169
|
+
const result = filterEndpointsBySpec(scanned, specPaths, specItems, diffChanged);
|
|
170
|
+
const paths = result.map(e => e.path);
|
|
171
|
+
// Express-style path is kept because it normalises to the spec path
|
|
172
|
+
expect(paths).toContain("/api/v1/users/:id");
|
|
173
|
+
// The spec form must NOT be added as a duplicate — would produce two entries for the same route
|
|
174
|
+
expect(paths).not.toContain("/api/v1/users/{id}");
|
|
175
|
+
expect(paths).toHaveLength(1);
|
|
176
|
+
});
|
|
177
|
+
it("merges spec-only paths that the static scan missed", () => {
|
|
178
|
+
const scanned = [makeEp("/api/v1/users")];
|
|
179
|
+
const specPaths = new Set(["/api/v1/users", "/api/v1/products"]);
|
|
180
|
+
const specItems = {
|
|
181
|
+
"/api/v1/users": { get: {} },
|
|
182
|
+
"/api/v1/products": { get: {}, post: {} },
|
|
183
|
+
};
|
|
184
|
+
const diffChanged = new Set();
|
|
185
|
+
const result = filterEndpointsBySpec(scanned, specPaths, specItems, diffChanged);
|
|
186
|
+
const paths = result.map(e => e.path);
|
|
187
|
+
expect(paths).toContain("/api/v1/users");
|
|
188
|
+
expect(paths).toContain("/api/v1/products");
|
|
189
|
+
const products = result.find(e => e.path === "/api/v1/products");
|
|
190
|
+
expect(products?.methods).toEqual(expect.arrayContaining(["GET", "POST"]));
|
|
191
|
+
});
|
|
192
|
+
it("does not add spec-only paths that have no HTTP verb operations", () => {
|
|
193
|
+
const scanned = [];
|
|
194
|
+
const specPaths = new Set(["/api/v1/empty-path"]);
|
|
195
|
+
const specItems = { "/api/v1/empty-path": { "x-custom-field": "value" } }; // no HTTP verbs
|
|
196
|
+
const diffChanged = new Set();
|
|
197
|
+
const result = filterEndpointsBySpec(scanned, specPaths, specItems, diffChanged);
|
|
198
|
+
expect(result.map(e => e.path)).not.toContain("/api/v1/empty-path");
|
|
199
|
+
});
|
|
200
|
+
it("returns the original array unchanged when specPaths is empty", () => {
|
|
201
|
+
const scanned = [makeEp("/api/v1/anything")];
|
|
202
|
+
const result = filterEndpointsBySpec(scanned, new Set(), {}, new Set());
|
|
203
|
+
expect(result).toEqual(scanned);
|
|
204
|
+
});
|
|
205
|
+
});
|
|
206
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
207
|
+
// isGraphQLFile — unit tests
|
|
208
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
209
|
+
import { isGraphQLFile } from "./analyzeChangesTool.js";
|
|
210
|
+
import * as os from "os";
|
|
211
|
+
import * as path from "path";
|
|
212
|
+
import * as fsSync from "fs";
|
|
213
|
+
describe("isGraphQLFile", () => {
|
|
214
|
+
const tmpDir = os.tmpdir();
|
|
215
|
+
it("returns true for .graphql extension without reading content", async () => {
|
|
216
|
+
const result = await isGraphQLFile("schema.graphql", "/any/repo");
|
|
217
|
+
expect(result).toBe(true);
|
|
218
|
+
});
|
|
219
|
+
it("returns true for .gql extension without reading content", async () => {
|
|
220
|
+
const result = await isGraphQLFile("types.gql", "/any/repo");
|
|
221
|
+
expect(result).toBe(true);
|
|
222
|
+
});
|
|
223
|
+
it("returns true for a file containing a GraphQL type Query block", async () => {
|
|
224
|
+
const file = path.join(tmpDir, "graphql-test-query.ts");
|
|
225
|
+
fsSync.writeFileSync(file, 'type Query {\n hello: String\n}\n');
|
|
226
|
+
const result = await isGraphQLFile(path.basename(file), tmpDir);
|
|
227
|
+
expect(result).toBe(true);
|
|
228
|
+
});
|
|
229
|
+
it("returns true for a file containing a GraphQL schema block", async () => {
|
|
230
|
+
const file = path.join(tmpDir, "graphql-test-schema.ts");
|
|
231
|
+
fsSync.writeFileSync(file, 'schema {\n query: Query\n}\n');
|
|
232
|
+
const result = await isGraphQLFile(path.basename(file), tmpDir);
|
|
233
|
+
expect(result).toBe(true);
|
|
234
|
+
});
|
|
235
|
+
it("returns false for a regular TypeScript route file", async () => {
|
|
236
|
+
const file = path.join(tmpDir, "graphql-test-route.ts");
|
|
237
|
+
fsSync.writeFileSync(file, 'import express from "express";\nrouter.get("/users", handler);\n');
|
|
238
|
+
const result = await isGraphQLFile(path.basename(file), tmpDir);
|
|
239
|
+
expect(result).toBe(false);
|
|
240
|
+
});
|
|
241
|
+
it("returns false (does not throw) when the file does not exist", async () => {
|
|
242
|
+
const result = await isGraphQLFile("nonexistent-file.ts", tmpDir);
|
|
243
|
+
expect(result).toBe(false);
|
|
244
|
+
});
|
|
245
|
+
});
|
|
246
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
247
|
+
// analyzeChangesTool handler — GraphQL-only early return (handler-level)
|
|
248
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
249
|
+
import { registerAnalyzeChangesTool, buildTraceFileEntry } from "./analyzeChangesTool.js";
|
|
250
|
+
import { computeBranchDiff } from "../../utils/branchDiff.js";
|
|
251
|
+
import { classifyEndpointsByChangedFiles } from "../../utils/routeParsers.js";
|
|
252
|
+
import { scanRelatedEndpoints } from "../../utils/repoScanner.js";
|
|
253
|
+
import * as fsModule from "fs";
|
|
254
|
+
/** Register the tool against a minimal mock server and return the captured handler.
|
|
255
|
+
* The handler takes (params, extra) — extra is pre-filled with a no-op sendNotification. */
|
|
256
|
+
function captureAnalyzeHandler() {
|
|
257
|
+
let capturedHandler;
|
|
258
|
+
const mockServer = {
|
|
259
|
+
registerTool: jest.fn((_name, _meta, handler) => {
|
|
260
|
+
capturedHandler = handler;
|
|
261
|
+
}),
|
|
262
|
+
};
|
|
263
|
+
registerAnalyzeChangesTool(mockServer);
|
|
264
|
+
const mockExtra = { sendNotification: jest.fn().mockResolvedValue(undefined), _meta: {} };
|
|
265
|
+
return (params) => capturedHandler(params, mockExtra);
|
|
266
|
+
}
|
|
267
|
+
// Handler-level tests for the GraphQL-only early return: when a branch diff
// consists solely of .graphql files, the tool should short-circuit with an
// explanatory message instead of running REST endpoint analysis.
// NOTE(review): computeBranchDiff / scanRelatedEndpoints /
// classifyEndpointsByChangedFiles are used as jest mock functions here —
// presumably jest.mock(...) calls earlier in this file set that up; confirm.
describe("analyzeChangesTool handler — GraphQL-only early return", () => {
  // Shared params; individual tests override `scope` via spread where needed.
  const baseParams = {
    repositoryPath: "/fake/repo",
    scope: "branch_diff",
  };
  beforeEach(() => {
    jest.clearAllMocks();
    // Minimal diffData: two .graphql files changed, no endpoints classified
    computeBranchDiff.mockResolvedValue({
      currentBranch: "feature",
      baseBranch: "main",
      changedFiles: ["schema.graphql", "mutations.graphql"],
      deletedFiles: [],
      diffContent: "",
    });
    scanRelatedEndpoints.mockReturnValue([]);
    // Empty classification so the GraphQL check is the only deciding factor.
    classifyEndpointsByChangedFiles.mockReturnValue({
      changedEndpoints: [],
      newEndpoints: [],
      removedEndpoints: [],
      unmatchedFiles: [],
      affectedServices: [],
    });
  });
  it("returns the GraphQL-only early-return message when all changed files are .graphql", async () => {
    const handler = captureAnalyzeHandler();
    const result = await handler(baseParams);
    // Early return is a normal (non-error) result containing the advisory text.
    expect(result.isError).toBeFalsy();
    expect(result.content[0].text).toContain("GraphQL-only diff detected");
    expect(result.content[0].text).toContain("schema.graphql");
    expect(result.content[0].text).toContain("REST API testing");
  });
  it("does NOT early-return when .graphql files are mixed with REST route files", async () => {
    // Override the default beforeEach diff with a mixed GraphQL + REST diff.
    computeBranchDiff.mockResolvedValue({
      currentBranch: "feature",
      baseBranch: "main",
      changedFiles: ["schema.graphql", "src/routes/users.ts"],
      deletedFiles: [],
      diffContent: "",
    });
    // users.ts is not a GraphQL file — isGraphQLFile will read it and return false
    jest.spyOn(fsModule.promises, "readFile").mockResolvedValue('router.get("/users", handler)');
    const handler = captureAnalyzeHandler();
    const result = await handler(baseParams);
    // Should not early-return with GraphQL message
    expect(result.content[0].text).not.toContain("GraphQL-only diff detected");
  });
  it("does NOT early-return when scope is full_repo (only fires for PR diffs)", async () => {
    // full_repo scope uses the repo scanner path, not the branch diff path.
    const { scanAllRepoEndpoints } = require("../../utils/repoScanner.js");
    scanAllRepoEndpoints.mockReturnValue([]);
    const handler = captureAnalyzeHandler();
    const result = await handler({ ...baseParams, scope: "full_repo" });
    expect(result.content[0].text).not.toContain("GraphQL-only diff detected");
  });
});
|
|
322
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
323
|
+
// buildTraceFileEntry — traceFiles shape mapping
|
|
324
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
325
|
+
// buildTraceFileEntry should project a TraceParseResult down to the lean
// TraceFile shape: path + format + analyzed flag + flow-id list.
describe("buildTraceFileEntry", () => {
  it("maps TraceParseResult to a TraceFile", () => {
    const traceResult = {
      format: "har",
      entries: [],
      userFlows: [
        { flowId: "login-flow", entries: [], durationMs: 0 },
        { flowId: "checkout-flow", entries: [], durationMs: 0 },
      ],
    };
    const traceFile = buildTraceFileEntry("/repo/trace.json", traceResult);
    // Flow objects collapse to their ids; everything else carries through.
    expect(traceFile).toEqual({
      path: "/repo/trace.json",
      format: "har",
      analyzed: true,
      userFlows: ["login-flow", "checkout-flow"],
    });
  });
  it("returns an empty userFlows array when the trace has no flows", () => {
    const flowlessResult = { format: "har", entries: [], userFlows: [] };
    const traceFile = buildTraceFileEntry("/empty.json", flowlessResult);
    expect(traceFile.userFlows).toEqual([]);
  });
});
|
|
@@ -32,3 +32,10 @@
|
|
|
32
32
|
/**
 * Feature flag: contract consumer mode.
 * @returns {boolean} true only when the SKYRAMP_FEATURE_CONTRACT_CONSUMER_MODE
 *   environment variable is exactly the string "1".
 */
export function isContractConsumerModeEnabled() {
  const flag = process.env.SKYRAMP_FEATURE_CONTRACT_CONSUMER_MODE;
  return flag === "1";
}
|
|
35
|
+
/**
 * Feature flag: TestBot environment.
 * @returns {boolean} true only when the SKYRAMP_FEATURE_TESTBOT environment
 *   variable is exactly the string "1"; any other value (or absence) is false.
 */
export function isTestbotEnabled() {
  const flag = process.env.SKYRAMP_FEATURE_TESTBOT;
  return flag === "1";
}
|