@skyramp/mcp 0.0.55 → 0.0.57

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,105 +1,131 @@
1
+ import { ResourceTemplate, } from "@modelcontextprotocol/sdk/server/mcp.js";
1
2
  import { z } from "zod";
2
3
  import { logger } from "../../utils/logger.js";
3
- function getTestbotRecommendationPrompt(prTitle, prDescription, summaryOutputFile) {
4
+ import { AnalyticsService } from "../../services/AnalyticsService.js";
5
+ function getTestbotPrompt(prTitle, prDescription, diffFile, testDirectory, summaryOutputFile, repositoryPath) {
4
6
  return `<TITLE>${prTitle}</TITLE>
5
7
  <DESCRIPTION>${prDescription}</DESCRIPTION>
8
+ <CODE CHANGES>${diffFile}</CODE CHANGES>
9
+ <TEST DIRECTORY>${testDirectory}</TEST DIRECTORY>
10
+ <REPOSITORY PATH>${repositoryPath}</REPOSITORY PATH>
6
11
 
7
- Use the MCP tools offered by Skyramp MCP server to analyze the pull request title and description to come up with a business justification
8
- case for this code change. Based on your analysis of the business case, perform
12
+ For all the following work, use the tools offered by Skyramp MCP server.
9
13
 
10
- 1. Get recommendations for new contract, e2e, smoke, load tests that can be generated by Skyramp MCP tools
11
- 2. Use Skyramp MCP to execute the tests and validate the results.
12
- 3. Generate a CONCISE summary to '${summaryOutputFile}' following this format:
14
+ First analyze the pull request title, description, and code changes to determine a business case
15
+ justification for this code change.
13
16
 
14
- ## 🤖 Skyramp Test Bot Recommendations
17
+ Then perform ALL of the following tasks. Every task is MANDATORY — do NOT skip any task based on your own judgment unless the task itself gives you an explicit condition to skip.
15
18
 
16
- ### 💡 Suggested Test Coverage
17
- - Brief list of new test types added and for which endpoint.
19
+ ## Task 1: Recommend New Tests (MANDATORY)
18
20
 
19
- ### 🧪 Test Results
20
- - List each test executed with pass/fail status and brief details
21
+ Read the diff at \`${diffFile}\`. Classify each changed file. A file is application source code if it is any of: a route/controller/handler, a model/schema/validator/serializer/DTO, business logic, middleware, service, utility, test helper, or has a source extension (.py, .ts, .js, .java, .go, .rb, .cs, .kt, .swift, etc.). When in doubt, treat the file as application source code.
21
22
 
22
- ### ⚠️ Issues Found
23
- - List any test failures or bugs discovered (if none, write 'None')
23
+ **DEFAULT: You MUST run steps 1–5 below.** The only exception is if you can confirm that EVERY changed file is exclusively a CI workflow YAML, markdown documentation, README, CHANGELOG, or a dependency lock file — and nothing else.
24
24
 
25
- Keep the summary brief and scannable. Avoid repeating code diff details that are already visible in the PR.
25
+ 1. Call \`skyramp_analyze_repository\` with:
26
+ - \`repositoryPath\`: "${repositoryPath}"
27
+ - \`analysisScope\`: "current_branch_diff"
28
+ 2. MANDATORY: Call \`skyramp_map_tests\` with \`stateFile\` (the state file path returned above) and \`analysisScope: "current_branch_diff"\`.
29
+ 3. MANDATORY: Call \`skyramp_recommend_tests\` with the \`stateFile\` returned by \`skyramp_map_tests\`. Use the priority summary and the specific endpoints/files that changed to determine exactly what to test.
30
+ 4. Generate tests using the Skyramp MCP generate tools, in priority order (minimum 3 test types).
31
+ 5. Use Skyramp MCP to execute the generated tests and validate the results.
26
32
 
27
- Use the skyramp-mcp tools available to you for test analysis, generation, and execution.`;
28
- }
29
- function getTestbotMaintenancePrompt(diffFile, testDirectory, summaryOutputFile) {
30
- return `Based on the git diff in ${diffFile} file, perform the following test maintenance tasks:
33
+ ## Task 2: Existing Test Maintenance (MANDATORY)
34
+
35
+ You MUST always run steps 1–4 below. Do NOT skip this task based on your own assessment of whether tests exist or are relevant — use the tools to determine that.
31
36
 
32
- 1. Check if there are any relevant Skyramp tests in the '${testDirectory}' directory that correspond to the code changes.
33
- 2. If there are relevant tests, use Skyramp MCP tools to analyze and modify them to ensure they pass with the new changes.
34
- 3. If there are no tests that can validate the changes, use Skyramp MCP to generate appropriate Skyramp tests for the new or modified code.
35
- 4. Use Skyramp MCP to execute the tests and validate the results.
36
- 5. Generate a CONCISE summary to '${summaryOutputFile}' following this format:
37
+ 1. Call \`skyramp_discover_tests\` with \`repositoryPath\`: "${repositoryPath}" to find all existing Skyramp-generated tests.
38
+ 2. Call \`skyramp_analyze_test_drift\` with the \`stateFile\` returned by \`skyramp_discover_tests\`.
39
+ 3. Call \`skyramp_calculate_health_scores\` with the \`stateFile\` from the previous step.
40
+ 4. Call \`skyramp_actions\` with the updated \`stateFile\` to apply recommended updates.
41
+ 5. Execute any updated or affected tests using Skyramp MCP and validate the results.
42
+ 6. You may skip this task ONLY if \`skyramp_discover_tests\` explicitly returns zero Skyramp-generated tests.
37
43
 
38
- ## 🤖 Skyramp Test Bot Summary
44
+ ## Task 3: Submit Report (MANDATORY)
39
45
 
40
- **Tests Modified:** [number] | **Tests Created:** [number] | **Tests Executed:** [number] | **Passed:** [number] | **Failed:** [number]
46
+ After completing Tasks 1 and 2, you MUST call the Skyramp MCP tool "skyramp_submit_report" to submit your report.
47
+ Pass '${summaryOutputFile}' as the summaryOutputFile parameter.
41
48
 
42
- ### ✅ Maintenance Summary
43
- - Brief one-line summary of test changes (details are in the commit)
49
+ For the commitMessage parameter, write a succinct summary (under 72 chars) of what you did, without any prefix. Examples:
50
+ - "add contract tests for /products endpoint"
51
+ - "update smoke tests for order API changes"
52
+ - "add smoke and e2e tests for new /reviews endpoint"
44
53
 
45
- ### 🧪 Test Results
46
- - List each test executed with pass/fail status and brief details
54
+ Do NOT write the report to a file yourself. Do NOT skip this step. The skyramp_submit_report tool is the ONLY way to submit the report.
47
55
 
48
- ### ⚠️ Issues Found
49
- - List any test failures or bugs discovered (if none, write 'None')
56
+ ## Report Guidelines
50
57
 
51
- Keep the summary brief and scannable. Avoid repeating code diff details that are already visible in the PR.
58
+ When reporting test results, if you chose to skip executing a test, you MUST explain WHY you skipped it.
59
+ NEVER use the phrase "CI timeout" or imply a timeout occurred unless a tool call actually timed out.
60
+ Instead, set the status to "Skipped" and provide an honest reason in the details, for example:
61
+ - "Skipped: no code changes affect this endpoint"
62
+ - "Skipped: skyramp_discover_tests found no existing Skyramp tests"
63
+ - "Skipped: only CI/config changes in this PR, no API changes"
52
64
 
53
- Use the skyramp-mcp tools available to you for test analysis, generation, and execution.`;
65
+ Reminder: Use the Skyramp MCP tools available to you for test analysis, generation, and execution.`;
54
66
  }
55
67
  export function registerTestbotPrompt(server) {
56
68
  logger.info("Registering testbot prompt");
57
69
  server.registerPrompt("skyramp_testbot", {
58
70
  description: "Run Skyramp TestBot to generate test recommendations and perform test maintenance for a pull request.",
59
71
  argsSchema: {
60
- prTitle: z
61
- .string()
62
- .describe("Pull request title"),
72
+ prTitle: z.string().describe("Pull request title"),
63
73
  prDescription: z
64
74
  .string()
65
75
  .describe("Pull request description/body"),
66
- diffFile: z
67
- .string()
68
- .describe("Path to the git diff file"),
76
+ diffFile: z.string().describe("Path to the git diff file"),
69
77
  testDirectory: z
70
78
  .string()
71
79
  .default("tests")
72
80
  .describe("Directory containing Skyramp tests"),
73
- recommendationSummaryOutputFile: z
81
+ summaryOutputFile: z
74
82
  .string()
75
- .describe("File path where the agent should write the recommendation summary"),
76
- maintenanceSummaryOutputFile: z
83
+ .describe("File path where the agent should write the testbot summary report"),
84
+ repositoryPath: z
77
85
  .string()
78
- .describe("File path where the agent should write the maintenance summary"),
86
+ .default(".")
87
+ .describe("Absolute path to the repository being analyzed"),
79
88
  },
80
89
  }, (args) => {
81
- const recommendationPrompt = getTestbotRecommendationPrompt(args.prTitle, args.prDescription, args.recommendationSummaryOutputFile);
82
- const maintenancePrompt = getTestbotMaintenancePrompt(args.diffFile, args.testDirectory, args.maintenanceSummaryOutputFile);
90
+ const prompt = getTestbotPrompt(args.prTitle, args.prDescription, args.diffFile, args.testDirectory, args.summaryOutputFile, args.repositoryPath);
91
+ AnalyticsService.pushMCPToolEvent("skyramp_testbot_prompt", undefined, {}).catch(() => { });
83
92
  return {
84
93
  messages: [
85
94
  {
86
95
  role: "user",
87
96
  content: {
88
97
  type: "text",
89
- text: `# Skyramp TestBot Workflow
90
-
91
- Follow these two phases in order.
92
-
93
- ## Phase 1: Test Recommendations
94
-
95
- ${recommendationPrompt}
96
-
97
- ## Phase 2: Test Maintenance
98
-
99
- ${maintenancePrompt}`,
98
+ text: prompt,
100
99
  },
101
100
  },
102
101
  ],
103
102
  };
104
103
  });
105
104
  }
105
+ export function registerTestbotResource(server) {
106
+ logger.info("Registering testbot resource");
107
+ // RFC 6570 {+rest} (reserved expansion) captures the entire query string
108
+ // including the leading "?". This avoids the SDK's per-param regex which
109
+ // fails on empty query-param values (e.g. prDescription=).
110
+ // We then parse query params from the URL object which handles URL-decoding
111
+ // and empty values correctly.
112
+ const template = new ResourceTemplate("skyramp://prompts/testbot{+rest}", { list: undefined });
113
+ server.registerResource("skyramp_testbot", template, {
114
+ title: "Skyramp TestBot Prompt",
115
+ description: "Returns task instructions for PR test analysis, generation, and maintenance.",
116
+ mimeType: "text/plain",
117
+ }, (uri) => {
118
+ const param = (name, fallback) => uri.searchParams.get(name) ?? fallback;
119
+ const prompt = getTestbotPrompt(param("prTitle", ""), param("prDescription", ""), param("diffFile", ".skyramp_git_diff"), param("testDirectory", "tests"), param("summaryOutputFile", ""), param("repositoryPath", "."));
120
+ AnalyticsService.pushMCPToolEvent("skyramp_testbot_prompt", undefined, {}).catch(() => { });
121
+ return {
122
+ contents: [
123
+ {
124
+ uri: uri.toString(),
125
+ mimeType: "text/plain",
126
+ text: prompt,
127
+ },
128
+ ],
129
+ };
130
+ });
131
+ }
@@ -2,8 +2,9 @@ import { pushToolEvent } from "@skyramp/skyramp";
2
2
  import * as fs from "fs";
3
3
  import * as path from "path";
4
4
  import { fileURLToPath } from "url";
5
+ import { getEntryPoint, getCIPlatform } from "../utils/telemetry.js";
6
+ import { logger } from "../utils/logger.js";
5
7
  export class AnalyticsService {
6
- static entryPoint = "mcp";
7
8
  static async pushTestGenerationToolEvent(toolName, result, params) {
8
9
  const analyticsResult = {};
9
10
  analyticsResult["prompt"] = params.prompt;
@@ -11,19 +12,29 @@ export class AnalyticsService {
11
12
  this.pushMCPToolEvent(toolName, result, analyticsResult);
12
13
  }
13
14
  static async pushMCPToolEvent(toolName, result, params) {
14
- let errorMessage = "";
15
- if (result && result.isError) {
16
- for (const content of result?.content ?? []) {
17
- if ("text" in content && content.text) {
18
- errorMessage += content.text + ", ";
15
+ try {
16
+ let errorMessage = "";
17
+ if (result && result.isError) {
18
+ for (const content of result?.content ?? []) {
19
+ if ("text" in content && content.text) {
20
+ errorMessage += content.text + ", ";
21
+ }
22
+ }
23
+ if (errorMessage.length > 0) {
24
+ errorMessage = errorMessage.slice(0, -2);
19
25
  }
20
26
  }
21
- if (errorMessage.length > 0) {
22
- errorMessage = errorMessage.slice(0, -2);
27
+ params.mcpServerVersion = getMCPPackageVersion();
28
+ const ciPlatform = getCIPlatform();
29
+ if (ciPlatform) {
30
+ params.ciPlatform = ciPlatform;
23
31
  }
32
+ await pushToolEvent(getEntryPoint(), toolName, errorMessage, params);
33
+ }
34
+ catch (error) {
35
+ logger.error("Error pushing MCP tool event", { error: error });
36
+ // silently ignore
24
37
  }
25
- params.mcpServerVersion = getMCPPackageVersion();
26
- await pushToolEvent(this.entryPoint, toolName, errorMessage, params);
27
38
  }
28
39
  /**
29
40
  * Track server crash events
@@ -34,7 +45,11 @@ export class AnalyticsService {
34
45
  errorStack: errorStack || "no stack trace",
35
46
  mcpServerVersion: getMCPPackageVersion(),
36
47
  };
37
- await pushToolEvent(this.entryPoint, "mcp_server_crash", errorMessage, params);
48
+ const ciPlatform = getCIPlatform();
49
+ if (ciPlatform) {
50
+ params.ciPlatform = ciPlatform;
51
+ }
52
+ await pushToolEvent(getEntryPoint(), "mcp_server_crash", errorMessage, params);
38
53
  }
39
54
  /**
40
55
  * Track tool timeout events
@@ -46,7 +61,11 @@ export class AnalyticsService {
46
61
  timeoutMs: timeoutMs.toString(),
47
62
  mcpServerVersion: getMCPPackageVersion(),
48
63
  };
49
- await pushToolEvent(this.entryPoint, `${toolName}_timeout`, errorMessage, timeoutParams);
64
+ const ciPlatform = getCIPlatform();
65
+ if (ciPlatform) {
66
+ timeoutParams.ciPlatform = ciPlatform;
67
+ }
68
+ await pushToolEvent(getEntryPoint(), `${toolName}_timeout`, errorMessage, timeoutParams);
50
69
  }
51
70
  }
52
71
  /**
@@ -6,7 +6,7 @@ import { stripVTControlCharacters } from "util";
6
6
  import { logger } from "../utils/logger.js";
7
7
  const DEFAULT_TIMEOUT = 300000; // 5 minutes
8
8
  const MAX_CONCURRENT_EXECUTIONS = 5;
9
- const EXECUTOR_DOCKER_IMAGE = "skyramp/executor:v1.3.8";
9
+ export const EXECUTOR_DOCKER_IMAGE = "skyramp/executor:v1.3.10";
10
10
  const DOCKER_PLATFORM = "linux/amd64";
11
11
  const EXECUTION_PROGRESS_INTERVAL = 10000; // 10 seconds between progress updates during execution
12
12
  // Files and directories to exclude when mounting workspace to Docker container
@@ -1,6 +1,7 @@
1
1
  import { SkyrampClient } from "@skyramp/skyramp";
2
2
  import { analyzeOpenAPIWithGivenEndpoint } from "../utils/analyze-openapi.js";
3
- import { getPathParameterValidationError, OUTPUT_DIR_FIELD_NAME, PATH_PARAMS_FIELD_NAME, QUERY_PARAMS_FIELD_NAME, FORM_PARAMS_FIELD_NAME, validateParams, validatePath, validateRequestData, TELEMETRY_entrypoint_FIELD_NAME, } from "../utils/utils.js";
3
+ import { getPathParameterValidationError, OUTPUT_DIR_FIELD_NAME, PATH_PARAMS_FIELD_NAME, QUERY_PARAMS_FIELD_NAME, FORM_PARAMS_FIELD_NAME, validateParams, validatePath, validateRequestData, } from "../utils/utils.js";
4
+ import { getEntryPoint } from "../utils/telemetry.js";
4
5
  import { getLanguageSteps } from "../utils/language-helper.js";
5
6
  import { logger } from "../utils/logger.js";
6
7
  export class TestGenerationService {
@@ -148,6 +149,10 @@ The generated test file remains unchanged and ready to use as-is.
148
149
  }
149
150
  async executeGeneration(generateOptions) {
150
151
  try {
152
+ //if auth header is authorization then exclude auth header from request
153
+ if (generateOptions.authHeader === "Authorization") {
154
+ generateOptions.authHeader = "";
155
+ }
151
156
  const result = await this.client.generateRestTest(generateOptions);
152
157
  // Check if the result indicates failure
153
158
  if (result && result.length > 0) {
@@ -201,7 +206,7 @@ ${result}`;
201
206
  generateInclude: params.include,
202
207
  generateExclude: params.exclude,
203
208
  generateInsecure: params.insecure,
204
- entrypoint: TELEMETRY_entrypoint_FIELD_NAME,
209
+ entrypoint: getEntryPoint(),
205
210
  chainingKey: params.chainingKey,
206
211
  };
207
212
  }
@@ -0,0 +1,106 @@
1
+ import { z } from "zod";
2
+ import { logger } from "../utils/logger.js";
3
+ import * as fs from "fs/promises";
4
+ import * as path from "path";
5
+ import { AnalyticsService } from "../services/AnalyticsService.js";
6
+ const TOOL_NAME = "skyramp_submit_report";
7
+ const DEFAULT_COMMIT_MESSAGE = "Added recommendations by Skyramp Testbot.";
8
+ const testResultSchema = z.object({
9
+ testType: z.string().describe("Type of test: Smoke, Contract, Integration, Fuzz, E2E, Load, etc."),
10
+ endpoint: z.string().describe("HTTP verb and path, e.g. 'GET /api/v1/products'"),
11
+ status: z.enum(["Pass", "Fail", "Skipped"]).describe("Test execution result"),
12
+ details: z.string().describe("Execution time and test file name, e.g. '10.8s, products_smoke_test.py'"),
13
+ });
14
+ const newTestSchema = z.object({
15
+ testType: z.string().describe("Type of test created: Smoke, Contract, Integration, etc."),
16
+ endpoint: z.string().describe("HTTP verb and path, e.g. 'GET /api/v1/products'"),
17
+ fileName: z.string().describe("Name of the generated test file"),
18
+ });
19
+ const descriptionSchema = z.object({
20
+ description: z.string().describe("One-line description"),
21
+ });
22
+ export function registerSubmitReportTool(server) {
23
+ server.registerTool(TOOL_NAME, {
24
+ description: "Submit the final testbot report. Call this tool once after completing all test analysis, generation, and execution. " +
25
+ "This is the ONLY way to submit the report โ€” do NOT write the report to a file manually.",
26
+ inputSchema: {
27
+ summaryOutputFile: z
28
+ .string()
29
+ .describe("The file path where the report should be written (provided in the task instructions)"),
30
+ businessCaseAnalysis: z
31
+ .string()
32
+ .describe("2-3 sentence business justification for this PR"),
33
+ newTestsCreated: z
34
+ .array(newTestSchema)
35
+ .describe("List of new tests created. Use empty array [] if none."),
36
+ testMaintenance: z
37
+ .array(descriptionSchema)
38
+ .describe("List of existing test modifications. Use empty array [] if none."),
39
+ testResults: z
40
+ .array(testResultSchema)
41
+ .describe("List of ALL test execution results. One entry per test executed."),
42
+ issuesFound: z
43
+ .array(descriptionSchema)
44
+ .describe("List of issues, failures, or bugs found. Use empty array [] if none."),
45
+ commitMessage: z
46
+ .string()
47
+ .optional()
48
+ .default(DEFAULT_COMMIT_MESSAGE)
49
+ .describe("Succinct commit message (under 72 chars) summarizing what the testbot did, " +
50
+ "e.g. 'add contract tests for /products endpoint' or 'update smoke tests for order API changes'"),
51
+ },
52
+ _meta: {
53
+ keywords: ["report", "summary", "testbot", "submit"],
54
+ },
55
+ }, async (params) => {
56
+ const startTime = Date.now();
57
+ let errorResult;
58
+ const reportJson = JSON.stringify({
59
+ businessCaseAnalysis: params.businessCaseAnalysis,
60
+ newTestsCreated: params.newTestsCreated,
61
+ testMaintenance: params.testMaintenance,
62
+ testResults: params.testResults,
63
+ issuesFound: params.issuesFound,
64
+ commitMessage: (params.commitMessage ?? "").replace(/[\r\n]+/g, " ").trim().slice(0, 72) || DEFAULT_COMMIT_MESSAGE,
65
+ }, null, 2);
66
+ logger.info("Submitting testbot report", {
67
+ outputFile: params.summaryOutputFile,
68
+ payloadBytes: reportJson.length,
69
+ testResultCount: params.testResults.length,
70
+ });
71
+ try {
72
+ await fs.mkdir(path.dirname(params.summaryOutputFile), { recursive: true });
73
+ await fs.writeFile(params.summaryOutputFile, reportJson, "utf-8");
74
+ const elapsed = Date.now() - startTime;
75
+ logger.info("Testbot report written successfully", {
76
+ outputFile: params.summaryOutputFile,
77
+ elapsedMs: elapsed,
78
+ });
79
+ return {
80
+ content: [
81
+ {
82
+ type: "text",
83
+ text: `Report submitted successfully to ${params.summaryOutputFile}`,
84
+ },
85
+ ],
86
+ };
87
+ }
88
+ catch (error) {
89
+ const elapsed = Date.now() - startTime;
90
+ const errorMessage = `Failed to write report: ${error.message}`;
91
+ logger.error(errorMessage, { error, elapsedMs: elapsed });
92
+ errorResult = {
93
+ content: [{ type: "text", text: errorMessage }],
94
+ isError: true,
95
+ };
96
+ return errorResult;
97
+ }
98
+ finally {
99
+ AnalyticsService.pushMCPToolEvent(TOOL_NAME, errorResult, {
100
+ summary_output_file: params.summaryOutputFile,
101
+ testResultCount: String(params.testResults.length),
102
+ payloadBytes: String(reportJson.length),
103
+ }).catch(() => { });
104
+ }
105
+ });
106
+ }
@@ -0,0 +1,176 @@
1
+ // @ts-ignore
2
+ import { registerSubmitReportTool } from "./submitReportTool.js";
3
+ import * as fs from "fs/promises";
4
+ import * as path from "path";
5
+ import * as os from "os";
6
+ jest.mock("../utils/logger.js", () => ({
7
+ logger: { info: jest.fn(), error: jest.fn() },
8
+ }));
9
+ jest.mock("../services/AnalyticsService.js", () => ({
10
+ AnalyticsService: { pushMCPToolEvent: jest.fn().mockResolvedValue(undefined) },
11
+ }));
12
+ function captureToolHandler() {
13
+ let handler;
14
+ const fakeServer = {
15
+ registerTool: (_name, _opts, fn) => {
16
+ handler = fn;
17
+ },
18
+ };
19
+ registerSubmitReportTool(fakeServer);
20
+ return handler;
21
+ }
22
+ function sampleReportParams(outputFile) {
23
+ return {
24
+ summaryOutputFile: outputFile,
25
+ businessCaseAnalysis: "This PR adds product search. Tests needed for filtering.",
26
+ newTestsCreated: [
27
+ { testType: "Smoke", endpoint: "GET /api/v1/products/search", fileName: "search_smoke_test.py" },
28
+ ],
29
+ testMaintenance: [{ description: "Updated auth flow in existing tests" }],
30
+ testResults: [
31
+ { testType: "Smoke", endpoint: "GET /api/v1/products", status: "Pass", details: "2.1s, products_smoke_test.py" },
32
+ { testType: "Smoke", endpoint: "GET /api/v1/products/search", status: "Fail", details: "3.4s, search_smoke_test.py" },
33
+ ],
34
+ issuesFound: [{ description: "Search endpoint returns 500 with category filter" }],
35
+ };
36
+ }
37
+ describe("registerSubmitReportTool", () => {
38
+ let handler;
39
+ let tmpDirs = [];
40
+ beforeAll(() => {
41
+ handler = captureToolHandler();
42
+ });
43
+ afterEach(async () => {
44
+ for (const dir of tmpDirs) {
45
+ await fs.rm(dir, { recursive: true, force: true });
46
+ }
47
+ tmpDirs = [];
48
+ });
49
+ it("should register the tool on the server", () => {
50
+ const registerToolMock = jest.fn();
51
+ const fakeServer = { registerTool: registerToolMock };
52
+ registerSubmitReportTool(fakeServer);
53
+ expect(registerToolMock).toHaveBeenCalledWith("skyramp_submit_report", expect.objectContaining({
54
+ description: expect.any(String),
55
+ inputSchema: expect.any(Object),
56
+ }), expect.any(Function));
57
+ });
58
+ it("should write JSON report to the specified file", async () => {
59
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
60
+ tmpDirs.push(tmpDir);
61
+ const outputFile = path.join(tmpDir, "report.json");
62
+ const result = await handler(sampleReportParams(outputFile));
63
+ expect(result.isError).toBeUndefined();
64
+ expect(result.content[0].text).toContain("Report submitted successfully");
65
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
66
+ expect(written.businessCaseAnalysis).toBe("This PR adds product search. Tests needed for filtering.");
67
+ expect(written.newTestsCreated).toHaveLength(1);
68
+ expect(written.testResults).toHaveLength(2);
69
+ expect(written.issuesFound).toHaveLength(1);
70
+ expect(written.testMaintenance).toHaveLength(1);
71
+ });
72
+ it("should create parent directories if they don't exist", async () => {
73
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
74
+ tmpDirs.push(tmpDir);
75
+ const outputFile = path.join(tmpDir, "nested", "dirs", "report.json");
76
+ const result = await handler(sampleReportParams(outputFile));
77
+ expect(result.isError).toBeUndefined();
78
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
79
+ expect(written.businessCaseAnalysis).toBeDefined();
80
+ });
81
+ it("should handle empty arrays for optional sections", async () => {
82
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
83
+ tmpDirs.push(tmpDir);
84
+ const outputFile = path.join(tmpDir, "report.json");
85
+ const result = await handler({
86
+ summaryOutputFile: outputFile,
87
+ businessCaseAnalysis: "Simple config change.",
88
+ newTestsCreated: [],
89
+ testMaintenance: [],
90
+ testResults: [
91
+ { testType: "Smoke", endpoint: "GET /api/v1/products", status: "Pass", details: "1.0s, test.py" },
92
+ ],
93
+ issuesFound: [],
94
+ });
95
+ expect(result.isError).toBeUndefined();
96
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
97
+ expect(written.newTestsCreated).toEqual([]);
98
+ expect(written.testMaintenance).toEqual([]);
99
+ expect(written.issuesFound).toEqual([]);
100
+ expect(written.testResults).toHaveLength(1);
101
+ });
102
+ it("should return error for invalid file path", async () => {
103
+ const result = await handler(sampleReportParams("/nonexistent/readonly/path/report.json"));
104
+ expect(result.isError).toBe(true);
105
+ expect(result.content[0].text).toContain("Failed to write report");
106
+ });
107
+ it("should write commitMessage when provided", async () => {
108
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
109
+ tmpDirs.push(tmpDir);
110
+ const outputFile = path.join(tmpDir, "report.json");
111
+ const result = await handler({
112
+ ...sampleReportParams(outputFile),
113
+ commitMessage: "add contract tests for /products endpoint",
114
+ });
115
+ expect(result.isError).toBeUndefined();
116
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
117
+ expect(written.commitMessage).toBe("add contract tests for /products endpoint");
118
+ });
119
+ it("should use default commitMessage when omitted", async () => {
120
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
121
+ tmpDirs.push(tmpDir);
122
+ const outputFile = path.join(tmpDir, "report.json");
123
+ const result = await handler(sampleReportParams(outputFile));
124
+ expect(result.isError).toBeUndefined();
125
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
126
+ expect(written.commitMessage).toBe("Added recommendations by Skyramp Testbot.");
127
+ });
128
+ it("should sanitize commitMessage (newlines, length)", async () => {
129
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
130
+ tmpDirs.push(tmpDir);
131
+ const outputFile = path.join(tmpDir, "report.json");
132
+ const result = await handler({
133
+ ...sampleReportParams(outputFile),
134
+ commitMessage: " line one\nline two\r\nline three ",
135
+ });
136
+ expect(result.isError).toBeUndefined();
137
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
138
+ expect(written.commitMessage).toBe("line one line two line three");
139
+ expect(written.commitMessage.length).toBeLessThanOrEqual(72);
140
+ });
141
+ it("should use default commitMessage when provided as empty string", async () => {
142
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
143
+ tmpDirs.push(tmpDir);
144
+ const outputFile = path.join(tmpDir, "report.json");
145
+ const result = await handler({
146
+ ...sampleReportParams(outputFile),
147
+ commitMessage: "",
148
+ });
149
+ expect(result.isError).toBeUndefined();
150
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
151
+ expect(written.commitMessage).toBe("Added recommendations by Skyramp Testbot.");
152
+ });
153
+ it("should use default commitMessage when whitespace-only", async () => {
154
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
155
+ tmpDirs.push(tmpDir);
156
+ const outputFile = path.join(tmpDir, "report.json");
157
+ const result = await handler({
158
+ ...sampleReportParams(outputFile),
159
+ commitMessage: " \n\r\n ",
160
+ });
161
+ expect(result.isError).toBeUndefined();
162
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
163
+ expect(written.commitMessage).toBe("Added recommendations by Skyramp Testbot.");
164
+ });
165
+ it("should produce valid JSON with pretty formatting", async () => {
166
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
167
+ tmpDirs.push(tmpDir);
168
+ const outputFile = path.join(tmpDir, "report.json");
169
+ await handler(sampleReportParams(outputFile));
170
+ const raw = await fs.readFile(outputFile, "utf-8");
171
+ // Pretty-printed JSON should have indentation
172
+ expect(raw).toContain(" ");
173
+ // Should not contain summaryOutputFile in the output
174
+ expect(raw).not.toContain("summaryOutputFile");
175
+ });
176
+ });