@skyramp/mcp 0.0.55 → 0.0.56

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/build/index.js CHANGED
@@ -32,7 +32,9 @@ import { registerActionsTool } from "./tools/test-maintenance/actionsTool.js";
32
32
  import { registerStateCleanupTool } from "./tools/test-maintenance/stateCleanupTool.js";
33
33
  import { registerTestbotPrompt } from "./prompts/testbot/testbot-prompts.js";
34
34
  import { registerInitTestbotTool } from "./tools/initTestbotTool.js";
35
+ import { registerSubmitReportTool } from "./tools/submitReportTool.js";
35
36
  import { AnalyticsService } from "./services/AnalyticsService.js";
37
+ import { initCheck } from "./utils/initAgent.js";
36
38
  const server = new McpServer({
37
39
  name: "Skyramp MCP Server",
38
40
  version: "1.0.0",
@@ -46,6 +48,24 @@ const server = new McpServer({
46
48
  },
47
49
  },
48
50
  });
51
+ // Check for first-time invocation after version update (runs in background, doesn't block)
52
+ let initCheckCalled = false;
53
+ const originalRegisterTool = server.registerTool.bind(server);
54
+ server.registerTool = function (name, definition, handler) {
55
+ const wrappedHandler = async (...args) => {
56
+ if (!initCheckCalled) {
57
+ initCheck()
58
+ .then(() => {
59
+ initCheckCalled = true; // Only set to true if initCheck succeeds, allowing retry on failure
60
+ })
61
+ .catch((err) => {
62
+ logger.error("Background initialization check failed", { error: err });
63
+ });
64
+ }
65
+ return handler(...args);
66
+ };
67
+ return originalRegisterTool(name, definition, wrappedHandler);
68
+ };
49
69
  // Register prompts
50
70
  logger.info("Starting prompt registration process");
51
71
  const prompts = [
@@ -55,7 +75,7 @@ const prompts = [
55
75
  ];
56
76
  if (process.env.SKYRAMP_FEATURE_TESTBOT === "1") {
57
77
  prompts.push(registerTestbotPrompt);
58
- logger.info("TestBot prompt enabled via SKYRAMP_ENABLE_TESTBOT");
78
+ logger.info("TestBot prompt enabled via SKYRAMP_FEATURE_TESTBOT");
59
79
  }
60
80
  prompts.forEach((registerPrompt) => registerPrompt(server));
61
81
  logger.info("All prompts registered successfully");
@@ -99,7 +119,8 @@ const infrastructureTools = [
99
119
  ];
100
120
  if (process.env.SKYRAMP_FEATURE_TESTBOT === "1") {
101
121
  infrastructureTools.push(registerInitTestbotTool);
102
- logger.info("TestBot init tool enabled via SKYRAMP_FEATURE_TESTBOT");
122
+ infrastructureTools.push(registerSubmitReportTool);
123
+ logger.info("TestBot tools enabled via SKYRAMP_FEATURE_TESTBOT");
103
124
  }
104
125
  infrastructureTools.forEach((registerTool) => registerTool(server));
105
126
  // Global error handlers for crash telemetry
@@ -22,13 +22,21 @@ export function registerStartTraceCollectionPrompt(mcpServer) {
22
22
  **Playwright Configuration Options:**
23
23
  When playwright is enabled for trace collection, you can optionally configure:
24
24
 
25
- 1. **Playwright Storage Path** (playwrightStoragePath):
25
+ 1. **Browser** (browser):
26
+ - Choose which browser to use for trace collection
27
+ - Supported browsers:
28
+ * 'chromium' - Chrome/Chromium browser (default)
29
+ * 'firefox' - Mozilla Firefox browser
30
+ * 'webkit' - Safari/WebKit browser
31
+ - Use firefox or webkit when you need to test cross-browser compatibility or specific browser behaviors
32
+
33
+ 2. **Playwright Storage Path** (playwrightStoragePath):
26
34
  - Path to a playwright session storage file containing authentication data (cookies, localStorage, sessionStorage, etc.)
27
35
  - MUST be an absolute path like /path/to/storage.json
28
36
  - Use this when you have manually created a session from the login flow and want to reuse it for future trace collections to avoid manual login every time
29
37
  - The session file should be created beforehand using Playwright's storageState feature during the login flow
30
38
 
31
- 2. **Playwright Viewport Size** (playwrightViewportSize):
39
+ 3. **Playwright Viewport Size** (playwrightViewportSize):
32
40
  - Defines the browser window size for trace collection
33
41
  - Supported formats:
34
42
  * 'hd' - 1280x720
@@ -47,6 +55,11 @@ When playwright is enabled for trace collection, you can optionally configure:
47
55
  **Example usage prompt for trace collection with playwright storage and viewport:**
48
56
  * Start playwright trace collection with storage path /Users/dev/session-storage.json and viewport size full-hd
49
57
 
58
+ **Example usage prompt for trace collection with specific browser:**
59
+ * Start trace collection with Firefox browser
60
+ * Collect UI traces using webkit browser
61
+ * Start playwright trace collection with chromium browser (default)
62
+
50
63
  **CRITICAL: NEVER SHOW THE CLI COMMANDS.**
51
64
  `,
52
65
  },
@@ -1,102 +1,69 @@
1
1
  import { z } from "zod";
2
2
  import { logger } from "../../utils/logger.js";
3
- function getTestbotRecommendationPrompt(prTitle, prDescription, summaryOutputFile) {
3
+ function getTestbotPrompt(prTitle, prDescription, diffFile, testDirectory, summaryOutputFile) {
4
4
  return `<TITLE>${prTitle}</TITLE>
5
5
  <DESCRIPTION>${prDescription}</DESCRIPTION>
6
+ <CODE CHANGES>${diffFile}</CODE CHANGES>
7
+ <TEST DIRECTORY>${testDirectory}</TEST DIRECTORY>
6
8
 
7
- Use the MCP tools offered by Skyramp MCP server to analyze the pull request title and description to come up with a business justification
8
- case for this code change. Based on your analysis of the business case, perform
9
+ For all the following work, use the tools offered by Skyramp MCP server.
9
10
 
10
- 1. Get recommendations for new contract, e2e, smoke, load tests that can be generated by Skyramp MCP tools
11
- 2. Use Skyramp MCP to execute the tests and validate the results.
12
- 3. Generate a CONCISE summary to '${summaryOutputFile}' following this format:
11
+ First analyze the pull request title, description, and code changes to determine a business case
12
+ justification for this code change.
13
13
 
14
- ## 🤖 Skyramp Test Bot Recommendations
14
+ Then perform the following tasks based on your analysis:
15
15
 
16
- ### 💡 Suggested Test Coverage
17
- - Brief list of new test types added and for which endpoint.
16
+ ## Task 1: New Test Recommendations
18
17
 
19
- ### 🧪 Test Results
20
- - List each test executed with pass/fail status and brief details
18
+ 1. If, and only if, there are new endpoints in the code changes, get recommendations
19
+ for new contract, e2e, smoke, load tests specific to the code changes
20
+ and business case analysis. DO NOT use the whole repository for generating recommendations.
21
+ 2. If, and only if, there are recommendations for new tests, then use Skyramp MCP generate
22
+ tools to generate contract, e2e, smoke and load tests.
23
+ 3. Use Skyramp MCP to execute the generated tests and validate the results.
21
24
 
22
- ### โš ๏ธ Issues Found
23
- - List any test failures or bugs discovered (if none, write 'None')
24
-
25
- Keep the summary brief and scannable. Avoid repeating code diff details that are already visible in the PR.
26
-
27
- Use the skyramp-mcp tools available to you for test analysis, generation, and execution.`;
28
- }
29
- function getTestbotMaintenancePrompt(diffFile, testDirectory, summaryOutputFile) {
30
- return `Based on the git diff in ${diffFile} file, perform the following test maintenance tasks:
25
+ ## Task 2: Existing Test Maintenance
31
26
 
32
27
  1. Check if there are any relevant Skyramp tests in the '${testDirectory}' directory that correspond to the code changes.
33
28
  2. If there are relevant tests, use Skyramp MCP tools to analyze and modify them to ensure they pass with the new changes.
34
- 3. If there are no tests that can validate the changes, use Skyramp MCP to generate appropriate Skyramp tests for the new or modified code.
35
- 4. Use Skyramp MCP to execute the tests and validate the results.
36
- 5. Generate a CONCISE summary to '${summaryOutputFile}' following this format:
37
-
38
- ## 🤖 Skyramp Test Bot Summary
29
+ 3. Use Skyramp MCP to execute the updated tests and validate the results.
39
30
 
40
- **Tests Modified:** [number] | **Tests Created:** [number] | **Tests Executed:** [number] | **Passed:** [number] | **Failed:** [number]
31
+ ## Task 3: Submit Report
41
32
 
42
- ### ✅ Maintenance Summary
43
- - Brief one-line summary of test changes (details are in the commit)
33
+ After completing Tasks 1 and 2, you MUST call the Skyramp MCP tool "skyramp_submit_report" to submit your report.
34
+ Pass '${summaryOutputFile}' as the summaryOutputFile parameter.
44
35
 
45
- ### 🧪 Test Results
46
- - List each test executed with pass/fail status and brief details
36
+ Do NOT write the report to a file yourself. Do NOT skip this step. The skyramp_submit_report tool is the ONLY way to submit the report.
47
37
 
48
- ### โš ๏ธ Issues Found
49
- - List any test failures or bugs discovered (if none, write 'None')
50
-
51
- Keep the summary brief and scannable. Avoid repeating code diff details that are already visible in the PR.
52
-
53
- Use the skyramp-mcp tools available to you for test analysis, generation, and execution.`;
38
+ Reminder: Use the Skyramp MCP tools available to you for test analysis, generation, and execution.`;
54
39
  }
55
40
  export function registerTestbotPrompt(server) {
56
41
  logger.info("Registering testbot prompt");
57
42
  server.registerPrompt("skyramp_testbot", {
58
43
  description: "Run Skyramp TestBot to generate test recommendations and perform test maintenance for a pull request.",
59
44
  argsSchema: {
60
- prTitle: z
61
- .string()
62
- .describe("Pull request title"),
45
+ prTitle: z.string().describe("Pull request title"),
63
46
  prDescription: z
64
47
  .string()
65
48
  .describe("Pull request description/body"),
66
- diffFile: z
67
- .string()
68
- .describe("Path to the git diff file"),
49
+ diffFile: z.string().describe("Path to the git diff file"),
69
50
  testDirectory: z
70
51
  .string()
71
52
  .default("tests")
72
53
  .describe("Directory containing Skyramp tests"),
73
- recommendationSummaryOutputFile: z
74
- .string()
75
- .describe("File path where the agent should write the recommendation summary"),
76
- maintenanceSummaryOutputFile: z
54
+ summaryOutputFile: z
77
55
  .string()
78
- .describe("File path where the agent should write the maintenance summary"),
56
+ .describe("File path where the agent should write the testbot summary report"),
79
57
  },
80
58
  }, (args) => {
81
- const recommendationPrompt = getTestbotRecommendationPrompt(args.prTitle, args.prDescription, args.recommendationSummaryOutputFile);
82
- const maintenancePrompt = getTestbotMaintenancePrompt(args.diffFile, args.testDirectory, args.maintenanceSummaryOutputFile);
59
+ const prompt = getTestbotPrompt(args.prTitle, args.prDescription, args.diffFile, args.testDirectory, args.summaryOutputFile);
83
60
  return {
84
61
  messages: [
85
62
  {
86
63
  role: "user",
87
64
  content: {
88
65
  type: "text",
89
- text: `# Skyramp TestBot Workflow
90
-
91
- Follow these two phases in order.
92
-
93
- ## Phase 1: Test Recommendations
94
-
95
- ${recommendationPrompt}
96
-
97
- ## Phase 2: Test Maintenance
98
-
99
- ${maintenancePrompt}`,
66
+ text: prompt,
100
67
  },
101
68
  },
102
69
  ],
@@ -6,7 +6,7 @@ import { stripVTControlCharacters } from "util";
6
6
  import { logger } from "../utils/logger.js";
7
7
  const DEFAULT_TIMEOUT = 300000; // 5 minutes
8
8
  const MAX_CONCURRENT_EXECUTIONS = 5;
9
- const EXECUTOR_DOCKER_IMAGE = "skyramp/executor:v1.3.8";
9
+ const EXECUTOR_DOCKER_IMAGE = "skyramp/executor:v1.3.9";
10
10
  const DOCKER_PLATFORM = "linux/amd64";
11
11
  const EXECUTION_PROGRESS_INTERVAL = 10000; // 10 seconds between progress updates during execution
12
12
  // Files and directories to exclude when mounting workspace to Docker container
@@ -0,0 +1,97 @@
1
+ import { z } from "zod";
2
+ import { logger } from "../utils/logger.js";
3
+ import * as fs from "fs/promises";
4
+ import * as path from "path";
5
+ import { AnalyticsService } from "../services/AnalyticsService.js";
6
+ const TOOL_NAME = "skyramp_submit_report";
7
+ const testResultSchema = z.object({
8
+ testType: z.string().describe("Type of test: Smoke, Contract, Integration, Fuzz, E2E, Load, etc."),
9
+ endpoint: z.string().describe("HTTP verb and path, e.g. 'GET /api/v1/products'"),
10
+ status: z.enum(["Pass", "Fail", "Skipped"]).describe("Test execution result"),
11
+ details: z.string().describe("Execution time and test file name, e.g. '10.8s, products_smoke_test.py'"),
12
+ });
13
+ const newTestSchema = z.object({
14
+ testType: z.string().describe("Type of test created: Smoke, Contract, Integration, etc."),
15
+ endpoint: z.string().describe("HTTP verb and path, e.g. 'GET /api/v1/products'"),
16
+ fileName: z.string().describe("Name of the generated test file"),
17
+ });
18
+ const descriptionSchema = z.object({
19
+ description: z.string().describe("One-line description"),
20
+ });
21
+ export function registerSubmitReportTool(server) {
22
+ server.registerTool(TOOL_NAME, {
23
+ description: "Submit the final testbot report. Call this tool once after completing all test analysis, generation, and execution. " +
24
+ "This is the ONLY way to submit the report โ€” do NOT write the report to a file manually.",
25
+ inputSchema: {
26
+ summaryOutputFile: z
27
+ .string()
28
+ .describe("The file path where the report should be written (provided in the task instructions)"),
29
+ businessCaseAnalysis: z
30
+ .string()
31
+ .describe("2-3 sentence business justification for this PR"),
32
+ newTestsCreated: z
33
+ .array(newTestSchema)
34
+ .describe("List of new tests created. Use empty array [] if none."),
35
+ testMaintenance: z
36
+ .array(descriptionSchema)
37
+ .describe("List of existing test modifications. Use empty array [] if none."),
38
+ testResults: z
39
+ .array(testResultSchema)
40
+ .describe("List of ALL test execution results. One entry per test executed."),
41
+ issuesFound: z
42
+ .array(descriptionSchema)
43
+ .describe("List of issues, failures, or bugs found. Use empty array [] if none."),
44
+ },
45
+ _meta: {
46
+ keywords: ["report", "summary", "testbot", "submit"],
47
+ },
48
+ }, async (params) => {
49
+ const startTime = Date.now();
50
+ let errorResult;
51
+ const reportJson = JSON.stringify({
52
+ businessCaseAnalysis: params.businessCaseAnalysis,
53
+ newTestsCreated: params.newTestsCreated,
54
+ testMaintenance: params.testMaintenance,
55
+ testResults: params.testResults,
56
+ issuesFound: params.issuesFound,
57
+ }, null, 2);
58
+ logger.info("Submitting testbot report", {
59
+ outputFile: params.summaryOutputFile,
60
+ payloadBytes: reportJson.length,
61
+ testResultCount: params.testResults.length,
62
+ });
63
+ try {
64
+ await fs.mkdir(path.dirname(params.summaryOutputFile), { recursive: true });
65
+ await fs.writeFile(params.summaryOutputFile, reportJson, "utf-8");
66
+ const elapsed = Date.now() - startTime;
67
+ logger.info("Testbot report written successfully", {
68
+ outputFile: params.summaryOutputFile,
69
+ elapsedMs: elapsed,
70
+ });
71
+ return {
72
+ content: [
73
+ {
74
+ type: "text",
75
+ text: `Report submitted successfully to ${params.summaryOutputFile}`,
76
+ },
77
+ ],
78
+ };
79
+ }
80
+ catch (error) {
81
+ const elapsed = Date.now() - startTime;
82
+ const errorMessage = `Failed to write report: ${error.message}`;
83
+ logger.error(errorMessage, { error, elapsedMs: elapsed });
84
+ errorResult = {
85
+ content: [{ type: "text", text: errorMessage }],
86
+ isError: true,
87
+ };
88
+ return errorResult;
89
+ }
90
+ finally {
91
+ const recordParams = {
92
+ summary_output_file: params.summaryOutputFile,
93
+ };
94
+ AnalyticsService.pushMCPToolEvent(TOOL_NAME, errorResult, recordParams);
95
+ }
96
+ });
97
+ }
@@ -0,0 +1,118 @@
1
+ // @ts-ignore
2
+ import { registerSubmitReportTool } from "./submitReportTool.js";
3
+ import * as fs from "fs/promises";
4
+ import * as path from "path";
5
+ import * as os from "os";
6
+ jest.mock("../utils/logger.js", () => ({
7
+ logger: { info: jest.fn(), error: jest.fn() },
8
+ }));
9
+ jest.mock("../services/AnalyticsService.js", () => ({
10
+ AnalyticsService: { pushMCPToolEvent: jest.fn() },
11
+ }));
12
+ function captureToolHandler() {
13
+ let handler;
14
+ const fakeServer = {
15
+ registerTool: (_name, _opts, fn) => {
16
+ handler = fn;
17
+ },
18
+ };
19
+ registerSubmitReportTool(fakeServer);
20
+ return handler;
21
+ }
22
+ function sampleReportParams(outputFile) {
23
+ return {
24
+ summaryOutputFile: outputFile,
25
+ businessCaseAnalysis: "This PR adds product search. Tests needed for filtering.",
26
+ newTestsCreated: [
27
+ { testType: "Smoke", endpoint: "GET /api/v1/products/search", fileName: "search_smoke_test.py" },
28
+ ],
29
+ testMaintenance: [{ description: "Updated auth flow in existing tests" }],
30
+ testResults: [
31
+ { testType: "Smoke", endpoint: "GET /api/v1/products", status: "Pass", details: "2.1s, products_smoke_test.py" },
32
+ { testType: "Smoke", endpoint: "GET /api/v1/products/search", status: "Fail", details: "3.4s, search_smoke_test.py" },
33
+ ],
34
+ issuesFound: [{ description: "Search endpoint returns 500 with category filter" }],
35
+ };
36
+ }
37
+ describe("registerSubmitReportTool", () => {
38
+ let handler;
39
+ let tmpDirs = [];
40
+ beforeAll(() => {
41
+ handler = captureToolHandler();
42
+ });
43
+ afterEach(async () => {
44
+ for (const dir of tmpDirs) {
45
+ await fs.rm(dir, { recursive: true, force: true });
46
+ }
47
+ tmpDirs = [];
48
+ });
49
+ it("should register the tool on the server", () => {
50
+ const registerToolMock = jest.fn();
51
+ const fakeServer = { registerTool: registerToolMock };
52
+ registerSubmitReportTool(fakeServer);
53
+ expect(registerToolMock).toHaveBeenCalledWith("skyramp_submit_report", expect.objectContaining({
54
+ description: expect.any(String),
55
+ inputSchema: expect.any(Object),
56
+ }), expect.any(Function));
57
+ });
58
+ it("should write JSON report to the specified file", async () => {
59
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
60
+ tmpDirs.push(tmpDir);
61
+ const outputFile = path.join(tmpDir, "report.json");
62
+ const result = await handler(sampleReportParams(outputFile));
63
+ expect(result.isError).toBeUndefined();
64
+ expect(result.content[0].text).toContain("Report submitted successfully");
65
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
66
+ expect(written.businessCaseAnalysis).toBe("This PR adds product search. Tests needed for filtering.");
67
+ expect(written.newTestsCreated).toHaveLength(1);
68
+ expect(written.testResults).toHaveLength(2);
69
+ expect(written.issuesFound).toHaveLength(1);
70
+ expect(written.testMaintenance).toHaveLength(1);
71
+ });
72
+ it("should create parent directories if they don't exist", async () => {
73
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
74
+ tmpDirs.push(tmpDir);
75
+ const outputFile = path.join(tmpDir, "nested", "dirs", "report.json");
76
+ const result = await handler(sampleReportParams(outputFile));
77
+ expect(result.isError).toBeUndefined();
78
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
79
+ expect(written.businessCaseAnalysis).toBeDefined();
80
+ });
81
+ it("should handle empty arrays for optional sections", async () => {
82
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
83
+ tmpDirs.push(tmpDir);
84
+ const outputFile = path.join(tmpDir, "report.json");
85
+ const result = await handler({
86
+ summaryOutputFile: outputFile,
87
+ businessCaseAnalysis: "Simple config change.",
88
+ newTestsCreated: [],
89
+ testMaintenance: [],
90
+ testResults: [
91
+ { testType: "Smoke", endpoint: "GET /api/v1/products", status: "Pass", details: "1.0s, test.py" },
92
+ ],
93
+ issuesFound: [],
94
+ });
95
+ expect(result.isError).toBeUndefined();
96
+ const written = JSON.parse(await fs.readFile(outputFile, "utf-8"));
97
+ expect(written.newTestsCreated).toEqual([]);
98
+ expect(written.testMaintenance).toEqual([]);
99
+ expect(written.issuesFound).toEqual([]);
100
+ expect(written.testResults).toHaveLength(1);
101
+ });
102
+ it("should return error for invalid file path", async () => {
103
+ const result = await handler(sampleReportParams("/nonexistent/readonly/path/report.json"));
104
+ expect(result.isError).toBe(true);
105
+ expect(result.content[0].text).toContain("Failed to write report");
106
+ });
107
+ it("should produce valid JSON with pretty formatting", async () => {
108
+ const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "submit-report-test-"));
109
+ tmpDirs.push(tmpDir);
110
+ const outputFile = path.join(tmpDir, "report.json");
111
+ await handler(sampleReportParams(outputFile));
112
+ const raw = await fs.readFile(outputFile, "utf-8");
113
+ // Pretty-printed JSON should have indentation
114
+ expect(raw).toContain(" ");
115
+ // Should not contain summaryOutputFile in the output
116
+ expect(raw).not.toContain("summaryOutputFile");
117
+ });
118
+ });
@@ -30,6 +30,7 @@ For detailed documentation visit: https://www.skyramp.dev/docs/load-test/advance
30
30
  .boolean()
31
31
  .describe("Whether to enable Playwright for trace collection. Set to true for UI interactions, false for API-only tracing")
32
32
  .default(true),
33
+ browser: basePlaywrightSchema.shape.browser,
33
34
  playwrightStoragePath: basePlaywrightSchema.shape.playwrightStoragePath,
34
35
  playwrightSaveStoragePath: basePlaywrightSchema.shape.playwrightSaveStoragePath,
35
36
  playwrightViewportSize: basePlaywrightSchema.shape.playwrightViewportSize,
@@ -85,6 +86,7 @@ For detailed documentation visit: https://www.skyramp.dev/docs/load-test/advance
85
86
  };
86
87
  logger.info("Starting trace collection", {
87
88
  playwright: params.playwright,
89
+ browser: params.browser,
88
90
  runtime: params.runtime,
89
91
  include: params.include,
90
92
  exclude: params.exclude,
@@ -116,6 +118,7 @@ For detailed documentation visit: https://www.skyramp.dev/docs/load-test/advance
116
118
  generateNoProxy: params.noProxy,
117
119
  unblock: true,
118
120
  playwright: params.playwright,
121
+ browser: params.browser,
119
122
  playwrightStoragePath: params.playwrightStoragePath,
120
123
  playwrightViewportSize: params.playwrightViewportSize,
121
124
  entrypoint: TELEMETRY_entrypoint_FIELD_NAME,
@@ -96,6 +96,10 @@ export const basePlaywrightSchema = z.object({
96
96
  .string()
97
97
  .default("")
98
98
  .describe("Viewport size for playwright browser. THE VALUE MUST BE IN THE FORMAT ['', 'hd', 'full-hd', '2k', 'x,y' e.g. '1920,1080' for 1920x1080 resolution]. DEFAULT VALUE IS ''. If set to '', the browser will use its default viewport size."),
99
+ browser: z
100
+ .enum(["chromium", "firefox", "webkit"])
101
+ .default("chromium")
102
+ .describe("Browser to use for Playwright trace collection. Supported browsers: 'chromium' (default), 'firefox', 'webkit'. Choose based on your testing requirements and target browser compatibility."),
99
103
  });
100
104
  export const baseTraceSchema = z.object({
101
105
  trace: z
@@ -0,0 +1,34 @@
1
+ import * as fs from "fs";
2
+ import * as path from "path";
3
+ import { fileURLToPath } from "url";
4
+ import { SkyrampClient } from "@skyramp/skyramp";
5
+ import { logger } from "./logger.js";
6
+ /**
7
+ * Get the current MCP package version from package.json
8
+ */
9
+ export function getPackageVersion() {
10
+ const __filename = fileURLToPath(import.meta.url);
11
+ const __dirname = path.dirname(__filename);
12
+ const packageJson = fs.readFileSync(path.join(__dirname, "../../package.json"), "utf8");
13
+ const packageJsonData = JSON.parse(packageJson);
14
+ return packageJsonData.version;
15
+ }
16
+ /**
17
+ * Checks if this is the first time after extension install and initializes the agent if needed.
18
+ */
19
+ export async function initCheck() {
20
+ const currentVersion = getPackageVersion();
21
+ try {
22
+ const client = new SkyrampClient();
23
+ // @ts-ignore - Backend will be updated to add initAgent method that accepts version and entryPoint parameters
24
+ const initOutput = await client.initAgent({
25
+ version: currentVersion,
26
+ entryPoint: "mcp",
27
+ });
28
+ logger.info("Skyramp MCP agent initialized", { initOutput });
29
+ }
30
+ catch (error) {
31
+ logger.error("Error during first-time initialization", { error });
32
+ throw error;
33
+ }
34
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@skyramp/mcp",
3
- "version": "0.0.55",
3
+ "version": "0.0.56",
4
4
  "main": "build/index.js",
5
5
  "type": "module",
6
6
  "bin": {
@@ -46,7 +46,7 @@
46
46
  "dependencies": {
47
47
  "@modelcontextprotocol/sdk": "^1.24.3",
48
48
  "@playwright/test": "^1.55.0",
49
- "@skyramp/skyramp": "1.3.8",
49
+ "@skyramp/skyramp": "1.3.9",
50
50
  "dockerode": "^4.0.6",
51
51
  "fast-glob": "^3.3.3",
52
52
  "simple-git": "^3.30.0",