@gaffer-sh/mcp 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,163 @@
+ # @gaffer-sh/mcp
+
+ MCP (Model Context Protocol) server for [Gaffer](https://gaffer.sh) - give your AI assistant memory of your tests.
+
+ ## What is this?
+
+ This MCP server connects AI coding assistants like Claude Code and Cursor to your Gaffer test history. It allows AI to:
+
+ - Check your project's test health (pass rate, flaky tests, trends)
+ - Look up the history of specific tests to understand stability
+ - Get context about test failures when debugging
+
+ ## Prerequisites
+
+ 1. A [Gaffer](https://gaffer.sh) account with test results uploaded
+ 2. An API key from your project settings
+
+ ## Setup
+
+ ### Claude Code
+
+ Add to your Claude Code settings (`~/.claude.json` or project `.claude/settings.json`):
+
+ ```json
+ {
+   "mcpServers": {
+     "gaffer": {
+       "command": "npx",
+       "args": ["-y", "@gaffer-sh/mcp"],
+       "env": {
+         "GAFFER_API_KEY": "gfr_your_api_key_here"
+       }
+     }
+   }
+ }
+ ```
+
+ ### Cursor
+
+ Add to `.cursor/mcp.json` in your project:
+
+ ```json
+ {
+   "mcpServers": {
+     "gaffer": {
+       "command": "npx",
+       "args": ["-y", "@gaffer-sh/mcp"],
+       "env": {
+         "GAFFER_API_KEY": "gfr_your_api_key_here"
+       }
+     }
+   }
+ }
+ ```
+
+ ## Available Tools
+
+ ### `get_project_health`
+
+ Get the health metrics for your project.
+
+ **Input:**
+ - `days` (optional): Number of days to analyze (default: 30)
+
+ **Returns:**
+ - Health score (0-100)
+ - Pass rate percentage
+ - Number of test runs
+ - Flaky test count
+ - Trend (up/down/stable)
+
+ **Example prompt:** "What's the health of my test suite?"
+
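+ For reference, the tool's structured result is shaped like this (field names mirror the server's output schema in `dist/index.js`; the values are illustrative only):
+
+ ```json
+ {
+   "projectName": "my-project",
+   "healthScore": 87,
+   "passRate": 96.4,
+   "testRunCount": 142,
+   "flakyTestCount": 3,
+   "trend": "stable",
+   "period": { "days": 30, "start": "2025-01-01", "end": "2025-01-30" }
+ }
+ ```
+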
+ ### `get_test_history`
+
+ Get the pass/fail history for a specific test.
+
+ **Input** (either `testName` or `filePath` is required):
+ - `testName`: Exact test name to search for
+ - `filePath`: File path containing the test
+ - `limit` (optional): Max results (default: 20)
+
+ **Returns:**
+ - History of test runs with pass/fail status
+ - Duration of each run
+ - Branch and commit info
+ - Error messages for failures
+ - Summary statistics
+
+ **Example prompts:**
+ - "Is the login test flaky? Check its history"
+ - "Show me the history for tests in auth.test.ts"
+
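+ An illustrative result shape, again taken from the output schema in `dist/index.js` (values are made up):
+
+ ```json
+ {
+   "history": [
+     {
+       "testRunId": "run_abc123",
+       "createdAt": "2025-01-30T12:00:00Z",
+       "branch": "main",
+       "commitSha": "a1b2c3d",
+       "status": "failed",
+       "durationMs": 5421,
+       "message": "Expected element to be visible"
+     }
+   ],
+   "summary": { "totalRuns": 20, "passedRuns": 17, "failedRuns": 3, "passRate": 85 }
+ }
+ ```
+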
+ ### `get_flaky_tests`
+
+ Get the list of flaky tests in your project.
+
+ **Input:**
+ - `threshold` (optional): Minimum flip rate to be considered flaky (0-1, default: 0.1)
+ - `limit` (optional): Max results (default: 50)
+ - `days` (optional): Analysis period in days (default: 30)
+
+ **Returns:**
+ - List of flaky tests with flip rates
+ - Number of status transitions
+ - Total runs analyzed
+ - When each test last ran
+
+ **Example prompts:**
+ - "Which tests are flaky in my project?"
+ - "Show me the most unstable tests"
+
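+ Example result shape (fields from the output schema; values invented for illustration):
+
+ ```json
+ {
+   "flakyTests": [
+     { "name": "should handle user login", "flipRate": 0.25, "flipCount": 5, "totalRuns": 20, "lastSeen": "2025-01-30T12:00:00Z" }
+   ],
+   "summary": { "threshold": 0.1, "totalFlaky": 1, "period": 30 }
+ }
+ ```
+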
+ ### `list_test_runs`
+
+ List recent test runs with optional filtering.
+
+ **Input (all optional):**
+ - `commitSha`: Filter by commit SHA (supports prefix matching)
+ - `branch`: Filter by branch name
+ - `status`: Filter by "passed" (no failures) or "failed" (has failures)
+ - `limit`: Max results (default: 20)
+
+ **Returns:**
+ - List of test runs with pass/fail/skip counts
+ - Commit SHA and branch info
+ - Pagination info
+
+ **Example prompts:**
+ - "What tests failed in the last commit?"
+ - "Show me test runs on the main branch"
+ - "Did any tests fail on my feature branch?"
+
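+ A sketch of the returned shape, with made-up values (fields follow the server's output schema):
+
+ ```json
+ {
+   "testRuns": [
+     {
+       "id": "run_abc123",
+       "commitSha": "a1b2c3d",
+       "branch": "main",
+       "passedCount": 118,
+       "failedCount": 2,
+       "skippedCount": 4,
+       "totalCount": 124,
+       "createdAt": "2025-01-30T12:00:00Z"
+     }
+   ],
+   "pagination": { "total": 57, "hasMore": true }
+ }
+ ```
+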
+ ## Environment Variables
+
+ | Variable | Required | Description |
+ |----------|----------|-------------|
+ | `GAFFER_API_KEY` | Yes | Your Gaffer API key (starts with `gfr_`) |
+ | `GAFFER_API_URL` | No | API base URL (default: `https://app.gaffer.sh`) |
+
+ ## Local Development
+
+ ```bash
+ # From monorepo root
+ pnpm install
+ pnpm --filter @gaffer-sh/mcp build
+ ```
+
+ To test locally with Claude Code, point it at the built output (use an absolute path):
+
+ ```json
+ {
+   "mcpServers": {
+     "gaffer": {
+       "command": "node",
+       "args": ["/path/to/gaffer-v2/packages/mcp-server/dist/index.js"],
+       "env": {
+         "GAFFER_API_KEY": "gfr_..."
+       }
+     }
+   }
+ }
+ ```
+
+ ## License
+
+ MIT
@@ -0,0 +1,2 @@
+
+ export { }
package/dist/index.js ADDED
@@ -0,0 +1,482 @@
+ #!/usr/bin/env node
+
+ // src/index.ts
+ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
+ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+
+ // src/api-client.ts
+ var REQUEST_TIMEOUT_MS = 3e4;
+ var GafferApiClient = class _GafferApiClient {
+   apiKey;
+   baseUrl;
+   constructor(config) {
+     this.apiKey = config.apiKey;
+     this.baseUrl = config.baseUrl.replace(/\/$/, "");
+   }
+   /**
+    * Create client from environment variables
+    */
+   static fromEnv() {
+     const apiKey = process.env.GAFFER_API_KEY;
+     if (!apiKey) {
+       throw new Error("GAFFER_API_KEY environment variable is required");
+     }
+     const baseUrl = process.env.GAFFER_API_URL || "https://app.gaffer.sh";
+     return new _GafferApiClient({ apiKey, baseUrl });
+   }
+   /**
+    * Make authenticated request to Gaffer API
+    */
+   async request(endpoint, params) {
+     const url = new URL(`/api/v1${endpoint}`, this.baseUrl);
+     if (params) {
+       for (const [key, value] of Object.entries(params)) {
+         if (value !== void 0 && value !== null) {
+           url.searchParams.set(key, String(value));
+         }
+       }
+     }
+     const controller = new AbortController();
+     const timeoutId = setTimeout(() => controller.abort(), REQUEST_TIMEOUT_MS);
+     try {
+       const response = await fetch(url.toString(), {
+         method: "GET",
+         headers: {
+           "X-API-Key": this.apiKey,
+           "Accept": "application/json",
+           "User-Agent": "gaffer-mcp/0.1.0"
+         },
+         signal: controller.signal
+       });
+       if (!response.ok) {
+         const errorData = await response.json().catch(() => ({}));
+         const errorMessage = errorData.error?.message || `API request failed: ${response.status}`;
+         throw new Error(errorMessage);
+       }
+       return response.json();
+     } catch (error) {
+       if (error instanceof Error && error.name === "AbortError") {
+         throw new Error(`Request timed out after ${REQUEST_TIMEOUT_MS}ms`);
+       }
+       throw error;
+     } finally {
+       clearTimeout(timeoutId);
+     }
+   }
+   /**
+    * Get project health analytics
+    */
+   async getProjectHealth(options = {}) {
+     return this.request("/project/analytics", {
+       days: options.days || 30
+     });
+   }
+   /**
+    * Get test history for a specific test
+    */
+   async getTestHistory(options) {
+     const testName = options.testName?.trim();
+     const filePath = options.filePath?.trim();
+     if (!testName && !filePath) {
+       throw new Error("Either testName or filePath is required (and must not be empty)");
+     }
+     return this.request("/project/test-history", {
+       ...testName && { testName },
+       ...filePath && { filePath },
+       ...options.limit && { limit: options.limit }
+     });
+   }
+   /**
+    * Get flaky tests for the project
+    */
+   async getFlakyTests(options = {}) {
+     return this.request("/project/flaky-tests", {
+       ...options.threshold && { threshold: options.threshold },
+       ...options.limit && { limit: options.limit },
+       ...options.days && { days: options.days }
+     });
+   }
+   /**
+    * List test runs for the project
+    */
+   async getTestRuns(options = {}) {
+     return this.request("/project/test-runs", {
+       ...options.commitSha && { commitSha: options.commitSha },
+       ...options.branch && { branch: options.branch },
+       ...options.status && { status: options.status },
+       ...options.limit && { limit: options.limit }
+     });
+   }
+ };
+
+ // src/tools/get-flaky-tests.ts
+ import { z } from "zod";
+ var getFlakyTestsInputSchema = {
+   threshold: z.number().min(0).max(1).optional().describe("Minimum flip rate to be considered flaky (0-1, default: 0.1 = 10%)"),
+   limit: z.number().int().min(1).max(100).optional().describe("Maximum number of flaky tests to return (default: 50)"),
+   days: z.number().int().min(1).max(365).optional().describe("Analysis period in days (default: 30)")
+ };
+ var getFlakyTestsOutputSchema = {
+   flakyTests: z.array(z.object({
+     name: z.string(),
+     flipRate: z.number(),
+     flipCount: z.number(),
+     totalRuns: z.number(),
+     lastSeen: z.string()
+   })),
+   summary: z.object({
+     threshold: z.number(),
+     totalFlaky: z.number(),
+     period: z.number()
+   })
+ };
+ async function executeGetFlakyTests(client, input) {
+   const response = await client.getFlakyTests({
+     threshold: input.threshold,
+     limit: input.limit,
+     days: input.days
+   });
+   return {
+     flakyTests: response.flakyTests,
+     summary: response.summary
+   };
+ }
+ var getFlakyTestsMetadata = {
+   name: "get_flaky_tests",
+   title: "Get Flaky Tests",
+   description: `Get the list of flaky tests in the project.
+
+ A test is considered flaky if it frequently switches between pass and fail states
+ (high "flip rate"). This helps identify unreliable tests that need attention.
+
+ Returns:
+ - List of flaky tests with:
+ - name: Test name
+ - flipRate: How often the test flips between pass/fail (0-1)
+ - flipCount: Number of status transitions
+ - totalRuns: Total test executions analyzed
+ - lastSeen: When the test last ran
+ - Summary with threshold used and total count
+
+ Use this after get_project_health shows flaky tests exist, to identify which
+ specific tests are flaky and need investigation.`
+ };
+
+ // src/tools/get-project-health.ts
+ import { z as z2 } from "zod";
+ var getProjectHealthInputSchema = {
+   days: z2.number().int().min(1).max(365).optional().describe("Number of days to analyze (default: 30)")
+ };
+ var getProjectHealthOutputSchema = {
+   projectName: z2.string(),
+   healthScore: z2.number(),
+   passRate: z2.number().nullable(),
+   testRunCount: z2.number(),
+   flakyTestCount: z2.number(),
+   trend: z2.enum(["up", "down", "stable"]),
+   period: z2.object({
+     days: z2.number(),
+     start: z2.string(),
+     end: z2.string()
+   })
+ };
+ async function executeGetProjectHealth(client, input) {
+   const response = await client.getProjectHealth({ days: input.days });
+   return {
+     projectName: response.analytics.projectName,
+     healthScore: response.analytics.healthScore,
+     passRate: response.analytics.passRate,
+     testRunCount: response.analytics.testRunCount,
+     flakyTestCount: response.analytics.flakyTestCount,
+     trend: response.analytics.trend,
+     period: response.analytics.period
+   };
+ }
+ var getProjectHealthMetadata = {
+   name: "get_project_health",
+   title: "Get Project Health",
+   description: `Get the health metrics for the project associated with your API key.
+
+ Returns:
+ - Health score (0-100): Overall project health based on pass rate and trend
+ - Pass rate: Percentage of tests passing
+ - Test run count: Number of test runs in the period
+ - Flaky test count: Number of tests with inconsistent results
+ - Trend: Whether test health is improving (up), declining (down), or stable
+
+ Use this to understand the current state of your test suite.`
+ };
+
+ // src/tools/get-test-history.ts
+ import { z as z3 } from "zod";
+ var getTestHistoryInputSchema = {
+   testName: z3.string().optional().describe("Exact test name to search for"),
+   filePath: z3.string().optional().describe("File path containing the test"),
+   limit: z3.number().int().min(1).max(100).optional().describe("Maximum number of results (default: 20)")
+ };
+ var getTestHistoryOutputSchema = {
+   history: z3.array(z3.object({
+     testRunId: z3.string(),
+     createdAt: z3.string(),
+     branch: z3.string().optional(),
+     commitSha: z3.string().optional(),
+     status: z3.enum(["passed", "failed", "skipped", "pending"]),
+     durationMs: z3.number(),
+     message: z3.string().optional()
+   })),
+   summary: z3.object({
+     totalRuns: z3.number(),
+     passedRuns: z3.number(),
+     failedRuns: z3.number(),
+     passRate: z3.number().nullable()
+   })
+ };
+ async function executeGetTestHistory(client, input) {
+   if (!input.testName && !input.filePath) {
+     throw new Error("Either testName or filePath is required");
+   }
+   const response = await client.getTestHistory({
+     testName: input.testName,
+     filePath: input.filePath,
+     limit: input.limit || 20
+   });
+   return {
+     history: response.history.map((entry) => ({
+       testRunId: entry.testRunId,
+       createdAt: entry.createdAt,
+       branch: entry.branch,
+       commitSha: entry.commitSha,
+       status: entry.test.status,
+       durationMs: entry.test.durationMs,
+       message: entry.test.message || void 0
+       // Convert null to undefined for schema compliance
+     })),
+     summary: {
+       totalRuns: response.summary.totalRuns,
+       passedRuns: response.summary.passedRuns,
+       failedRuns: response.summary.failedRuns,
+       passRate: response.summary.passRate
+     }
+   };
+ }
+ var getTestHistoryMetadata = {
+   name: "get_test_history",
+   title: "Get Test History",
+   description: `Get the pass/fail history for a specific test.
+
+ Search by either:
+ - testName: The exact name of the test (e.g., "should handle user login")
+ - filePath: The file path containing the test (e.g., "tests/auth.test.ts")
+
+ Returns:
+ - History of test runs showing pass/fail status over time
+ - Duration of each run
+ - Branch and commit information
+ - Error messages for failed runs
+ - Summary statistics (pass rate, total runs)
+
+ Use this to investigate flaky tests or understand test stability.`
+ };
+
+ // src/tools/list-test-runs.ts
+ import { z as z4 } from "zod";
+ var listTestRunsInputSchema = {
+   commitSha: z4.string().optional().describe("Filter by commit SHA (exact or prefix match)"),
+   branch: z4.string().optional().describe("Filter by branch name"),
+   status: z4.enum(["passed", "failed"]).optional().describe('Filter by status: "passed" (no failures) or "failed" (has failures)'),
+   limit: z4.number().int().min(1).max(100).optional().describe("Maximum number of test runs to return (default: 20)")
+ };
+ var listTestRunsOutputSchema = {
+   testRuns: z4.array(z4.object({
+     id: z4.string(),
+     commitSha: z4.string().optional(),
+     branch: z4.string().optional(),
+     passedCount: z4.number(),
+     failedCount: z4.number(),
+     skippedCount: z4.number(),
+     totalCount: z4.number(),
+     createdAt: z4.string()
+   })),
+   pagination: z4.object({
+     total: z4.number(),
+     hasMore: z4.boolean()
+   })
+ };
+ async function executeListTestRuns(client, input) {
+   const response = await client.getTestRuns({
+     commitSha: input.commitSha,
+     branch: input.branch,
+     status: input.status,
+     limit: input.limit || 20
+   });
+   return {
+     testRuns: response.testRuns.map((run) => ({
+       id: run.id,
+       commitSha: run.commitSha || void 0,
+       branch: run.branch || void 0,
+       passedCount: run.summary.passed,
+       failedCount: run.summary.failed,
+       skippedCount: run.summary.skipped,
+       totalCount: run.summary.total,
+       createdAt: run.createdAt
+     })),
+     pagination: {
+       total: response.pagination.total,
+       hasMore: response.pagination.hasMore
+     }
+   };
+ }
+ var listTestRunsMetadata = {
+   name: "list_test_runs",
+   title: "List Test Runs",
+   description: `List recent test runs for the project with optional filtering.
+
+ Filter by:
+ - commitSha: Filter by commit SHA (supports prefix matching)
+ - branch: Filter by branch name
+ - status: Filter by "passed" (no failures) or "failed" (has failures)
+
+ Returns:
+ - List of test runs with:
+ - id: Test run ID (can be used with get_test_run for details)
+ - commitSha: Git commit SHA
+ - branch: Git branch name
+ - passedCount/failedCount/skippedCount: Test counts
+ - createdAt: When the test run was created
+ - Pagination info (total count, hasMore flag)
+
+ Use cases:
+ - "What tests failed in commit abc123?"
+ - "Show me recent test runs on main branch"
+ - "What's the status of tests on my feature branch?"`
+ };
+
+ // src/index.ts
+ async function main() {
+   if (!process.env.GAFFER_API_KEY) {
+     console.error("Error: GAFFER_API_KEY environment variable is required");
+     console.error("");
+     console.error("Get your API key from: https://app.gaffer.sh/settings/api-keys");
+     console.error("");
+     console.error("Then configure Claude Code or Cursor with:");
+     console.error(JSON.stringify({
+       mcpServers: {
+         gaffer: {
+           command: "npx",
+           args: ["-y", "@gaffer-sh/mcp"],
+           env: {
+             GAFFER_API_KEY: "your-api-key-here"
+           }
+         }
+       }
+     }, null, 2));
+     process.exit(1);
+   }
+   const client = GafferApiClient.fromEnv();
+   const server = new McpServer({
+     name: "gaffer",
+     version: "0.1.0"
+   });
+   server.registerTool(
+     getProjectHealthMetadata.name,
+     {
+       title: getProjectHealthMetadata.title,
+       description: getProjectHealthMetadata.description,
+       inputSchema: getProjectHealthInputSchema,
+       outputSchema: getProjectHealthOutputSchema
+     },
+     async (input) => {
+       try {
+         const output = await executeGetProjectHealth(client, input);
+         return {
+           content: [{ type: "text", text: JSON.stringify(output, null, 2) }],
+           structuredContent: output
+         };
+       } catch (error) {
+         const message = error instanceof Error ? error.message : "Unknown error";
+         return {
+           content: [{ type: "text", text: `Error: ${message}` }],
+           isError: true
+         };
+       }
+     }
+   );
+   server.registerTool(
+     getTestHistoryMetadata.name,
+     {
+       title: getTestHistoryMetadata.title,
+       description: getTestHistoryMetadata.description,
+       inputSchema: getTestHistoryInputSchema,
+       outputSchema: getTestHistoryOutputSchema
+     },
+     async (input) => {
+       try {
+         const output = await executeGetTestHistory(client, input);
+         return {
+           content: [{ type: "text", text: JSON.stringify(output, null, 2) }],
+           structuredContent: output
+         };
+       } catch (error) {
+         const message = error instanceof Error ? error.message : "Unknown error";
+         return {
+           content: [{ type: "text", text: `Error: ${message}` }],
+           isError: true
+         };
+       }
+     }
+   );
+   server.registerTool(
+     getFlakyTestsMetadata.name,
+     {
+       title: getFlakyTestsMetadata.title,
+       description: getFlakyTestsMetadata.description,
+       inputSchema: getFlakyTestsInputSchema,
+       outputSchema: getFlakyTestsOutputSchema
+     },
+     async (input) => {
+       try {
+         const output = await executeGetFlakyTests(client, input);
+         return {
+           content: [{ type: "text", text: JSON.stringify(output, null, 2) }],
+           structuredContent: output
+         };
+       } catch (error) {
+         const message = error instanceof Error ? error.message : "Unknown error";
+         return {
+           content: [{ type: "text", text: `Error: ${message}` }],
+           isError: true
+         };
+       }
+     }
+   );
+   server.registerTool(
+     listTestRunsMetadata.name,
+     {
+       title: listTestRunsMetadata.title,
+       description: listTestRunsMetadata.description,
+       inputSchema: listTestRunsInputSchema,
+       outputSchema: listTestRunsOutputSchema
+     },
+     async (input) => {
+       try {
+         const output = await executeListTestRuns(client, input);
+         return {
+           content: [{ type: "text", text: JSON.stringify(output, null, 2) }],
+           structuredContent: output
+         };
+       } catch (error) {
+         const message = error instanceof Error ? error.message : "Unknown error";
+         return {
+           content: [{ type: "text", text: `Error: ${message}` }],
+           isError: true
+         };
+       }
+     }
+   );
+   const transport = new StdioServerTransport();
+   await server.connect(transport);
+ }
+ main().catch((error) => {
+   console.error("Fatal error:", error);
+   process.exit(1);
+ });
package/package.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "name": "@gaffer-sh/mcp",
+   "type": "module",
+   "version": "0.1.0",
+   "description": "MCP server for Gaffer test history - give your AI assistant memory of your tests",
+   "license": "MIT",
+   "author": "Gaffer <hello@gaffer.sh>",
+   "homepage": "https://gaffer.sh",
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/gaffer-sh/gaffer-v2.git",
+     "directory": "packages/mcp-server"
+   },
+   "bugs": {
+     "url": "https://github.com/gaffer-sh/gaffer-v2/issues"
+   },
+   "keywords": [
+     "mcp",
+     "model-context-protocol",
+     "gaffer",
+     "test-results",
+     "claude",
+     "cursor",
+     "ai-assistant"
+   ],
+   "main": "./dist/index.js",
+   "types": "./dist/index.d.ts",
+   "bin": {
+     "gaffer-mcp": "./dist/index.js"
+   },
+   "files": [
+     "dist"
+   ],
+   "engines": {
+     "node": ">=18"
+   },
+   "dependencies": {
+     "@modelcontextprotocol/sdk": "^1.0.0",
+     "zod": "^3.23.0"
+   },
+   "devDependencies": {
+     "@types/node": "^22.0.0",
+     "bumpp": "^10.1.0",
+     "tsup": "^8.3.5",
+     "typescript": "^5.7.2"
+   },
+   "scripts": {
+     "build": "tsup",
+     "postbuild": "chmod +x dist/index.js",
+     "dev": "tsup --watch",
+     "typecheck": "tsc --noEmit",
+     "release": "bumpp --tag 'mcp-v%s' --commit 'chore(mcp): release v%s' && git push --follow-tags"
+   }
+ }