codex-review-mcp 2.3.4 → 2.3.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/mcp-server.js +17 -14
- package/dist/mcp-server.test.js +287 -0
- package/package.json +1 -1
package/dist/mcp-server.js
CHANGED
@@ -32,41 +32,44 @@ server.registerTool('perform_code_review', {
 
 🎯 USAGE:
 
-1. Get the code
+1. Get the workspace directory and code to review:
+const workspaceDir = await getWorkspaceDirectory();
 const diff = await runCommand("git diff");
 
-2.
-const rules = await readFile(".cursor/rules/project.mdc");
-
-3. Call with content:
+2. Call with content AND workspaceDir for context gathering:
 await perform_code_review({
 content: diff, // REQUIRED
+workspaceDir: workspaceDir, // ⭐ CRITICAL for .cursor/rules!
 contentType: "diff", // "diff" or "code"
-customContext: rules, // Optional: saves tokens!
 focus: "security and performance" // Optional: specific focus
 });
 
 💡 KEY BENEFITS:
 - Expert Reviews: GPT-5 Codex with project-specific context
-- Codebase-Aware:
+- Codebase-Aware: Auto-finds .cursor/rules, package.json, tsconfig.json
+- Prioritized: .cursor/rules are ALWAYS checked first
 - Efficient: Provide customContext to skip auto-gathering
 
+⚠️ IMPORTANT:
+- workspaceDir is CRITICAL for finding .cursor/rules and project files
+- Without it, context gathering looks in the wrong directory!
+
 📋 PARAMETERS:
 - content: Code or diff to review (REQUIRED)
+- workspaceDir: Project directory (CRITICAL for .cursor/rules context)
 - contentType: "diff" | "code"
-- customContext:
-- skipContextGathering: Skip auto-gathering
-- focus: Specific areas to review (e.g., "security", "performance")
-- workspaceDir: Base directory for context gathering (defaults to cwd)`,
+- customContext: Manual context (bypasses auto-gathering if provided)
+- skipContextGathering: Skip auto-gathering (only use if no context needed)
+- focus: Specific areas to review (e.g., "security", "performance")`,
 inputSchema: {
 // REQUIRED
 content: z.string().describe('Code or diff content to review (REQUIRED). Agent should run git commands or read files to get this.'),
 // CONTENT TYPE
 contentType: z.enum(['diff', 'code']).optional().describe('Type of content: "diff" for git diffs, "code" for static code review'),
 // CONTEXT OPTIONS
-
-
-
+workspaceDir: z.string().optional().describe('⭐ CRITICAL: Project root directory for finding .cursor/rules and config files. Without this, context gathering looks in wrong directory!'),
+customContext: z.string().optional().describe('Optional: Provide manual context to skip auto-gathering (.cursor/rules, CODE_REVIEW.md, etc)'),
+skipContextGathering: z.boolean().optional().describe('Set to true to skip automatic context gathering (only if no context needed)'),
 // REVIEW OPTIONS
 focus: z.string().optional().describe('Specific areas to focus on (e.g., "security and performance")'),
 maxTokens: z.number().optional().describe('Maximum tokens for review response'),
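For orientation, the workflow the updated description asks a calling agent to follow would look roughly like this on the client side. This is a sketch only: getWorkspaceDirectory and runCommand stand in for whatever helpers the calling agent actually has, and perform_code_review is invoked through the agent's MCP tool-calling mechanism rather than as a plain function.

// Hypothetical agent-side flow for the new workspaceDir parameter (not package code).
const workspaceDir = await getWorkspaceDirectory();   // e.g. the project root the agent is working in
const diff = await runCommand('git diff');            // the content to review

const review = await perform_code_review({
  content: diff,                       // REQUIRED: diff or code to review
  contentType: 'diff',                 // 'diff' or 'code'
  workspaceDir,                        // lets the server find .cursor/rules, package.json, tsconfig.json
  focus: 'security and performance',   // optional
});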
package/dist/mcp-server.test.js
ADDED
@@ -0,0 +1,287 @@
+import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
+import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
+import { Client } from '@modelcontextprotocol/sdk/client/index.js';
+import { InMemoryTransport } from '@modelcontextprotocol/sdk/inMemory.js';
+import { z } from 'zod';
+// Mock the performCodeReview function
+vi.mock('./tools/performCodeReview.js', () => ({
+  performCodeReview: vi.fn()
+}));
+import { performCodeReview } from './tools/performCodeReview.js';
+describe('MCP Server Integration Tests', () => {
+  let server;
+  let client;
+  let serverTransport;
+  let clientTransport;
+  beforeEach(async () => {
+    // Reset mocks
+    vi.clearAllMocks();
+    // Create linked in-memory transports
+    [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair();
+    // Import and set up the server (we'll need to refactor mcp-server.ts slightly)
+    // For now, create a test server with the same configuration
+    server = new McpServer({ name: 'codex-review-mcp', version: '2.3.4' });
+    // Register the same tools as in mcp-server.ts
+    server.registerTool('get_version', {
+      title: 'Get Version',
+      description: 'Get the version of the codex-review-mcp server.',
+      inputSchema: {},
+    }, async () => {
+      return {
+        content: [{
+          type: 'text',
+          text: `codex-review-mcp version 2.3.4`,
+          mimeType: 'text/plain'
+        }],
+        _meta: { version: '2.3.4' }
+      };
+    });
+    server.registerTool('perform_code_review', {
+      title: 'Perform Code Review',
+      description: 'Review code/diffs using GPT-5 Codex',
+      inputSchema: {
+        content: z.string().describe('Code or diff content to review'),
+        contentType: z.enum(['diff', 'code']).optional(),
+        workspaceDir: z.string().optional(),
+        customContext: z.string().optional(),
+        skipContextGathering: z.boolean().optional(),
+        focus: z.string().optional(),
+        maxTokens: z.number().optional(),
+      },
+    }, async (input, extra) => {
+      const onProgress = async (message, progress, total) => {
+        // Only send progress notifications if we have a valid progress token
+        const progressToken = extra?._meta?.progressToken ?? extra?.requestId;
+        if (progressToken === undefined || progressToken === null) {
+          // Skip progress notifications if no token available
+          return;
+        }
+        await server.server.notification({
+          method: 'notifications/progress',
+          params: {
+            progressToken,
+            progress,
+            total,
+            message,
+          },
+        }, extra?.requestId ? { relatedRequestId: extra.requestId } : undefined);
+      };
+      try {
+        const markdown = await performCodeReview({
+          content: input.content,
+          contentType: input.contentType,
+          workspaceDir: input.workspaceDir,
+          customContext: input.customContext,
+          skipContextGathering: input.skipContextGathering,
+          focus: input.focus,
+          maxTokens: input.maxTokens,
+        }, onProgress);
+        return {
+          content: [{ type: 'text', text: markdown, mimeType: 'text/markdown' }],
+          _meta: { version: '2.3.4' }
+        };
+      }
+      catch (error) {
+        const errorMessage = error?.message || String(error);
+        return {
+          content: [{
+            type: 'text',
+            text: `❌ Code Review Failed\n\n**Error:** ${errorMessage}\n\n**Version:** 2.3.4\n\nCheck MCP server logs for details.`,
+            mimeType: 'text/markdown'
+          }],
+          isError: true,
+          _meta: { version: '2.3.4', error: errorMessage }
+        };
+      }
+    });
+    // Create client
+    client = new Client({
+      name: 'test-client',
+      version: '1.0.0'
+    }, {
+      capabilities: {}
+    });
+    // Connect both
+    await server.connect(serverTransport);
+    await client.connect(clientTransport);
+  });
+  afterEach(async () => {
+    await client.close();
+    await server.close();
+  });
+  describe('Server Capabilities', () => {
+    it('should list all registered tools', async () => {
+      const tools = await client.listTools();
+      expect(tools.tools).toHaveLength(2);
+      expect(tools.tools.map(t => t.name)).toContain('get_version');
+      expect(tools.tools.map(t => t.name)).toContain('perform_code_review');
+    });
+    it('should have correct tool schemas', async () => {
+      const tools = await client.listTools();
+      const getVersionTool = tools.tools.find(t => t.name === 'get_version');
+      expect(getVersionTool).toBeDefined();
+      expect(getVersionTool?.description).toContain('version');
+      const reviewTool = tools.tools.find(t => t.name === 'perform_code_review');
+      expect(reviewTool).toBeDefined();
+      expect(reviewTool?.description).toContain('Review code');
+    });
+  });
+  describe('get_version Tool', () => {
+    it('should return the server version', async () => {
+      const result = await client.callTool({
+        name: 'get_version',
+        arguments: {}
+      });
+      expect(result.content).toHaveLength(1);
+      expect(result.content[0]).toMatchObject({
+        type: 'text',
+        mimeType: 'text/plain'
+      });
+      const text = result.content[0].text;
+      expect(text).toContain('codex-review-mcp version');
+      expect(text).toContain('2.3.4');
+    });
+    it('should include version in metadata', async () => {
+      const result = await client.callTool({
+        name: 'get_version',
+        arguments: {}
+      });
+      expect(result._meta).toBeDefined();
+      expect(result._meta?.version).toBe('2.3.4');
+    });
+  });
+  describe('perform_code_review Tool', () => {
+    beforeEach(() => {
+      // Mock performCodeReview to return a simple response
+      vi.mocked(performCodeReview).mockResolvedValue('# Code Review\n\nLooks good!');
+    });
+    it('should call performCodeReview with correct parameters', async () => {
+      await client.callTool({
+        name: 'perform_code_review',
+        arguments: {
+          content: 'console.log("test");',
+          contentType: 'code',
+          focus: 'security'
+        }
+      });
+      expect(performCodeReview).toHaveBeenCalledWith(expect.objectContaining({
+        content: 'console.log("test");',
+        contentType: 'code',
+        focus: 'security'
+      }), expect.any(Function) // onProgress callback
+      );
+    });
+    it('should return markdown content', async () => {
+      const result = await client.callTool({
+        name: 'perform_code_review',
+        arguments: {
+          content: 'console.log("test");'
+        }
+      });
+      expect(result.content).toHaveLength(1);
+      expect(result.content[0]).toMatchObject({
+        type: 'text',
+        mimeType: 'text/markdown'
+      });
+      const text = result.content[0].text;
+      expect(text).toContain('Code Review');
+    });
+    it('should handle errors gracefully', async () => {
+      // Mock an error
+      vi.mocked(performCodeReview).mockRejectedValue(new Error('Test error'));
+      const result = await client.callTool({
+        name: 'perform_code_review',
+        arguments: {
+          content: 'console.log("test");'
+        }
+      });
+      expect(result.isError).toBe(true);
+      const text = result.content[0].text;
+      expect(text).toContain('Code Review Failed');
+      expect(text).toContain('Test error');
+    });
+  });
+  describe('Progress Notifications', () => {
+    it('should call onProgress callback with proper parameters', async () => {
+      let onProgressCalled = false;
+      let progressMessages = [];
+      // Mock performCodeReview to call onProgress
+      vi.mocked(performCodeReview).mockImplementation(async (_input, onProgress) => {
+        if (onProgress) {
+          onProgressCalled = true;
+          await onProgress('Starting...', 10, 100);
+          progressMessages.push('Starting...');
+          await onProgress('Processing...', 50, 100);
+          progressMessages.push('Processing...');
+          await onProgress('Done', 100, 100);
+          progressMessages.push('Done');
+        }
+        return '# Review Complete';
+      });
+      await client.callTool({
+        name: 'perform_code_review',
+        arguments: {
+          content: 'test code'
+        }
+      });
+      // Verify onProgress was called with proper sequence
+      expect(onProgressCalled).toBe(true);
+      expect(progressMessages).toEqual(['Starting...', 'Processing...', 'Done']);
+    });
+    it('should handle progress with value 0 correctly (verifies explicit nullish check)', async () => {
+      // This test verifies we use explicit nullish check (=== null/undefined)
+      // not falsy check (!progressToken) which would incorrectly filter 0
+      let progressValues = [];
+      vi.mocked(performCodeReview).mockImplementation(async (_input, onProgress) => {
+        if (onProgress) {
+          await onProgress('Start', 0, 100); // 0 is a valid progress value
+          progressValues.push(0);
+          await onProgress('Middle', 50, 100);
+          progressValues.push(50);
+          await onProgress('End', 100, 100);
+          progressValues.push(100);
+        }
+        return '# Review Complete';
+      });
+      await client.callTool({
+        name: 'perform_code_review',
+        arguments: {
+          content: 'test code'
+        }
+      });
+      // Should accept 0 as a valid progress value
+      expect(progressValues).toContain(0);
+      expect(progressValues).toContain(50);
+      expect(progressValues).toContain(100);
+    });
+  });
+  describe('Error Handling', () => {
+    it('should catch and report errors with stack trace', async () => {
+      const testError = new Error('Detailed test error');
+      vi.mocked(performCodeReview).mockRejectedValue(testError);
+      const result = await client.callTool({
+        name: 'perform_code_review',
+        arguments: {
+          content: 'test code'
+        }
+      });
+      expect(result.isError).toBe(true);
+      const text = result.content[0].text;
+      expect(text).toContain('❌ Code Review Failed');
+      expect(text).toContain('Detailed test error');
+      expect(result._meta?.error).toBe('Detailed test error');
+    });
+    it('should handle non-Error objects', async () => {
+      vi.mocked(performCodeReview).mockRejectedValue('String error');
+      const result = await client.callTool({
+        name: 'perform_code_review',
+        arguments: {
+          content: 'test code'
+        }
+      });
+      expect(result.isError).toBe(true);
+      const text = result.content[0].text;
+      expect(text).toContain('String error');
+    });
+  });
+});
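The two progress tests rely on a detail of the handler shown above: the progress token is resolved with ?? and then compared explicitly against undefined and null, because 0 is a legitimate value that a plain truthiness check would discard. A standalone illustration of that distinction (illustrative only, not code from the package):

// Illustrative only: why an explicit nullish check keeps 0 while a truthy check drops it.
const candidates = [0, 42, undefined, null];
for (const progressToken of candidates) {
  const keptByTruthyCheck = Boolean(progressToken);                                 // 0 -> false (would skip a valid token)
  const keptByNullishCheck = progressToken !== undefined && progressToken !== null; // 0 -> true  (matches the handler)
  console.log(progressToken, { keptByTruthyCheck, keptByNullishCheck });
}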