codex-review-mcp 1.3.1 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -3
- package/dist/review/buildPrompt.js +37 -3
- package/dist/review/gatherContext.js +100 -12
- package/package.json +1 -1
package/README.md
CHANGED
@@ -11,6 +11,7 @@ With this MCP server, your AI assistant can call out for a code review **right i
 ## Features
 
 - 🎯 **Zero Configuration** - Just add your OpenAI API key
+- 📋 **Respects Your Cursor Rules** - Automatically reads `.cursor/rules/` and enforces YOUR coding standards
 - 🔍 Automatically reviews your uncommitted changes
 - 🤖 Powered by GPT-5 Codex for intelligent code analysis
 - 📝 Returns actionable feedback in Markdown format
@@ -100,9 +101,9 @@ Most users won't need these, but they're available if you want more control:
 
 1. **Detects Repository**: Automatically finds your git repository root
 2. **Collects Diff**: Runs git diff to get the changes
-3. **Gathers Context**:
-4. **AI Review**: Sends to GPT-5 Codex
-5. **Returns Feedback**: Provides actionable Markdown feedback
+3. **Gathers Context**: Reads your `.cursor/rules/`, `CODE_REVIEW.md`, `.eslintrc`, and other project files
+4. **AI Review**: Sends to GPT-5 Codex with YOUR project's coding standards
+5. **Returns Feedback**: Provides actionable Markdown feedback that respects YOUR rules
 
 ## Requirements
 
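The five README steps above map onto the two modules changed in this release. As a rough orientation only, here is a minimal sketch of how they could be wired together: the `reviewUncommittedChanges` wrapper, the `git` invocations, the OpenAI client usage, and the model identifier are illustrative assumptions, while `gatherContext` and `buildPrompt` are the functions whose diffs appear below.

```js
// Hypothetical glue code, not the package's actual entry point.
import { execSync } from 'node:child_process';
import OpenAI from 'openai';
import { gatherContext } from './dist/review/gatherContext.js';
import { buildPrompt } from './dist/review/buildPrompt.js';

export async function reviewUncommittedChanges(focus) {
    // 1. Detects Repository: find the git repository root
    const root = execSync('git rev-parse --show-toplevel').toString().trim();
    // 2. Collects Diff: uncommitted changes with zero context lines (matches "unified=0" in the prompt)
    const diffText = execSync('git diff --unified=0', { cwd: root }).toString();
    // 3. Gathers Context: .cursor/rules/, CODE_REVIEW.md, lint/format configs, etc.
    const context = await gatherContext();
    // 4. AI Review: build the prompt around the project's standards and send it to the model
    const prompt = buildPrompt({ diffText, context, focus });
    const client = new OpenAI(); // reads OPENAI_API_KEY from the environment
    // model name taken from the README wording; the identifier the server really uses may differ
    const response = await client.responses.create({ model: 'gpt-5-codex', input: prompt });
    // 5. Returns Feedback: actionable Markdown
    return response.output_text;
}
```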
package/dist/review/buildPrompt.js
CHANGED
@@ -2,12 +2,46 @@ export function buildPrompt({ diffText, context, focus }) {
     const focusLine = focus ? `Focus areas: ${focus}.` : '';
     return [
         'You are an expert AI code reviewer. Be concise, specific, and actionable.',
-        'Prefer minimal diffs and direct fixes.
+        'Prefer minimal diffs and direct fixes.',
+        '',
+        'ENVIRONMENT ASSUMPTION: The runtime and tooling are POSIX (macOS/Linux). Do NOT suggest Windows/PowerShell/cmd-specific commands or paths.',
+        '',
+        '🚨 CRITICAL REPO-SPECIFIC GUIDANCE HIERARCHY:',
+        '1. HIGHEST PRIORITY: Project documentation (.cursor/rules/*, CODE_REVIEW.md, CONTRIBUTING.md, etc.)',
+        '2. Configuration files (tsconfig.json, .eslintrc, prettier config, etc.)',
+        '3. Existing patterns in similar files within the same directory',
+        '4. General best practices (ONLY when not conflicting with above)',
+        '',
+        'MANDATORY REVIEW REQUIREMENTS:',
+        '- 🔍 EXAMINE existing code patterns in the same directory/module before suggesting changes',
+        '- 🔎 CHECK for existing utility functions, abstractions, and helpers before creating new ones',
+        '- 🎯 FOLLOW the exact naming conventions, file structure, and import patterns used in this repo',
+        '- 🧪 MATCH existing test patterns and testing approaches when reviewing test files',
+        '- ⚙️ VALIDATE against linting and formatting rules defined in the project config',
+        '- 🚫 NEVER suggest patterns that are inconsistent with the existing codebase',
+        '- 🚫 NEVER introduce external libraries/patterns not already in use',
+        '- ✅ PREFER reusing existing abstractions over creating new ones',
+        '',
+        'SPECIFIC TECHNICAL GUIDELINES:',
+        '- Follow ALL guidelines, conventions, and architectural patterns defined in the project documentation below',
+        '- Maintain consistency with the existing codebase styling, naming conventions, and structural themes',
+        '- Base your recommendations on the internal documentation, .cursor/rules files, and configuration files provided',
+        '- Do NOT suggest changes that deviate from the project\'s established patterns or introduce external patterns',
+        '- When project guidelines conflict with general best practices, prioritize the project guidelines',
+        '- For accessibility audits: apply axe-core best practices and WCAG 2.1 Level AA guidelines',
+        '- CRITICAL Z-INDEX WARNING: Be extremely cautious with z-index changes on modals, overlays, or components. Analyze the entire z-index hierarchy before suggesting changes. Flag any z-index modifications as HIGH RISK and verify they won\'t break stacking context upstream or downstream. Always check for existing z-index patterns in the codebase first.',
         focusLine,
+        '',
+        'BEFORE making ANY suggestion, ask yourself:',
+        '1. Does this pattern already exist in the codebase? (Check similar files)',
+        '2. Is there an existing utility/helper that could be reused?',
+        '3. Does this follow the exact conventions shown in the project documentation?',
+        '4. Would this change maintain consistency with the surrounding code?',
+        '',
         '\n---\nScope and Diff (unified=0):\n',
         diffText,
-        context ? '\n---\nProject context and guidelines:\n' + context : '',
+        context ? '\n---\nProject context and guidelines (FOLLOW THESE STRICTLY):\n' + context : '',
         '\n---\nOutput strictly as Markdown with the following sections:\n',
-        '1) Title + scope summary\n2) Quick Summary (3–6 bullets)\n3) Issues table: severity | file:lines | category | explanation | suggested fix\n4) Inline suggested edits for top issues\n5) Positive notes\n6) Next steps',
+        '1) Title + scope summary\n2) Quick Summary (3–6 bullets)\n3) Issues table: severity | file:lines | category | explanation | suggested fix\n4) Inline suggested edits for top issues\n5) Positive notes (mention when code follows project guidelines well)\n6) Next steps',
     ].join('\n');
 }
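To see what the expanded prompt looks like after this change, a small usage sketch; the diff text, rule snippet, and focus string are invented, and the comment shows an abridged version of the string the array above joins into.

```js
import { buildPrompt } from './dist/review/buildPrompt.js';

const prompt = buildPrompt({
    diffText: '--- a/src/Modal.tsx\n+++ b/src/Modal.tsx\n@@ -12,0 +13 @@\n+  zIndex: 9999,',
    context: '<!-- .cursor/rules/tsx.mdc -->\nUse the shared overlay helper for modals.',
    focus: 'z-index and stacking context',
});

// prompt begins roughly like this (one array element per line, empty strings become blank lines):
//
//   You are an expert AI code reviewer. Be concise, specific, and actionable.
//   Prefer minimal diffs and direct fixes.
//
//   ENVIRONMENT ASSUMPTION: The runtime and tooling are POSIX (macOS/Linux). ...
//
//   🚨 CRITICAL REPO-SPECIFIC GUIDANCE HIERARCHY:
//   1. HIGHEST PRIORITY: Project documentation (.cursor/rules/*, CODE_REVIEW.md, CONTRIBUTING.md, etc.)
//   ...
//   Focus areas: z-index and stacking context.
//   ...
//   ---
//   Project context and guidelines (FOLLOW THESE STRICTLY):
//   <!-- .cursor/rules/tsx.mdc -->
//   Use the shared overlay helper for modals.
```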
package/dist/review/gatherContext.js
CHANGED
@@ -3,17 +3,37 @@ import { join } from 'node:path';
 const CANDIDATE_FILES = [
     '.cursor/rules/tsx.mdc',
     '.cursor/rules/project.mdc',
-    '.cursor/rules
+    '.cursor/rules/', // directory to scan for all .md and .mdc files
     'CODE_REVIEW.md',
     'CONTRIBUTING.md',
+    'ARCHITECTURE.md',
+    'STYLE_GUIDE.md',
+    'CODING_STANDARDS.md',
+    'GUIDELINES.md',
     'SECURITY.md',
     '.eslintrc',
     '.eslintrc.cjs',
     '.eslintrc.js',
     '.eslintrc.json',
+    '.prettierrc',
+    '.prettierrc.json',
+    '.prettierrc.js',
+    'prettier.config.js',
     'package.json',
     'tsconfig.json',
+    'vitest.config.ts',
+    'jest.config.js',
 ];
+// Directories to skip when scanning
+const SKIP_DIRS = new Set([
+    'node_modules',
+    '.git',
+    'dist',
+    'build',
+    'coverage',
+    '.next',
+    '.turbo',
+]);
 async function readIfExists(path) {
     try {
         const data = await fs.readFile(path, 'utf8');
@@ -23,21 +43,89 @@ async function readIfExists(path) {
         return null;
     }
 }
+async function scanDirectory(dirPath, extensions, visited = new Set()) {
+    const files = [];
+    // Protect against circular symlinks
+    try {
+        const realPath = await fs.realpath(dirPath);
+        if (visited.has(realPath))
+            return files;
+        visited.add(realPath);
+    }
+    catch {
+        // Can't resolve real path, skip
+        return files;
+    }
+    try {
+        const entries = await fs.readdir(dirPath, { withFileTypes: true });
+        for (const entry of entries) {
+            // Skip common large directories
+            if (entry.isDirectory() && SKIP_DIRS.has(entry.name)) {
+                continue;
+            }
+            const fullPath = join(dirPath, entry.name);
+            if (entry.isDirectory()) {
+                // Recursively scan subdirectories
+                const subFiles = await scanDirectory(fullPath, extensions, visited);
+                files.push(...subFiles);
+            }
+            else if (entry.isFile()) {
+                // Check if file has one of the desired extensions
+                if (extensions.some(ext => entry.name.endsWith(ext))) {
+                    files.push(fullPath);
+                }
+            }
+        }
+    }
+    catch {
+        // Directory doesn't exist or can't be read
+    }
+    return files;
+}
 export async function gatherContext() {
     const chunks = [];
-
-
-
-
-        const data = await readIfExists(join(process.cwd(), rel));
-        if (!data)
-            continue;
-        // bound each file contribution to ~8k chars
-        const capped = data.slice(0, 8000);
-        chunks.push(`\n<!-- ${rel} -->\n${capped}`);
-        // global size guard ~50k chars
+    const processedPaths = new Set();
+    const cwd = process.cwd();
+    for (const pattern of CANDIDATE_FILES) {
+        // Check global size guard before processing each pattern
         if (chunks.join('').length > 50_000)
             break;
+        // Handle directory paths - scan for .md and .mdc files
+        if (pattern.endsWith('/')) {
+            const dirPath = join(cwd, pattern);
+            const files = await scanDirectory(dirPath, ['.md', '.mdc']);
+            for (const filePath of files) {
+                const relativePath = filePath.replace(cwd + '/', '');
+                if (processedPaths.has(relativePath))
+                    continue;
+                processedPaths.add(relativePath);
+                const data = await readIfExists(filePath);
+                if (!data)
+                    continue;
+                const capped = data.slice(0, 8000);
+                chunks.push(`\n<!-- ${relativePath} -->\n${capped}`);
+                // Check size after each file
+                if (chunks.join('').length > 50_000)
+                    break;
+            }
+            // Check size after processing directory
+            if (chunks.join('').length > 50_000)
+                break;
+        }
+        else {
+            // Handle literal file paths
+            if (processedPaths.has(pattern))
+                continue;
+            processedPaths.add(pattern);
+            const data = await readIfExists(join(cwd, pattern));
+            if (!data)
+                continue;
+            const capped = data.slice(0, 8000);
+            chunks.push(`\n<!-- ${pattern} -->\n${capped}`);
+            // Check size after each literal file
+            if (chunks.join('').length > 50_000)
+                break;
+        }
     }
     return chunks.join('\n');
 }
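For reference, a hedged sketch of what the reworked gatherContext() produces. The repository layout in the comments is hypothetical; the limits reflect the 8,000-character per-file cap and the ~50,000-character global guard visible in the diff above.

```js
import { gatherContext } from './dist/review/gatherContext.js';

// Hypothetical working directory:
//   .cursor/rules/tsx.mdc
//   .cursor/rules/review/a11y.md   <- now found via the recursive '.cursor/rules/' scan
//   CODE_REVIEW.md
//   tsconfig.json
//
// Entries in CANDIDATE_FILES that end with '/' are scanned recursively for .md/.mdc
// files (skipping SKIP_DIRS and circular symlinks); every other entry is read as a
// literal path relative to process.cwd(). processedPaths prevents double-counting a
// file that matches both a literal entry and a directory scan.
const context = await gatherContext();

// context is a single string of labelled chunks, e.g.:
//
//   <!-- .cursor/rules/tsx.mdc -->
//   ...first 8,000 characters of the file...
//
//   <!-- .cursor/rules/review/a11y.md -->
//   ...
//
//   <!-- CODE_REVIEW.md -->
//   ...
//
// The loop stops adding chunks once the accumulated length passes ~50,000 characters,
// so the final chunk can push the total slightly past that guard.
console.log(context.length);
```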