codeast 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,335 @@
1
+ import { ANALYZER_URL } from '../services/analyzer.js';
2
+ import { queryImportedBy, queryImports, queryCallers, queryCalls, queryComplexity, queryCycles, queryDuplicates, querySymbols, queryFileInfo, querySearchFiles } from '../services/queries.js';
3
+ import { resolve, isAbsolute } from 'path';
4
+ // ============ Path Resolution ============
5
/**
 * Resolve a path to an absolute path, with validation.
 * @param {string} inputPath - absolute or CWD-relative path
 * @returns {{path: string, warning?: string} | {error: string}}
 */
function resolvePath(inputPath) {
    // Absolute paths are accepted as-is, no warning.
    if (isAbsolute(inputPath)) {
        return { path: inputPath };
    }
    // Relative paths resolve against the current working directory.
    const resolved = resolve(inputPath);
    // Refuse to analyze the MCP server's own sources.
    const pointsAtSelf = resolved.includes('codebase-analysis') &&
        (resolved.includes('middleware') ||
            (resolved.includes('analyzer') && resolved.includes('src')));
    if (pointsAtSelf) {
        return {
            error: `Cannot analyze the MCP server itself. The path "${inputPath}" resolved to "${resolved}". Please provide an absolute path to your target project.`
        };
    }
    // Relative input worked, but nudge the caller toward absolute paths.
    return {
        path: resolved,
        warning: `Relative path "${inputPath}" resolved to "${resolved}". For clarity, consider using absolute paths.`
    };
}
27
/**
 * Resolve a projectPath argument for query options.
 * Thin wrapper over resolvePath with the same result shape:
 * { path, warning? } on success, { error } on failure.
 */
function resolveProjectPath(projectPath) {
    const resolved = resolvePath(projectPath);
    if ('error' in resolved) {
        return resolved;
    }
    const { path, warning } = resolved;
    return { path, warning };
}
36
/**
 * Wrap plain text in an MCP tool-result payload.
 * @param {string} text - message body
 * @param {boolean} [isError=false] - marks the result as an error
 */
function textResult(text, isError = false) {
    const content = [{ type: 'text', text }];
    return { content, isError };
}
42
/** Serialize data as pretty-printed JSON inside a text result. */
function jsonResult(data) {
    const serialized = JSON.stringify(data, null, 2);
    return textResult(serialized);
}
45
/**
 * Report analyzer health plus a hint for the next step.
 * Never throws: an unreachable analyzer is reported as "down".
 */
async function handleStatus() {
    // Probe the health endpoint with a short timeout.
    let analyzerStatus = 'down';
    try {
        const res = await fetch(`${ANALYZER_URL}/health`, {
            signal: AbortSignal.timeout(2000)
        });
        if (res.ok) {
            analyzerStatus = 'up';
        }
    }
    catch {
        // Analyzer unreachable: leave status as 'down'.
    }
    const ready = analyzerStatus === 'up';
    return jsonResult({
        analyzer: {
            status: analyzerStatus,
            url: ANALYZER_URL
        },
        ready,
        hint: ready
            ? 'Ready! Use get_imports, get_symbols, etc. with projectPath parameter.'
            : 'Analyzer is down. Start it with: cd analyzer && cargo run -- serve --port 31415'
    });
}
69
/**
 * Dispatch a tool invocation by name.
 * Unknown tools and thrown errors are returned as error text results.
 */
export async function handleToolCall(name, args) {
    // Dispatch table instead of a switch; each entry defers to its handler.
    const handlers = {
        status: () => handleStatus(),
        run_tests: () => handleRunTests(args),
        // Targeted query tools (live analysis)
        get_imported_by: () => handleGetImportedBy(args),
        get_imports: () => handleGetImports(args),
        get_callers: () => handleGetCallers(args),
        get_calls: () => handleGetCalls(args),
        get_complexity: () => handleGetComplexity(args),
        get_cycles: () => handleGetCycles(args),
        get_duplicates: () => handleGetDuplicates(args),
        get_symbols: () => handleGetSymbols(args),
        get_file_info: () => handleGetFileInfo(args),
        search_files: () => handleSearchFiles(args)
    };
    try {
        // Own-property check so names like "toString" stay unknown tools.
        if (!Object.hasOwn(handlers, name)) {
            return textResult(`Unknown tool: ${name}`, true);
        }
        return await handlers[name]();
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        return textResult(`Error: ${message}`, true);
    }
}
106
/**
 * Run the project's test suite, auto-detecting the project type
 * (Cargo, npm, pytest) unless args.command overrides detection.
 * Returns a JSON result with stdout/stderr and timing, or an error
 * text result when the project type cannot be determined.
 */
async function handleRunTests(args) {
    const projectPath = args.path;
    if (!projectPath) {
        return textResult('Error: path is required', true);
    }
    const customCommand = args.command;
    const timeout = (args.timeout ?? 120) * 1000; // Convert to ms
    // Lazy dynamic imports: only load these built-ins when tests are run.
    const { exec } = await import('child_process');
    const { promisify } = await import('util');
    const { existsSync } = await import('fs');
    const path = await import('path');
    const execAsync = promisify(exec);
    // Detect project type and determine test command
    let testCommand;
    if (customCommand) {
        testCommand = customCommand;
    }
    else {
        // Marker files decide the toolchain; Cargo takes precedence over npm,
        // npm over pytest.
        const hasPackageJson = existsSync(path.join(projectPath, 'package.json'));
        const hasCargoToml = existsSync(path.join(projectPath, 'Cargo.toml'));
        const hasPytest = existsSync(path.join(projectPath, 'pytest.ini')) ||
            existsSync(path.join(projectPath, 'pyproject.toml')) ||
            existsSync(path.join(projectPath, 'setup.py'));
        if (hasCargoToml) {
            testCommand = 'cargo test';
        }
        else if (hasPackageJson) {
            testCommand = 'npm test';
        }
        else if (hasPytest) {
            testCommand = 'pytest';
        }
        else {
            return textResult('Error: Could not detect project type. Use "command" parameter to specify test command.', true);
        }
    }
    try {
        const startTime = Date.now();
        const { stdout, stderr } = await execAsync(testCommand, {
            cwd: projectPath,
            timeout,
            maxBuffer: 10 * 1024 * 1024 // 10MB
        });
        const duration = ((Date.now() - startTime) / 1000).toFixed(2);
        return jsonResult({
            success: true,
            command: testCommand,
            duration: `${duration}s`,
            stdout: stdout.trim(),
            // Empty stderr is omitted from the JSON output entirely.
            stderr: stderr.trim() || undefined
        });
    }
    catch (error) {
        // exec() rejects with killed/code/stdout/stderr attached to the error;
        // `killed` is set when the timeout fired.
        // NOTE(review): `duration` here is a status label ('timeout'/'failed'),
        // not an elapsed time — consider renaming or reporting both.
        const duration = error.killed ? 'timeout' : 'failed';
        return jsonResult({
            success: false,
            command: testCommand,
            duration,
            exitCode: error.code,
            stdout: error.stdout?.trim() || '',
            stderr: error.stderr?.trim() || error.message
        });
    }
}
170
+ // ============ Targeted Query Handlers ============
171
+ // All handlers use live mode (real-time analysis)
172
+ // projectPath is required for all queries
173
/**
 * Build QueryOptions from tool args.
 * Requires args.projectPath; returns { error } when missing or invalid.
 * NOTE(review): the relative-path warning from resolveProjectPath is
 * dropped here — consider surfacing it to the caller.
 */
function buildQueryOptions(args) {
    const inputProjectPath = args.projectPath;
    if (!inputProjectPath) {
        return { error: 'projectPath is required' };
    }
    // Validate and normalize to an absolute path.
    const resolved = resolveProjectPath(inputProjectPath);
    if ('error' in resolved) {
        return { error: resolved.error };
    }
    return { projectPath: resolved.path };
}
191
/** List the files that import the given file. */
async function handleGetImportedBy(args) {
    const { file } = args;
    if (!file) {
        return textResult('Error: file is required', true);
    }
    const options = buildQueryOptions(args);
    if ('error' in options) {
        return textResult(`Error: ${options.error}`, true);
    }
    return jsonResult({ files: await queryImportedBy(file, options) });
}
203
/** List what the given file imports (internal and external). */
async function handleGetImports(args) {
    const { file } = args;
    if (!file) {
        return textResult('Error: file is required', true);
    }
    const options = buildQueryOptions(args);
    if ('error' in options) {
        return textResult(`Error: ${options.error}`, true);
    }
    return jsonResult(await queryImports(file, options));
}
215
/** Find call sites of a named function in the given file. */
async function handleGetCallers(args) {
    const { file, functionName } = args;
    if (!file) {
        return textResult('Error: file is required', true);
    }
    if (!functionName) {
        return textResult('Error: functionName is required', true);
    }
    const options = buildQueryOptions(args);
    if ('error' in options) {
        return textResult(`Error: ${options.error}`, true);
    }
    return jsonResult({ callers: await queryCallers(file, functionName, options) });
}
231
/** List the calls made from inside a named function. */
async function handleGetCalls(args) {
    const { file, functionName } = args;
    if (!file) {
        return textResult('Error: file is required', true);
    }
    if (!functionName) {
        return textResult('Error: functionName is required', true);
    }
    const options = buildQueryOptions(args);
    if ('error' in options) {
        return textResult(`Error: ${options.error}`, true);
    }
    return jsonResult({ calls: await queryCalls(file, functionName, options) });
}
247
/**
 * Report complexity metrics for a file, or for one function when
 * functionName is supplied.
 */
async function handleGetComplexity(args) {
    const { file, functionName } = args;
    if (!file) {
        return textResult('Error: file is required', true);
    }
    const options = buildQueryOptions(args);
    if ('error' in options) {
        return textResult(`Error: ${options.error}`, true);
    }
    const result = await queryComplexity(file, functionName, options);
    if (result) {
        return jsonResult(result);
    }
    const target = functionName ? `:${functionName}` : '';
    return textResult(`No complexity data found for ${file}${target}`, true);
}
263
/** Detect circular dependencies, optionally filtered by args.file. */
async function handleGetCycles(args) {
    const options = buildQueryOptions(args);
    if ('error' in options) {
        return textResult(`Error: ${options.error}`, true);
    }
    // args.file may be undefined: the query then reports all cycles.
    return jsonResult(await queryCycles(args.file, options));
}
272
/**
 * Find duplicated code, optionally filtered by args.file,
 * with limit/offset pagination (defaults 20/0).
 */
async function handleGetDuplicates(args) {
    const options = buildQueryOptions(args);
    if ('error' in options) {
        return textResult(`Error: ${options.error}`, true);
    }
    // Pagination defaults, echoed back in the response.
    const limit = args.limit ?? 20;
    const offset = args.offset ?? 0;
    options.limit = limit;
    options.offset = offset;
    const result = await queryDuplicates(args.file, options);
    return jsonResult({
        ...result,
        pagination: {
            limit,
            offset,
            returned: result.duplicates.length
        }
    });
}
291
/** List the symbols (functions, classes, types) declared in a file. */
async function handleGetSymbols(args) {
    const { file } = args;
    if (!file) {
        return textResult('Error: file is required', true);
    }
    const options = buildQueryOptions(args);
    if ('error' in options) {
        return textResult(`Error: ${options.error}`, true);
    }
    return jsonResult(await querySymbols(file, options));
}
303
/** Quick triage overview of a single file; errors when it is unknown. */
async function handleGetFileInfo(args) {
    const { file } = args;
    if (!file) {
        return textResult('Error: file is required', true);
    }
    const options = buildQueryOptions(args);
    if ('error' in options) {
        return textResult(`Error: ${options.error}`, true);
    }
    const info = await queryFileInfo(file, options);
    if (!info) {
        return textResult(`File not found: ${file}`, true);
    }
    return jsonResult(info);
}
318
/**
 * Glob-style file search, truncated to args.limit results (default 50).
 */
async function handleSearchFiles(args) {
    const { pattern } = args;
    if (!pattern) {
        return textResult('Error: pattern is required', true);
    }
    const options = buildQueryOptions(args);
    if ('error' in options) {
        return textResult(`Error: ${options.error}`, true);
    }
    const limit = args.limit ?? 50;
    const matches = await querySearchFiles(pattern, options);
    return jsonResult({
        files: matches.slice(0, limit),
        total: matches.length,
        truncated: matches.length > limit
    });
}
@@ -0,0 +1,45 @@
1
#!/usr/bin/env node
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { CallToolRequestSchema, ListToolsRequestSchema, } from '@modelcontextprotocol/sdk/types.js';
import { tools } from './tools.js';
import { handleToolCall } from './handlers.js';
import { ensureAnalyzerRunning, stopAnalyzer } from '../services/analyzer.js';

// MCP server identity and advertised capabilities.
const server = new Server(
    { name: 'codebase-analysis', version: '1.0.0' },
    { capabilities: { tools: {} } }
);

// Advertise the static tool catalog.
server.setRequestHandler(ListToolsRequestSchema, async () => ({ tools }));

// Route each tool invocation to its handler; default args to {}.
server.setRequestHandler(CallToolRequestSchema, async (request) => {
    const { name, arguments: args } = request.params;
    return handleToolCall(name, args ?? {});
});

// Shut the analyzer down before exiting on termination signals.
function cleanup() {
    stopAnalyzer();
    process.exit(0);
}
for (const signal of ['SIGINT', 'SIGTERM']) {
    process.on(signal, cleanup);
}

// Boot sequence: start the analyzer first, then serve MCP over stdio.
async function main() {
    await ensureAnalyzerRunning();
    await server.connect(new StdioServerTransport());
    console.error('MCP Server started (codebase-analysis)');
}
main().catch((error) => {
    console.error('Fatal error:', error);
    stopAnalyzer();
    process.exit(1);
});
@@ -0,0 +1,304 @@
1
+ /**
2
+ * =============================================================================
3
+ * CODEBASE ANALYSIS MCP - Context Window Optimizer
4
+ * =============================================================================
5
+ *
6
+ * PURPOSE: Reduce context window usage by 10-50x.
7
+ *
8
+ * These tools return STRUCTURED DATA instead of raw file contents.
9
+ * The AI gets the SAME information but uses FAR LESS context.
10
+ *
11
+ * USE IN ALL SCENARIOS:
12
+ * - Answering user questions about the codebase
13
+ * - Exploring a file before modifying it
14
+ * - Finding dependencies before refactoring
15
+ * - Understanding architecture during autonomous work
16
+ * - Any time you would Read/Grep/Glob for codebase info
17
+ *
18
+ * COMPARISON:
19
+ * Read(file.ts) → 500 lines, 2000 tokens
20
+ * get_symbols(file.ts) → [{name, line}], 50 tokens
21
+ * = SAME INFO, 40x LESS CONTEXT
22
+ *
23
+ * Grep("functionName") → 50 matches, 1500 tokens
24
+ * get_callers(fn) → [{file, line}], 80 tokens
25
+ * = SAME INFO, 20x LESS CONTEXT
26
+ *
27
+ * WHY IT MATTERS:
28
+ * - Context window fills up fast on large codebases
29
+ * - Long conversations hit limits
30
+ * - Less context = faster + cheaper responses
31
+ *
32
+ * ALWAYS prefer these tools over Read/Grep when exploring code.
33
+ * =============================================================================
34
+ */
35
/**
 * MCP tool catalog: tool names, LLM-facing descriptions, and JSON Schema
 * input definitions. Keep names and schemas in sync with the dispatch in
 * handlers.js.
 * NOTE(review): run_tests actually returns stdout/stderr fields, not a
 * single `output` string as its description claims — confirm and align.
 */
export const tools = [
    {
        name: 'status',
        description: `CALL FIRST: Before any other tool if unsure if analyzer is running.
RETURNS: { analyzer: { status: "up"|"down" }, ready: boolean }`,
        inputSchema: {
            type: 'object',
            properties: {}
        }
    },
    {
        name: 'run_tests',
        description: `Run tests for a project. Auto-detects project type (npm, cargo, python).
RETURNS: { success: boolean, output: string, command: string }`,
        inputSchema: {
            type: 'object',
            properties: {
                path: {
                    type: 'string',
                    description: 'Path to the project directory'
                },
                command: {
                    type: 'string',
                    description: 'Custom test command (overrides auto-detection)'
                },
                timeout: {
                    type: 'number',
                    description: 'Timeout in seconds. Default: 120'
                }
            },
            required: ['path']
        }
    },
    // ============ Code Analysis Tools ============
    {
        name: 'get_callers',
        description: `CONTEXT SAVER: Use this instead of Grep+Read to save 20x tokens during exploration or refactoring.
USE BEFORE: Any refactoring, renaming, or deleting a function.
TRIGGER WORDS: "rename", "refactor", "delete", "change signature", "who uses", "who calls"
REPLACES: grep + read multiple files (saves 10x context tokens)
RETURNS: { callers: [{file, line, function}] } - compact list, not raw code
EXAMPLE: Before renaming "calculateTotal" -> get_callers first`,
        inputSchema: {
            type: 'object',
            properties: {
                file: {
                    type: 'string',
                    description: 'Path to the file containing the function'
                },
                functionName: {
                    type: 'string',
                    description: 'Name of the function to find callers for'
                },
                projectPath: {
                    type: 'string',
                    description: 'Absolute path to the project directory'
                }
            },
            required: ['file', 'functionName', 'projectPath']
        }
    },
    {
        name: 'get_imported_by',
        description: `CONTEXT SAVER: Use this instead of Grep to save 10x tokens when checking file dependencies.
USE BEFORE: Modifying, moving, or deleting any file.
ANSWERS: "What breaks if I change this file?"
REPLACES: grep for import statements (saves 5x context tokens)
RETURNS: { files: ["src/app.ts", "src/index.ts"] } - simple file list, not grep output`,
        inputSchema: {
            type: 'object',
            properties: {
                file: {
                    type: 'string',
                    description: 'Path to the file (e.g., "src/utils.ts")'
                },
                projectPath: {
                    type: 'string',
                    description: 'Absolute path to the project directory'
                }
            },
            required: ['file', 'projectPath']
        }
    },
    {
        name: 'get_imports',
        description: `CONTEXT SAVER: Use this instead of reading file headers to save 5x tokens.
USE BEFORE: Understanding what a file depends on.
ANSWERS: "Can I move this file? What does it need?"
RETURNS: { internal: ["./utils"], external: ["react"] } - compact`,
        inputSchema: {
            type: 'object',
            properties: {
                file: {
                    type: 'string',
                    description: 'Path to the file (e.g., "src/main.ts")'
                },
                projectPath: {
                    type: 'string',
                    description: 'Absolute path to the project directory'
                }
            },
            required: ['file', 'projectPath']
        }
    },
    {
        name: 'get_symbols',
        description: `CONTEXT SAVER: Use this instead of Read to save 20x tokens when exploring file structure.
USE INSTEAD OF: Reading a large file to understand its structure.
ANSWERS: "What functions/classes are in this file?"
SAVES: 20x tokens vs reading the whole file
RETURNS: { functions: [{name, line, exported}], classes: [...], types: [...] }`,
        inputSchema: {
            type: 'object',
            properties: {
                file: {
                    type: 'string',
                    description: 'Path to the file (e.g., "src/utils.ts")'
                },
                projectPath: {
                    type: 'string',
                    description: 'Absolute path to the project directory'
                }
            },
            required: ['file', 'projectPath']
        }
    },
    {
        name: 'get_calls',
        description: `CONTEXT SAVER: Use this instead of reading function body to save 10x tokens.
USE WHEN: Understanding what a function does internally, tracing execution.
ANSWERS: "What does this function call?", "What's the execution flow?"
RETURNS: { calls: ["fetchData", "processResult", "logger.info"] }`,
        inputSchema: {
            type: 'object',
            properties: {
                file: {
                    type: 'string',
                    description: 'Path to the file containing the function'
                },
                functionName: {
                    type: 'string',
                    description: 'Name of the function to analyze'
                },
                projectPath: {
                    type: 'string',
                    description: 'Absolute path to the project directory'
                }
            },
            required: ['file', 'functionName', 'projectPath']
        }
    },
    {
        name: 'get_complexity',
        description: `CONTEXT SAVER: Get complexity metrics without reading the entire file.
USE WHEN: User mentions "refactor", "messy", "complex", "clean up", "simplify". Or when deciding if code needs refactoring.
ANSWERS: "Is this file/function too complex?"
THRESHOLD: cognitive > 30 = needs refactoring
RETURNS: { cyclomatic, cognitive, nesting, loc } - 4 numbers`,
        inputSchema: {
            type: 'object',
            properties: {
                file: {
                    type: 'string',
                    description: 'Path to the file'
                },
                functionName: {
                    type: 'string',
                    description: 'Optional: specific function. If omitted, returns file-level metrics.'
                },
                projectPath: {
                    type: 'string',
                    description: 'Absolute path to the project directory'
                }
            },
            required: ['file', 'projectPath']
        }
    },
    {
        name: 'get_file_info',
        description: `CONTEXT SAVER: Quick file overview without reading it. Use before deciding to Read a file.
USE AS: Quick triage before deciding to read a file.
ANSWERS: "Is this file important? How connected is it?"
RETURNS: { imports, importedBy, avgComplexity } - 3 values`,
        inputSchema: {
            type: 'object',
            properties: {
                file: {
                    type: 'string',
                    description: 'Path to the file'
                },
                projectPath: {
                    type: 'string',
                    description: 'Absolute path to the project directory'
                }
            },
            required: ['file', 'projectPath']
        }
    },
    {
        name: 'get_cycles',
        description: `CONTEXT SAVER: Detect circular deps without manually tracing imports across files.
USE WHEN: Import errors, circular dependency issues, architecture review.
ANSWERS: "Are there circular dependencies?"
RETURNS: { cycles: [["a.ts", "b.ts", "a.ts"]], count } - list or empty array`,
        inputSchema: {
            type: 'object',
            properties: {
                file: {
                    type: 'string',
                    description: 'Optional: filter cycles involving this file'
                },
                projectPath: {
                    type: 'string',
                    description: 'Absolute path to the project directory'
                }
            },
            required: ['projectPath']
        }
    },
    {
        name: 'get_duplicates',
        description: `CONTEXT SAVER: Find duplicate code without reading and comparing files manually.
USE WHEN: User mentions "DRY", "duplicated", "copy-paste", "refactor". Or when looking for refactoring opportunities.
ANSWERS: "Is there duplicated code to consolidate?"
RETURNS: { duplicates: [{file1, file2, similarity, lines}] }`,
        inputSchema: {
            type: 'object',
            properties: {
                file: {
                    type: 'string',
                    description: 'Optional: filter duplicates involving this file'
                },
                limit: {
                    type: 'number',
                    description: 'Max results. Default: 20'
                },
                projectPath: {
                    type: 'string',
                    description: 'Absolute path to the project directory'
                }
            },
            required: ['projectPath']
        }
    },
    {
        name: 'search_files',
        description: `CONTEXT SAVER: Compact file list instead of verbose glob output.
USE INSTEAD OF: glob or find commands.
RETURNS: { files: [...], total, truncated } - compact file list
SAVES: Cleaner output than glob tool`,
        inputSchema: {
            type: 'object',
            properties: {
                pattern: {
                    type: 'string',
                    description: 'Glob pattern (e.g., "**/*.test.ts", "**/api/**")'
                },
                limit: {
                    type: 'number',
                    description: 'Max results. Default: 50'
                },
                projectPath: {
                    type: 'string',
                    description: 'Absolute path to the project directory'
                }
            },
            required: ['pattern', 'projectPath']
        }
    }
];
@@ -0,0 +1,131 @@
1
+ import { spawn } from 'child_process';
2
+ import { ensureBinaryExists } from './binary.js';
3
+ const ANALYZER_PORT = 31415;
4
+ export const ANALYZER_URL = process.env.ANALYZER_URL || `http://localhost:${ANALYZER_PORT}`;
5
+ // ============================================================================
6
+ // Analyzer Process Management
7
+ // ============================================================================
8
+ let analyzerProcess = null;
9
/**
 * Probe the analyzer's /health endpoint (1s timeout).
 * @returns {Promise<boolean>} true when the endpoint answers 2xx.
 */
export async function isAnalyzerHealthy() {
    try {
        const res = await fetch(`${ANALYZER_URL}/health`, {
            signal: AbortSignal.timeout(1000)
        });
        return res.ok;
    }
    catch {
        // Network error or timeout: treat as unhealthy.
        return false;
    }
}
20
/**
 * Poll /health every 100ms until the analyzer is up.
 * @param {number} timeoutMs - maximum time to wait
 * @throws {Error} when the deadline passes without a healthy response.
 */
async function waitForHealth(timeoutMs) {
    const deadline = Date.now() + timeoutMs;
    while (Date.now() < deadline) {
        if (await isAnalyzerHealthy()) {
            return;
        }
        await new Promise((resolve) => setTimeout(resolve, 100));
    }
    throw new Error('Analyzer failed to start within timeout');
}
30
/**
 * Ensure the analyzer HTTP service is up, spawning the binary if needed.
 * Resolves once /health responds; throws if it does not within 5s.
 */
export async function ensureAnalyzerRunning() {
    // Already running?
    if (await isAnalyzerHealthy()) {
        return;
    }
    // Find or download binary
    const binaryPath = await ensureBinaryExists();
    console.error(`[codeast] Starting analyzer: ${binaryPath}`);
    // Spawn the process; stderr is piped so we can forward its logs.
    analyzerProcess = spawn(binaryPath, ['serve', '--port', String(ANALYZER_PORT)], {
        stdio: ['ignore', 'pipe', 'pipe']
    });
    // Log errors
    analyzerProcess.stderr?.on('data', (data) => {
        console.error(`[analyzer] ${data.toString().trim()}`);
    });
    analyzerProcess.on('error', (err) => {
        // Spawn failure (e.g. missing/invalid binary); waitForHealth below
        // will then time out and reject.
        console.error(`[codeast] Analyzer error: ${err.message}`);
    });
    analyzerProcess.on('exit', (code) => {
        if (code !== 0 && code !== null) {
            console.error(`[codeast] Analyzer exited with code ${code}`);
        }
        // Clear the handle so stopAnalyzer() does not kill a dead process.
        analyzerProcess = null;
    });
    // Wait for health
    await waitForHealth(5000);
    console.error('[codeast] Analyzer ready');
}
59
/** Kill the spawned analyzer process, if one is still tracked. */
export function stopAnalyzer() {
    if (!analyzerProcess) {
        return;
    }
    console.error('[codeast] Stopping analyzer');
    analyzerProcess.kill();
    analyzerProcess = null;
}
66
/**
 * Run a full project analysis via the Rust analyzer's HTTP API.
 * @param {string} path - project root to analyze
 * @param {object} [options] - feature toggles: symbols, detectCycles,
 *        metrics, calls, duplicates
 * @returns {Promise<object>} the analyzer's parsed JSON response
 * @throws {AnalyzerServiceError} when the analyzer is unreachable
 * @throws {AnalyzerRequestError} when the analyzer rejects the request
 */
export async function analyzeProject(path, options = {}) {
    const { symbols = false, detectCycles = true, metrics = false, calls = false, duplicates = false } = options;
    let response;
    try {
        response = await fetch(`${ANALYZER_URL}/analyze`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                path,
                symbols,
                detect_cycles: detectCycles, // analyzer API uses snake_case
                metrics,
                calls,
                duplicates
            })
        });
    }
    catch (error) {
        // Keep the underlying network failure attached for debugging.
        const serviceError = new AnalyzerServiceError('Analyzer service unavailable');
        serviceError.cause = error;
        throw serviceError;
    }
    // Parse defensively: an error response may not carry a JSON body, and a
    // raw SyntaxError from json() would mask the real failure.
    let data;
    try {
        data = await response.json();
    }
    catch (parseError) {
        if (!response.ok) {
            throw new AnalyzerRequestError('Analysis failed', response.status);
        }
        throw parseError;
    }
    if (!response.ok) {
        throw new AnalyzerRequestError(data.error || 'Analysis failed', response.status);
    }
    return data;
}
93
/**
 * Error thrown when the analyzer HTTP service cannot be reached at all.
 * Accepts the standard Error options bag (e.g. { cause }) as an optional
 * second argument; existing single-argument callers are unaffected.
 */
export class AnalyzerServiceError extends Error {
    constructor(message, options) {
        super(message, options);
        this.name = 'AnalyzerServiceError';
    }
}
99
/**
 * Error thrown when the analyzer is reachable but rejects a request.
 * Carries the HTTP status code the analyzer returned.
 */
export class AnalyzerRequestError extends Error {
    // HTTP status code from the analyzer's response.
    statusCode;
    constructor(message, statusCode) {
        super(message);
        this.name = 'AnalyzerRequestError';
        this.statusCode = statusCode;
    }
}
107
/**
 * Analyze a single file via the Rust analyzer's HTTP API (fast path).
 * @param {string} filePath - absolute path of the file to analyze
 * @param {object} [options] - feature toggles: symbols, metrics, calls
 * @returns {Promise<object>} the analyzer's parsed JSON response
 * @throws {AnalyzerServiceError} when the analyzer is unreachable
 * @throws {AnalyzerRequestError} when the analyzer rejects the request
 */
export async function analyzeFile(filePath, options = {}) {
    const { symbols = false, metrics = false, calls = false } = options;
    let response;
    try {
        response = await fetch(`${ANALYZER_URL}/analyze-file`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                file: filePath,
                symbols,
                metrics,
                calls
            })
        });
    }
    catch (error) {
        // Keep the underlying network failure attached for debugging.
        const serviceError = new AnalyzerServiceError('Analyzer service unavailable');
        serviceError.cause = error;
        throw serviceError;
    }
    // Parse defensively: an error response may not carry a JSON body.
    let data;
    try {
        data = await response.json();
    }
    catch (parseError) {
        if (!response.ok) {
            throw new AnalyzerRequestError('File analysis failed', response.status);
        }
        throw parseError;
    }
    if (!response.ok) {
        throw new AnalyzerRequestError(data.error || 'File analysis failed', response.status);
    }
    return data;
}
@@ -0,0 +1,79 @@
1
import { createWriteStream, existsSync, mkdirSync, chmodSync } from 'fs';
import os from 'os';
import { dirname, join } from 'path';
import { Readable } from 'stream';
import { finished } from 'stream/promises';
import { fileURLToPath } from 'url';
6
+ const GITHUB_REPO = 'dimaland1/codeast';
7
+ const BINARY_NAME = 'codebase-analysis';
8
/**
 * Describe the current platform for release-asset naming.
 * @returns {{os: string, arch: string, ext: string}}
 */
function getPlatformInfo() {
    const isWindows = process.platform === 'win32';
    let osName = 'linux';
    if (isWindows) {
        osName = 'windows';
    }
    else if (process.platform === 'darwin') {
        osName = 'darwin';
    }
    return {
        os: osName,
        // Anything that is not arm64 is treated as x64.
        arch: process.arch === 'arm64' ? 'arm64' : 'x64',
        ext: isWindows ? '.exe' : ''
    };
}
17
/** Directory where the downloaded analyzer binary is cached. */
export function getBinaryDir() {
    const home = os.homedir();
    return join(home, '.codeast', 'bin');
}
20
/** Full path of the cached analyzer binary for this platform. */
export function getBinaryPath() {
    const { ext } = getPlatformInfo();
    const fileName = `${BINARY_NAME}${ext}`;
    return join(getBinaryDir(), fileName);
}
24
/**
 * Path to a locally built analyzer binary (dev workflow).
 * From mcp/dist/services/ -> analyzer/target/release/
 *
 * FIX: this module uses ES module syntax, where `__dirname` is not
 * defined (referencing it throws a ReferenceError at runtime). Derive
 * the module directory from `import.meta.url` instead.
 */
export function getDevBinaryPath() {
    const { ext } = getPlatformInfo();
    const moduleDir = dirname(fileURLToPath(import.meta.url));
    return join(moduleDir, '..', '..', '..', '..', 'analyzer', 'target', 'release', `${BINARY_NAME}${ext}`);
}
30
/**
 * Locate an analyzer binary, preferring a local dev build over the
 * downloaded one.
 * @returns {string|null} binary path, or null when none exists.
 */
export function findBinaryPath() {
    // 1. A local dev build wins.
    const devPath = getDevBinaryPath();
    if (existsSync(devPath)) {
        console.error('[codeast] Using local dev binary');
        return devPath;
    }
    // 2. Otherwise fall back to the downloaded binary, if present.
    const prodPath = getBinaryPath();
    return existsSync(prodPath) ? prodPath : null;
}
44
/**
 * Download the platform-specific analyzer binary from GitHub Releases
 * into the cache directory and mark it executable on Unix.
 * @returns {Promise<string>} path of the downloaded binary
 * @throws {Error} when the download request fails or has no body
 */
export async function downloadBinary() {
    const binDir = getBinaryDir();
    const { os: osName, arch, ext } = getPlatformInfo();
    const binaryPath = join(binDir, `${BINARY_NAME}${ext}`);
    // Create directory
    mkdirSync(binDir, { recursive: true });
    // Download from GitHub Releases; asset name encodes platform + arch.
    const assetName = `${BINARY_NAME}-${osName}-${arch}${ext}`;
    const url = `https://github.com/${GITHUB_REPO}/releases/latest/download/${assetName}`;
    console.error(`[codeast] Downloading analyzer from ${url}...`);
    const response = await fetch(url);
    if (!response.ok) {
        throw new Error(`Failed to download analyzer: ${response.status} ${response.statusText}`);
    }
    if (!response.body) {
        throw new Error('No response body');
    }
    // Stream the web-standard response body into a Node file stream and
    // wait for the write to finish.
    const fileStream = createWriteStream(binaryPath);
    await finished(Readable.fromWeb(response.body).pipe(fileStream));
    // Make executable (Unix)
    if (process.platform !== 'win32') {
        chmodSync(binaryPath, 0o755);
    }
    console.error('[codeast] Analyzer downloaded successfully');
    return binaryPath;
}
71
/**
 * Return a usable analyzer binary path, downloading one when neither a
 * dev build nor a cached copy exists.
 */
export async function ensureBinaryExists() {
    const existingPath = findBinaryPath();
    if (existingPath) {
        return existingPath;
    }
    return downloadBinary();
}
@@ -0,0 +1,402 @@
1
+ /**
2
+ * Targeted query service for MCP tools
3
+ * Returns minimal, focused responses instead of full graph dumps
4
+ * Live mode only - real-time analysis via Rust analyzer
5
+ */
6
+ import { analyzeProject, analyzeFile } from './analyzer.js';
7
+ import { join, isAbsolute } from 'path';
8
+ // ============ Core Functions ============
9
+ /**
10
+ * Analyze a project in real-time
11
+ */
12
/**
 * Run a full real-time analysis of the project and return its graph.
 * Every analysis feature is enabled so all query functions can be served
 * from one result.
 */
async function getGraph(projectPath) {
    const fullOptions = {
        symbols: true,
        detectCycles: true,
        metrics: true,
        calls: true,
        duplicates: true
    };
    return await analyzeProject(projectPath, fullOptions);
}
21
+ /**
22
+ * Get single file analysis (FAST - ~50ms instead of 30s)
23
+ * Use this when you only need info about one specific file
24
+ */
25
/**
 * Analyze one file in isolation (FAST - ~50ms instead of a full project scan).
 * Failures are logged and reported as null so callers can fall back to the
 * slow full-graph path.
 */
async function getSingleFileAnalysis(file, projectPath, options = {}) {
    try {
        // Relative inputs are resolved against the project root
        const absolutePath = isAbsolute(file) ? file : join(projectPath, file);
        return await analyzeFile(absolutePath, options);
    }
    catch (error) {
        console.error(`Single file analysis failed for ${file}:`, error);
        return null;
    }
}
36
+ /**
37
+ * Normalize file path for comparison
38
+ */
39
/**
 * Canonicalize a file path for lenient comparison: strip a leading "./",
 * convert backslashes to forward slashes, and lowercase everything.
 */
function normalizePath(file) {
    const withoutDotPrefix = file.replace(/^\.\//, '');
    return withoutDotPrefix.replaceAll('\\', '/').toLowerCase();
}
45
+ /**
46
+ * Check if a path matches (with flexible matching)
47
+ */
48
/**
 * Lenient path comparison: exact match after normalization, or either path
 * being a suffix of the other (so partial paths like "utils.js" match).
 * NOTE(review): suffix matching can false-positive on very short names
 * (e.g. "s.js" matches "utils.js") — presumably acceptable for fuzzy lookup.
 */
function pathMatches(nodePath, searchPath) {
    const left = normalizePath(nodePath);
    const right = normalizePath(searchPath);
    return left === right || left.endsWith(right) || right.endsWith(left);
}
61
+ /**
62
+ * Find a node by file path
63
+ */
64
/**
 * Locate the graph node whose id fuzzily matches the given file path.
 * Returns undefined when no node matches.
 */
function findNode(graph, file) {
    return graph.nodes.find((node) => pathMatches(node.id, file));
}
67
+ // ============ Query Functions ============
68
+ /**
69
+ * Get files that import a given file
70
+ */
71
/**
 * List files that import the given file (incoming dependency edges).
 * @returns {Promise<string[]>} Deduplicated importer file ids.
 */
export async function queryImportedBy(file, options) {
    const graph = await getGraph(options.projectPath);
    const importers = graph.edges
        .filter((edge) => pathMatches(edge.to, file))
        .map((edge) => edge.from);
    return [...new Set(importers)];
}
81
+ /**
82
+ * Get imports of a file (internal vs external)
83
+ * Uses FAST single-file analysis
84
+ */
85
/**
 * Get the imports of a file, split into internal (project files) and
 * external (package) sources.
 * Tries the FAST single-file analysis first; falls back to a full project
 * graph when that is unavailable.
 * @returns {Promise<{internal: string[], external: string[]}>}
 */
export async function queryImports(file, options) {
    // FAST PATH: single-file analysis. Guard on `imports` too, so a
    // malformed analyzer payload falls through to the slow path instead of
    // throwing a TypeError out of the iteration below.
    const result = await getSingleFileAnalysis(file, options.projectPath);
    if (result?.imports) {
        const internal = [];
        const external = [];
        for (const imp of result.imports) {
            if (imp.is_external) {
                external.push(imp.source);
            }
            else {
                internal.push(imp.source);
            }
        }
        return {
            internal: [...new Set(internal)],
            external: [...new Set(external)]
        };
    }
    // SLOW PATH: full graph analysis (fallback)
    const graph = await getGraph(options.projectPath);
    const internal = [];
    const external = [];
    // Set of normalized file-node ids for O(1) classification of edges whose
    // target node is not present in the node list.
    const fileNodes = new Set(graph.nodes.filter(n => n.type === 'file').map(n => normalizePath(n.id)));
    for (const edge of graph.edges) {
        if (!pathMatches(edge.from, file))
            continue;
        const targetNode = graph.nodes.find(n => n.id === edge.to);
        if (targetNode) {
            if (targetNode.type === 'external') {
                external.push(edge.to);
            }
            else if (targetNode.type === 'file') {
                internal.push(edge.to);
            }
            // Other node types are intentionally ignored.
        }
        else if (fileNodes.has(normalizePath(edge.to))) {
            internal.push(edge.to);
        }
        else {
            external.push(edge.to);
        }
    }
    return {
        internal: [...new Set(internal)],
        external: [...new Set(external)]
    };
}
134
+ /**
135
+ * Get functions/locations that call a specific function
136
+ */
137
/**
 * Find every function across the project that calls `functionName`.
 * The `file` parameter is currently unused; it is kept for interface
 * stability with the other query functions.
 * @returns {Promise<Array<{file: string, line: number, function: string}>>}
 */
export async function queryCallers(file, functionName, options) {
    const graph = await getGraph(options.projectPath);
    const callers = [];
    for (const node of graph.nodes) {
        if (node.type !== 'file' || !node.symbols?.functions)
            continue;
        for (const fn of node.symbols.functions) {
            if (!fn.calls?.includes(functionName))
                continue;
            callers.push({ file: node.id, line: fn.line, function: fn.name });
        }
    }
    return callers;
}
155
+ /**
156
+ * Get functions called by a specific function
157
+ */
158
/**
 * List the names of functions called by `functionName` inside `file`.
 * @returns {Promise<string[]>} Call targets, or [] when the file or
 *                              function cannot be found.
 */
export async function queryCalls(file, functionName, options) {
    const graph = await getGraph(options.projectPath);
    const node = findNode(graph, file);
    const target = node?.symbols?.functions?.find((f) => f.name === functionName);
    return target?.calls || [];
}
167
+ /**
168
+ * Get complexity metrics for a file or function
169
+ * Uses FAST single-file analysis
170
+ */
171
/**
 * Get complexity metrics for a file, or for one named function inside it.
 * Tries the FAST single-file analysis first; falls back to a full project
 * graph when that is unavailable.
 * @returns {Promise<{cyclomatic: number, cognitive: number, nesting: number, loc: number}|null>}
 *          Metrics, or null when the file/function has none.
 */
export async function queryComplexity(file, functionName, options) {
    // FAST PATH: single-file analysis with metrics enabled
    const result = await getSingleFileAnalysis(file, options.projectPath, { metrics: true });
    if (result?.complexity) {
        if (functionName) {
            // Guard the function list so a payload without per-function
            // metrics yields "not found" instead of throwing.
            const func = (result.complexity.functions ?? []).find(f => f.name === functionName);
            if (!func)
                return null;
            return {
                cyclomatic: func.metrics.cyclomatic,
                cognitive: func.metrics.cognitive,
                nesting: func.metrics.max_nesting,
                loc: func.metrics.lines_of_code
            };
        }
        else {
            // File-level metrics
            const fm = result.complexity.file_metrics;
            return {
                cyclomatic: fm.cyclomatic,
                cognitive: fm.cognitive,
                nesting: fm.max_nesting,
                loc: fm.lines_of_code
            };
        }
    }
    // SLOW PATH: full graph analysis (fallback)
    const graph = await getGraph(options.projectPath);
    const node = findNode(graph, file);
    if (!node?.symbols?.functions)
        return null;
    const functions = node.symbols.functions;
    if (functionName) {
        // Specific function
        const func = functions.find(f => f.name === functionName);
        if (!func?.complexity)
            return null;
        return {
            cyclomatic: func.complexity.cyclomatic,
            cognitive: func.complexity.cognitive,
            nesting: func.complexity.max_nesting,
            loc: func.complexity.lines_of_code
        };
    }
    else {
        // Aggregate across all functions that report complexity:
        // averages for cyclomatic/cognitive, max nesting, total LOC.
        const complexities = functions
            .filter(f => f.complexity)
            .map(f => f.complexity);
        if (complexities.length === 0)
            return null;
        const sum = (arr) => arr.reduce((a, b) => a + b, 0);
        const avg = (arr) => Math.round((sum(arr) / arr.length) * 10) / 10;
        return {
            cyclomatic: avg(complexities.map(c => c.cyclomatic)),
            cognitive: avg(complexities.map(c => c.cognitive)),
            nesting: Math.max(...complexities.map(c => c.max_nesting)),
            loc: sum(complexities.map(c => c.lines_of_code))
        };
    }
}
232
+ /**
233
+ * Get dependency cycles (optionally filtered by file)
234
+ */
235
/**
 * Get dependency cycles in the project, optionally only those that touch
 * the given file.
 * @returns {Promise<{cycles: string[][], count: number}>}
 */
export async function queryCycles(file, options) {
    const graph = await getGraph(options.projectPath);
    const rawCycles = graph?.cycles ?? [];
    let cycles = rawCycles.map((c) => c.files);
    if (file) {
        cycles = cycles.filter((cycle) => cycle.some((f) => pathMatches(f, file)));
    }
    return { cycles, count: cycles.length };
}
248
+ /**
249
+ * Get duplicate code blocks (optionally filtered by file)
250
+ */
251
/**
 * Get duplicate code blocks, optionally filtered to one file, with
 * offset/limit pagination (default page size: 20).
 * @returns {Promise<{duplicates: Array, ratio: number, total: number, hasMore: boolean}>}
 */
export async function queryDuplicates(file, options) {
    const graph = await getGraph(options.projectPath);
    if (!graph?.duplicates)
        return { duplicates: [], ratio: 0, total: 0, hasMore: false };
    const touchesFile = (d) => pathMatches(d.file1, file) || pathMatches(d.file2, file);
    const dupes = file
        ? graph.duplicates.duplicates.filter(touchesFile)
        : graph.duplicates.duplicates;
    const total = dupes.length;
    const limit = options.limit ?? 20;  // default page size
    const offset = options.offset ?? 0;
    // Current page of results
    const page = dupes.slice(offset, offset + limit);
    return {
        duplicates: page.map(({ file1, file2, similarity, lines }) => ({
            file1,
            file2,
            similarity,
            lines
        })),
        ratio: graph.duplicates.duplication_ratio,
        total,
        hasMore: offset + limit < total
    };
}
276
+ /**
277
+ * Get symbols (functions, classes, types) in a file
278
+ * Uses FAST single-file analysis
279
+ */
280
/**
 * Get symbols (functions, classes, types) declared in a file.
 * Tries the FAST single-file analysis first; falls back to a full project
 * graph when that is unavailable.
 * @returns {Promise<{functions: Array, classes: Array, types: Array}>}
 *          Each entry has {name, line, exported}.
 */
export async function querySymbols(file, options) {
    // Shared mapper. Some analyzers report exported-ness as `public`
    // instead of `exported`; the original slow path honored that fallback
    // while the fast path did not — apply it uniformly for consistency.
    const toEntry = (s) => ({
        name: s.name,
        line: s.line,
        exported: s.exported ?? s.public ?? false
    });
    // Map all three symbol categories, tolerating missing/non-array fields.
    const collect = (symbols) => ({
        functions: Array.isArray(symbols.functions) ? symbols.functions.map(toEntry) : [],
        classes: Array.isArray(symbols.classes) ? symbols.classes.map(toEntry) : [],
        types: Array.isArray(symbols.types) ? symbols.types.map(toEntry) : []
    });
    // FAST PATH: single-file analysis with symbols enabled
    const result = await getSingleFileAnalysis(file, options.projectPath, { symbols: true });
    if (result?.symbols) {
        return collect(result.symbols);
    }
    // SLOW PATH: full graph analysis (fallback)
    const graph = await getGraph(options.projectPath);
    const node = findNode(graph, file);
    if (!node?.symbols)
        return { functions: [], classes: [], types: [] };
    return collect(node.symbols);
}
356
+ /**
357
+ * Get summary info about a file
358
+ */
359
/**
 * Summarize a file: its node id/type, import counts in both directions,
 * and the average cyclomatic complexity of its functions (null when no
 * function reports a metric).
 * @returns {Promise<object|null>} Summary, or null when the file is unknown.
 */
export async function queryFileInfo(file, options) {
    const graph = await getGraph(options.projectPath);
    const node = findNode(graph, file);
    if (!node)
        return null;
    // Outgoing edges = imports, incoming edges = importedBy
    let imports = 0;
    let importedBy = 0;
    for (const edge of graph.edges) {
        if (pathMatches(edge.from, file))
            imports += 1;
        if (pathMatches(edge.to, file))
            importedBy += 1;
    }
    // Average cyclomatic complexity over functions that report it,
    // rounded to one decimal place.
    let avgComplexity = null;
    const cyclomatics = (node.symbols?.functions ?? [])
        .filter((f) => f.complexity?.cyclomatic)
        .map((f) => f.complexity.cyclomatic);
    if (cyclomatics.length > 0) {
        const mean = cyclomatics.reduce((a, b) => a + b, 0) / cyclomatics.length;
        avgComplexity = Math.round(mean * 10) / 10;
    }
    return {
        path: node.id,
        type: node.type,
        imports,
        importedBy,
        avgComplexity
    };
}
387
+ /**
388
+ * Search for files matching a pattern
389
+ */
390
/**
 * Search project files whose path matches a glob-like pattern.
 * Supports `**` (matches across "/"), `*` (any chars except "/"), and `?`
 * (one char). Matching is case-insensitive.
 *
 * Bug fix: the previous conversion replaced `**` with `.*` BEFORE the
 * single-`*` pass, which then rewrote the `*` inside `.*` into `.[^/]*`,
 * so globstar patterns like `src/**\/*.js` could not match nested paths.
 * A placeholder keeps the globstar expansion out of the `*` pass.
 *
 * @returns {Promise<string[]>} Ids of matching file nodes.
 */
export async function querySearchFiles(pattern, options) {
    const graph = await getGraph(options.projectPath);
    // Convert glob pattern to regex
    const GLOBSTAR = '\u0000'; // placeholder that cannot appear in a path
    const regexPattern = pattern
        .replace(/\./g, '\\.')
        .replace(/\*\*/g, GLOBSTAR)
        .replace(/\*/g, '[^/]*')
        .replace(/\?/g, '.')
        .replace(/\u0000/g, '.*');
    const regex = new RegExp(regexPattern, 'i');
    return graph.nodes
        .filter(n => n.type === 'file' && regex.test(n.id))
        .map(n => n.id);
}
package/package.json ADDED
@@ -0,0 +1,39 @@
1
+ {
2
+ "name": "codeast",
3
+ "version": "1.0.0",
4
+ "description": "MCP server for code analysis - reduces context window usage by returning structured data",
5
+ "type": "module",
6
+ "bin": {
7
+ "codeast": "./dist/mcp/server.js"
8
+ },
9
+ "files": [
10
+ "dist"
11
+ ],
12
+ "scripts": {
13
+ "build": "tsc",
14
+ "start": "node dist/mcp/server.js",
15
+ "dev": "tsx src/mcp/server.ts",
16
+ "prepublishOnly": "npm run build"
17
+ },
18
+ "keywords": [
19
+ "mcp",
20
+ "claude",
21
+ "code-analysis",
22
+ "ast",
23
+ "tree-sitter"
24
+ ],
25
+ "repository": {
26
+ "type": "git",
27
+ "url": "https://github.com/dimaland1/codeast"
28
+ },
29
+ "license": "MIT",
30
+ "dependencies": {
31
+ "@modelcontextprotocol/sdk": "^1.25.2",
32
+ "simple-git": "^3.30.0"
33
+ },
34
+ "devDependencies": {
35
+ "@types/node": "^20",
36
+ "tsx": "^4.7",
37
+ "typescript": "^5.3"
38
+ }
39
+ }