@supermodeltools/mcp-server 0.4.2 → 0.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,38 @@
1
+ "use strict";
2
+ /**
3
+ * Query types and result shapes for the query engine
4
+ */
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.isQueryError = isQueryError;
7
+ exports.createError = createError;
8
+ exports.createResponse = createResponse;
9
// Type guard for error responses
/**
 * Determine whether a query-engine result is an error response.
 *
 * @param {unknown} result - Value returned by the query engine.
 * @returns {boolean} True when `result` is a non-null object whose
 *   `error` property is itself a non-null object.
 */
function isQueryError(result) {
    return (typeof result === 'object' &&
        result !== null &&
        'error' in result &&
        typeof result.error === 'object' &&
        // `typeof null === 'object'`, so without this check a response of
        // `{ error: null }` would be misclassified as an error.
        result.error !== null);
}
16
// Create error response
/**
 * Build a structured error response envelope.
 *
 * @param {string} code - Machine-readable error code.
 * @param {string} message - Human-readable error description.
 * @param {{retryable?: boolean, detail?: unknown}} [options] - Optional
 *   extras; `retryable` and `detail` are copied through as-is.
 * @returns {{error: {code: string, message: string, retryable: (boolean|undefined), detail: unknown}}}
 */
function createError(code, message, options) {
    // Destructure once; missing options yields `undefined` for both fields,
    // exactly as individual optional-chained reads would.
    const { retryable, detail } = options ?? {};
    return { error: { code, message, retryable, detail } };
}
27
// Create success response
/**
 * Build a structured success response envelope.
 *
 * @param {unknown} query - The query that produced this result.
 * @param {string} cacheKey - Cache key associated with the result.
 * @param {string} source - Where the result came from (e.g. cache vs. fresh).
 * @param {unknown} cachedAt - Timestamp the result was cached at.
 * @param {unknown} result - The query result payload.
 * @param {{page?: unknown, warnings?: unknown}} [options] - Optional
 *   pagination info and warnings, copied through as-is.
 * @returns {object} Response envelope with all fields present
 *   (`page`/`warnings` are `undefined` when not supplied).
 */
function createResponse(query, cacheKey, source, cachedAt, result, options) {
    // Pull optional extras up front; both stay `undefined` when options
    // is absent, matching per-field optional chaining.
    const page = options?.page;
    const warnings = options?.warnings;
    return { query, cacheKey, source, cachedAt, result, page, warnings };
}
package/dist/server.js CHANGED
@@ -9,6 +9,7 @@ const stdio_js_1 = require("@modelcontextprotocol/sdk/server/stdio.js");
9
9
  const sdk_1 = require("@supermodeltools/sdk");
10
10
  const create_supermodel_graph_1 = __importDefault(require("./tools/create-supermodel-graph"));
11
11
  const types_js_1 = require("@modelcontextprotocol/sdk/types.js");
12
+ const zip_repository_1 = require("./utils/zip-repository");
12
13
  class Server {
13
14
  server;
14
15
  client;
@@ -18,7 +19,40 @@ class Server {
18
19
  version: '0.0.1',
19
20
  }, {
20
21
  capabilities: { tools: {}, logging: {} },
21
- instructions: 'This MCP server provides tools for deep codebase analysis using static analysis and graph generation.\n\n# WHEN TO USE CODEBASE ANALYSIS\n\nProactively use the analyze_codebase tool in these scenarios:\n\n1. EXPLORING NEW CODE: When the user asks about an unfamiliar codebase, analyze it first to understand structure, dependencies, and architecture before answering questions or making changes.\n\n2. PLANNING REFACTORS: Before refactoring code, analyze dependency and call graphs to assess impact across the codebase. Identify all affected files and relationships.\n\n3. ASSESSING CHANGE IMPACT: When asked to modify existing functionality, analyze call graphs to understand what depends on the code being changed.\n\n4. UNDERSTANDING ARCHITECTURE: When questions arise about "how does X work" or "where is Y implemented", analyze the codebase to map out the actual structure and relationships.\n\n5. FINDING DEPENDENCIES: When investigating bugs or adding features, analyze dependency graphs to understand module relationships and potential side effects.\n\n6. MAPPING DOMAIN MODELS: When working with complex business logic, analyze domain classifications to understand system boundaries and architectural patterns.\n\n# QUICK START\n\nBasic workflow:\n1. Create repository ZIP: git archive -o /tmp/repo.zip HEAD\n2. Generate cache key: git rev-parse --short HEAD\n3. Call analyze_codebase with file=/tmp/repo.zip, Idempotency-Key=projectname:supermodel:{hash}, and jq_filter to extract needed data\n4. Use the graph data to inform your work\n5. 
Keep results in context - reuse for multiple queries about the same code state\n\n# TECHNICAL DETAILS\n\nPREPARING REPOSITORY ZIP FILES:\n\nFor Git Repositories (Recommended):\nRun: cd /path/to/your/repo && git archive -o /tmp/repo.zip HEAD\n\nThis method automatically:\n- Respects .gitignore\n- Includes only tracked files\n- Creates cleaner, smaller archives\n- Produces reproducible results\n\nFor Any Directory:\nRun: cd /path/to/your/repo && zip -r /tmp/repo.zip . -x "node_modules/*" -x ".git/*" -x "dist/*" -x "build/*" -x "target/*" -x "*.pyc" -x "__pycache__/*" -x "venv/*" -x ".venv/*" -x "vendor/*" -x ".idea/*" -x ".vscode/*"\n\nINCLUDE: Source code files (.py, .js, .ts, .tsx, .java, .go, .rs, .rb, .kt, .scala, .c, .cpp, .h, .hpp), configuration files (package.json, tsconfig.json, pyproject.toml, Cargo.toml, go.mod, pom.xml), and type definitions (.d.ts, .pyi).\n\nEXCLUDE: Dependencies (node_modules/, vendor/, venv/, .venv/, target/), build outputs (dist/, build/, out/, .next/, __pycache__/), version control (.git/), IDE files (.idea/, .vscode/), and large binaries/images/datasets.\n\nIf ZIP exceeds 50MB: Ensure dependencies are excluded, consider analyzing a subdirectory, or check for accidentally committed binary files.\n\nCACHING STRATEGY:\n\nGraph generation is computationally expensive. 
NEVER regenerate for the same code state.\n\nIdempotency Keys: Use format {repo_identifier}:{graph_type}:{content_hash}\nExample: myproject:supermodel:abc123def\n\nGenerate content hash:\n- For git repos: git rev-parse --short HEAD\n- For ZIP files: shasum -a 256 /tmp/repo.zip | cut -d\' \' -f1 | head -c 12\n\nREGENERATE when:\n- Source code files changed\n- New files added affecting analysis scope\n- Files deleted from the analyzed set\n- Dependencies changed (affects dependency graph)\n\nDO NOT regenerate when:\n- Only documentation/comments changed\n- Only formatting changed\n- Only non-code files changed\n- Switching between different analysis questions on the same code state\n\nSESSION MANAGEMENT:\n\nWithin a session:\n- Keep graph results in context memory\n- Reference previous results instead of re-calling the API\n- Use jq_filter to extract specific subsets from cached results\n- Store summary statistics for quick reference\n\nAcross sessions:\n- Store the idempotency key used\n- Store a summary of the graph (node counts, key relationships)\n- On resume, check if code state matches before regenerating\n\nOPTIMIZATION:\n\nThe analyze_codebase tool returns comprehensive results including all graph types (dependencies, calls, domain model, AST). 
Call it ONCE and use jq_filter to extract specific data as needed.\n\nExample filters:\n- \'.summary\' - Get overview statistics\n- \'.graph.nodes[] | select(.type=="function")\' - Extract function nodes\n- \'.graph.relationships[] | select(.type=="calls")\' - Extract call relationships\n- \'.graph.nodes[] | select(.file | contains("auth"))\' - Find nodes in auth-related files\n\nTrack: idempotency key, commit/ZIP hash, generation timestamp, summary stats (file count, node count, relationship count).\n\nCOMPLETE EXAMPLE:\n\n# Initial analysis\n$ cd /path/to/project\n$ git archive -o /tmp/project.zip HEAD\n$ HASH=$(git rev-parse --short HEAD)\n# Call analyze_codebase with:\n# file: /tmp/project.zip\n# Idempotency-Key: myproject:supermodel:${HASH}\n# jq_filter: \'.summary\'\n\n# Later queries on same code\n# Reuse cached results or filter differently:\n# jq_filter: \'.graph.nodes[] | select(.type=="class")\'',
22
+ instructions: `# Server Instructions: Supermodel Codebase Explorer
23
+
24
+ ## Graph Rules
25
+ - This API produces graphs of the code contained within a target directory.
26
+ - STRATEGY: Before debugging, planning, or analyzing a change to a code repository, generate a code graph. Use it to localize changes and find what files to search more efficiently than grep.
27
+
28
+ ## Debugging Strategy
29
+ 1. Generate a code graph of the given repository or a subset.
30
+ 2. Analyze the nodes and relationships which appear to be related to your issue.
31
+ 3. Analyze the broader context of these nodes in relationships within their domain and subdomain.
32
+ 4. Use the graph like a diagram to navigate the codebase more efficiently than raw grep and to analyze the potential blast radius of any change.
33
+
34
+ ## Planning Strategy
35
+ 1. Generate a code graph of the given repository or a subset.
36
+ 2. Analyze relationships like dependencies, calls, and inheritance to identify the potential blast radius of a proposed change.
37
+ 3. Examine other elements of the same Domain and Subdomain to look for patterns including best practices or anti-patterns.
38
+ 4. Look at the nodes you plan to change and find their physical locations, allowing you to analyze more efficiently than blind grepping.
39
+
40
+ ## Analysis Strategy
41
+ 1. Generate a code graph of the given repository or a subset.
42
+ 2. Analyze the system domains to understand the high-level system architecture.
43
+ 3. Examine leaf nodes to see the structure of the broader tree.
44
+ 4. Use the graph like a map to navigate the codebase more efficiently than blind grepping.
45
+
46
+ ## Performance Optimization
47
+
48
+ For localized bugs:
49
+ 1. Identify the affected subsystem from the issue description
50
+ 2. Analyze only that subdirectory (e.g., \`django/db\` instead of full repo)
51
+ 3. This is faster, uses less memory, and avoids ZIP size limits
52
+
53
+ Example:
54
+ - Full repo: directory="/repo" → 180MB, 50k nodes
55
+ - Subsystem: directory="/repo/django/db" → 15MB, 3k nodes`,
22
56
  });
23
57
  const config = new sdk_1.Configuration({
24
58
  basePath: process.env.SUPERMODEL_BASE_URL || 'https://api.supermodeltools.com',
@@ -47,6 +81,9 @@ class Server {
47
81
  });
48
82
  }
49
83
  async start() {
84
+ // Clean up any stale ZIP files from previous sessions
85
+ // (older than 24 hours)
86
+ await (0, zip_repository_1.cleanupOldZips)(24 * 60 * 60 * 1000);
50
87
  const transport = new stdio_js_1.StdioServerTransport();
51
88
  await this.server.connect(transport);
52
89
  console.error('Supermodel MCP Server running on stdio');