code-graph-context 2.2.0 → 2.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +119 -64
- package/dist/cli/cli.js +266 -0
- package/dist/cli/neo4j-docker.js +159 -0
- package/dist/mcp/constants.js +120 -0
- package/dist/mcp/handlers/incremental-parse.handler.js +19 -0
- package/dist/mcp/mcp.server.js +76 -1
- package/dist/mcp/service-init.js +53 -0
- package/dist/mcp/services/watch-manager.js +57 -7
- package/dist/mcp/tools/hello.tool.js +16 -2
- package/dist/mcp/tools/index.js +33 -0
- package/dist/mcp/tools/swarm-cleanup.tool.js +157 -0
- package/dist/mcp/tools/swarm-constants.js +35 -0
- package/dist/mcp/tools/swarm-pheromone.tool.js +196 -0
- package/dist/mcp/tools/swarm-sense.tool.js +212 -0
- package/dist/storage/neo4j/neo4j.service.js +8 -4
- package/package.json +2 -2
package/dist/mcp/constants.js
CHANGED
@@ -31,6 +31,9 @@ export const TOOL_NAMES = {
     listWatchers: 'list_watchers',
     detectDeadCode: 'detect_dead_code',
     detectDuplicateCode: 'detect_duplicate_code',
+    swarmPheromone: 'swarm_pheromone',
+    swarmSense: 'swarm_sense',
+    swarmCleanup: 'swarm_cleanup',
 };
 // Tool Metadata
 export const TOOL_METADATA = {
@@ -354,6 +357,123 @@ Parameters:
 
 Use this to identify refactoring opportunities and reduce code duplication.`,
     },
+    [TOOL_NAMES.swarmPheromone]: {
+        title: 'Swarm Pheromone',
+        description: `Leave a pheromone marker on a code node for stigmergic coordination between agents.
+
+**What is Stigmergy?**
+Agents coordinate indirectly by leaving markers (pheromones) on code nodes. Other agents sense these markers and adapt their behavior. No direct messaging needed.
+
+**Pheromone Types:**
+- exploring: "I'm looking at this" (2 min half-life)
+- modifying: "I'm actively working on this" (10 min half-life)
+- claiming: "This is my territory" (1 hour half-life)
+- completed: "I finished work here" (24 hour half-life)
+- warning: "Danger - don't touch" (never decays)
+- blocked: "I'm stuck on this" (5 min half-life)
+- proposal: "Proposed artifact awaiting approval" (1 hour half-life)
+- needs_review: "Someone should check this" (30 min half-life)
+
+**Parameters:**
+- nodeId: The code node ID to mark
+- type: Type of pheromone (see above)
+- agentId: Your unique agent identifier
+- swarmId: Swarm ID from orchestrator (for bulk cleanup)
+- intensity: 0.0-1.0, how strong the signal (default: 1.0)
+- data: Optional metadata (summary, reason, etc.)
+- remove: Set true to remove the pheromone
+
+**Workflow states** (exploring, claiming, modifying, completed, blocked) are mutually exclusive per agent+node. Setting one automatically removes others.
+
+**Usage Pattern:**
+1. Before starting work: swarm_sense to check what's claimed
+2. Claim your target: swarm_pheromone({ nodeId, type: "claiming", agentId, swarmId })
+3. Refresh periodically if working long
+4. Mark complete: swarm_pheromone({ nodeId, type: "completed", agentId, swarmId, data: { summary: "..." } })
+
+**Decay:**
+Pheromones automatically fade over time. If an agent dies, its markers decay and work becomes available again.`,
+    },
+    [TOOL_NAMES.swarmSense]: {
+        title: 'Swarm Sense',
+        description: `Query pheromones in the code graph to sense what other agents are doing.
+
+**What This Does:**
+Returns active pheromones with their current intensity (after decay). Use this to:
+- See what nodes are being worked on
+- Avoid conflicts with other agents
+- Find unclaimed work
+- Check if your dependencies are being modified
+
+**Parameters:**
+- swarmId: Filter by swarm ID (see only this swarm's pheromones)
+- types: Filter by pheromone types (e.g., ["modifying", "claiming"])
+- nodeIds: Check specific nodes
+- agentIds: Filter by specific agents
+- excludeAgentId: Exclude your own pheromones (see what OTHERS are doing)
+- minIntensity: Minimum intensity after decay (default: 0.3)
+- limit: Max results (default: 50)
+- includeStats: Get summary statistics by type
+- cleanup: Remove fully decayed pheromones (intensity < 0.01)
+
+**Usage Pattern:**
+\`\`\`
+// Before starting work, check what's taken
+swarm_sense({
+  types: ["modifying", "claiming"],
+  minIntensity: 0.3
+})
+
+// Check a specific node before modifying
+swarm_sense({
+  nodeIds: ["proj_xxx:Service:UserService"],
+  types: ["modifying", "warning"]
+})
+
+// See what other agents are doing (exclude self)
+swarm_sense({
+  excludeAgentId: "my-agent-id",
+  types: ["exploring", "modifying"]
+})
+\`\`\`
+
+**Decay:**
+Intensity decreases over time (exponential decay). A pheromone with intensity 0.25 is almost gone. Below the minIntensity threshold, it's not returned.`,
+    },
+    [TOOL_NAMES.swarmCleanup]: {
+        title: 'Swarm Cleanup',
+        description: `Bulk delete pheromones after a swarm completes.
+
+**When to use:**
+Call this when a swarm finishes to clean up all its pheromones. Prevents pollution for future swarms.
+
+**Parameters:**
+- projectId: Required - the project
+- swarmId: Delete all pheromones from this swarm
+- agentId: Delete all pheromones from this specific agent
+- all: Set true to delete ALL pheromones in the project (use with caution)
+- keepTypes: Pheromone types to preserve (default: ["warning"])
+- dryRun: Preview what would be deleted without deleting
+
+**Must specify one of:** swarmId, agentId, or all=true
+
+**Examples:**
+\`\`\`
+// Clean up after a swarm completes
+swarm_cleanup({ projectId: "backend", swarmId: "swarm_abc123" })
+
+// Preview what would be deleted
+swarm_cleanup({ projectId: "backend", swarmId: "swarm_abc123", dryRun: true })
+
+// Clean up a specific agent's pheromones
+swarm_cleanup({ projectId: "backend", agentId: "swarm_abc123_auth" })
+
+// Nuclear option: delete all (except warnings)
+swarm_cleanup({ projectId: "backend", all: true })
+\`\`\`
+
+**Note:** \`warning\` pheromones are preserved by default. Pass \`keepTypes: []\` to delete everything.`,
+    },
 };
 // Default Values
 export const DEFAULTS = {
package/dist/mcp/handlers/incremental-parse.handler.js
CHANGED
@@ -37,6 +37,7 @@ export const performIncrementalParse = async (projectPath, projectId, tsconfigPa
     });
     // If no changes, return early
     if (filesToReparse.length === 0 && filesToDelete.length === 0) {
+        await debugLog('Incremental parse: no changes, returning early', {});
         return {
             nodesUpdated: 0,
             edgesUpdated: 0,
@@ -44,24 +45,35 @@ export const performIncrementalParse = async (projectPath, projectId, tsconfigPa
             filesDeleted: filesToDelete.length,
         };
     }
+    await debugLog('Incremental parse: changes detected, continuing', { filesToReparse: filesToReparse.length });
     let savedCrossFileEdges = [];
     const filesToRemoveFromGraph = [...filesToDelete, ...filesToReparse];
     if (filesToRemoveFromGraph.length > 0) {
+        await debugLog('Incremental parse: getting cross-file edges', { count: filesToRemoveFromGraph.length });
         // Save cross-file edges before deletion
        savedCrossFileEdges = await getCrossFileEdges(neo4jService, filesToRemoveFromGraph, resolvedId);
+        await debugLog('Incremental parse: got cross-file edges', { savedCount: savedCrossFileEdges.length });
+        await debugLog('Incremental parse: deleting old subgraphs', {});
         // Delete old subgraphs
         await deleteSourceFileSubgraphs(neo4jService, filesToRemoveFromGraph, resolvedId);
+        await debugLog('Incremental parse: deleted old subgraphs', {});
     }
     let nodesImported = 0;
     let edgesImported = 0;
     if (filesToReparse.length > 0) {
+        await debugLog('Incremental parse: loading existing nodes', {});
         // Load existing nodes for edge detection
         const existingNodes = await loadExistingNodesForEdgeDetection(neo4jService, filesToRemoveFromGraph, resolvedId);
+        await debugLog('Incremental parse: loaded existing nodes', { count: existingNodes.length });
         parser.setExistingNodes(existingNodes);
+        await debugLog('Incremental parse: parsing workspace', { fileCount: filesToReparse.length });
         // Parse only changed files
         await parser.parseWorkspace(filesToReparse);
+        await debugLog('Incremental parse: parsed workspace', {});
         // Export graph data
+        await debugLog('Incremental parse: exporting to JSON', {});
         const { nodes, edges } = parser.exportToJson();
+        await debugLog('Incremental parse: exported to JSON', { nodeCount: nodes.length, edgeCount: edges.length });
         // Get framework schemas if available (use unknown as intermediate to access private property)
         // eslint-disable-next-line @typescript-eslint/no-explicit-any
         const parserAny = parser;
@@ -80,26 +92,33 @@ export const performIncrementalParse = async (projectPath, projectId, tsconfigPa
             },
         };
         // Write to JSON file (required by GraphGeneratorHandler)
+        await debugLog('Incremental parse: writing JSON file', {});
         const outputPath = join(projectPath, FILE_PATHS.graphOutput);
         writeFileSync(outputPath, JSON.stringify(graphData, null, LOG_CONFIG.jsonIndentation));
+        await debugLog('Incremental parse: wrote JSON file', { outputPath });
         // Update Project node
+        await debugLog('Incremental parse: updating project node', {});
         await neo4jService.run(UPSERT_PROJECT_QUERY, {
             projectId: resolvedId,
             path: projectPath,
             name: projectName,
             status: 'complete',
         });
+        await debugLog('Incremental parse: updated project node', {});
         // Import nodes and edges (clearExisting = false for incremental)
+        await debugLog('Incremental parse: starting graph import', {});
         graphHandler.setProjectId(resolvedId);
         try {
             const result = await graphHandler.generateGraph(outputPath, DEFAULTS.batchSize, false);
             nodesImported = result.nodesImported;
             edgesImported = result.edgesImported;
+            await debugLog('Incremental parse: graph import completed', { nodesImported, edgesImported });
         }
         finally {
             // Clean up temporary graph.json file
             try {
                 unlinkSync(outputPath);
+                await debugLog('Incremental parse: cleaned up temp file', {});
             }
             catch {
                 // Ignore cleanup errors - file may not exist or be inaccessible
package/dist/mcp/mcp.server.js
CHANGED
@@ -22,11 +22,34 @@ import { initializeServices } from './service-init.js';
 import { watchManager } from './services/watch-manager.js';
 import { registerAllTools } from './tools/index.js';
 import { debugLog } from './utils.js';
+// Track server state for debugging
+let serverStartTime;
+let toolCallCount = 0;
+let lastToolCall = null;
+/**
+ * Log memory usage and server stats
+ */
+const logServerStats = async (context) => {
+    const mem = process.memoryUsage();
+    await debugLog(`Server stats [${context}]`, {
+        uptime: serverStartTime ? `${Math.round((Date.now() - serverStartTime.getTime()) / 1000)}s` : 'not started',
+        toolCallCount,
+        lastToolCall,
+        memory: {
+            heapUsed: `${Math.round(mem.heapUsed / 1024 / 1024)}MB`,
+            heapTotal: `${Math.round(mem.heapTotal / 1024 / 1024)}MB`,
+            rss: `${Math.round(mem.rss / 1024 / 1024)}MB`,
+        },
+        pid: process.pid,
+    });
+};
 /**
  * Main server initialization and startup
  */
 const startServer = async () => {
+    serverStartTime = new Date();
     console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.starting }));
+    await debugLog('Server starting', { pid: process.pid, startTime: serverStartTime.toISOString() });
     // Create MCP server instance
     const server = new McpServer({
         name: MCP_SERVER_CONFIG.name,
@@ -34,6 +57,7 @@ const startServer = async () => {
     });
     // Register all tools
     registerAllTools(server);
+    await debugLog('Tools registered', { toolCount: 15 });
     // Configure watch manager with incremental parse handler and MCP server
     watchManager.setIncrementalParseHandler(performIncrementalParse);
     watchManager.setMcpServer(server.server);
@@ -50,22 +74,46 @@ const startServer = async () => {
     });
     // Create and connect transport
     console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.creatingTransport }));
+    await debugLog('Creating stdio transport', {});
     const transport = new StdioServerTransport();
+    // Add transport event logging
+    process.stdin.on('close', async () => {
+        await debugLog('STDIN closed - client disconnected', {});
+        await logServerStats('stdin-close');
+    });
+    process.stdin.on('end', async () => {
+        await debugLog('STDIN ended', {});
+        await logServerStats('stdin-end');
+    });
+    process.stdin.on('error', async (err) => {
+        await debugLog('STDIN error', { error: err.message, stack: err.stack });
+    });
+    process.stdout.on('close', async () => {
+        await debugLog('STDOUT closed', {});
+    });
+    process.stdout.on('error', async (err) => {
+        await debugLog('STDOUT error', { error: err.message, stack: err.stack });
+    });
     console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.connectingTransport }));
+    await debugLog('Connecting transport', {});
     await server.connect(transport);
     console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.connected }));
+    await debugLog('Server connected and ready', { pid: process.pid });
+    await logServerStats('server-ready');
 };
 /**
  * Graceful shutdown handler
  */
 const shutdown = async (signal) => {
     console.error(JSON.stringify({ level: 'info', message: `Received ${signal}, shutting down...` }));
+    await logServerStats(`shutdown-${signal}`);
     try {
         await watchManager.stopAllWatchers();
         await debugLog('Shutdown complete', { signal });
     }
     catch (error) {
         console.error(JSON.stringify({ level: 'error', message: 'Error during shutdown', error: String(error) }));
+        await debugLog('Error during shutdown', { signal, error: String(error) });
     }
     process.exit(0);
 };
@@ -73,14 +121,41 @@ const shutdown = async (signal) => {
 process.on('uncaughtException', async (error) => {
     console.error(JSON.stringify({ level: 'error', message: 'Uncaught exception', error: String(error), stack: error.stack }));
     await debugLog('Uncaught exception', { error: String(error), stack: error.stack });
+    await logServerStats('uncaught-exception');
 });
 process.on('unhandledRejection', async (reason) => {
     console.error(JSON.stringify({ level: 'error', message: 'Unhandled rejection', reason: String(reason) }));
     await debugLog('Unhandled rejection', { reason: String(reason) });
+    await logServerStats('unhandled-rejection');
+});
+// Log other process events that might indicate issues
+process.on('warning', async (warning) => {
+    await debugLog('Process warning', { name: warning.name, message: warning.message, stack: warning.stack });
+});
+process.on('beforeExit', async (code) => {
+    await debugLog('Process beforeExit', { code });
+    await logServerStats('before-exit');
+});
+process.on('exit', (code) => {
+    // Note: Can't use async here, exit is synchronous
+    console.error(JSON.stringify({ level: 'info', message: `Process exiting with code ${code}` }));
 });
 // Register shutdown handlers
-
+// NOTE: Only handle SIGTERM for graceful shutdown. SIGINT is ignored because
+// Claude Code may propagate SIGINT to child processes when spawning agents,
+// which would incorrectly kill the MCP server. The MCP server lifecycle is
+// managed by Claude Code via stdio transport closure.
 process.on('SIGTERM', () => shutdown('SIGTERM'));
+// Log SIGINT but don't exit - Claude Code manages our lifecycle
+process.on('SIGINT', async () => {
+    await debugLog('SIGINT received but ignored - lifecycle managed by Claude Code', {});
+    await logServerStats('sigint-ignored');
+});
+// Also ignore SIGHUP which can be sent during terminal operations
+process.on('SIGHUP', async () => {
+    await debugLog('SIGHUP received but ignored', {});
+    await logServerStats('sighup-ignored');
+});
 // Start the server
 console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.startingServer }));
 await startServer();
package/dist/mcp/service-init.js
CHANGED
@@ -4,14 +4,67 @@
  */
 import fs from 'fs/promises';
 import { join } from 'path';
+import { ensureNeo4jRunning, isDockerInstalled, isDockerRunning, } from '../cli/neo4j-docker.js';
 import { Neo4jService, QUERIES } from '../storage/neo4j/neo4j.service.js';
 import { FILE_PATHS, LOG_CONFIG } from './constants.js';
 import { initializeNaturalLanguageService } from './tools/natural-language-to-cypher.tool.js';
 import { debugLog } from './utils.js';
+/**
+ * Log startup warnings for missing configuration
+ */
+const checkConfiguration = async () => {
+    if (!process.env.OPENAI_API_KEY) {
+        console.error(JSON.stringify({
+            level: 'warn',
+            message: '[code-graph-context] OPENAI_API_KEY not set. Semantic search and NL queries unavailable.',
+        }));
+        await debugLog('Configuration warning', { warning: 'OPENAI_API_KEY not set' });
+    }
+};
+/**
+ * Ensure Neo4j is running - auto-start if Docker available, fail if not
+ */
+const ensureNeo4j = async () => {
+    // Check if Docker is available
+    if (!isDockerInstalled()) {
+        const msg = 'Docker not installed. Install Docker or run: code-graph-context init';
+        console.error(JSON.stringify({ level: 'error', message: `[code-graph-context] ${msg}` }));
+        throw new Error(msg);
+    }
+    if (!isDockerRunning()) {
+        const msg = 'Docker not running. Start Docker or run: code-graph-context init';
+        console.error(JSON.stringify({ level: 'error', message: `[code-graph-context] ${msg}` }));
+        throw new Error(msg);
+    }
+    const result = await ensureNeo4jRunning();
+    if (!result.success) {
+        const msg = `Neo4j failed to start: ${result.error}. Run: code-graph-context init`;
+        console.error(JSON.stringify({ level: 'error', message: `[code-graph-context] ${msg}` }));
+        throw new Error(msg);
+    }
+    if (result.action === 'created') {
+        console.error(JSON.stringify({
+            level: 'info',
+            message: '[code-graph-context] Neo4j container created and started',
+        }));
+    }
+    else if (result.action === 'started') {
+        console.error(JSON.stringify({
+            level: 'info',
+            message: '[code-graph-context] Neo4j container started',
+        }));
+    }
+    await debugLog('Neo4j ready', result);
+};
 /**
  * Initialize all external services required by the MCP server
  */
 export const initializeServices = async () => {
+    // Check for missing configuration (non-fatal warnings)
+    await checkConfiguration();
+    // Ensure Neo4j is running (fatal if not)
+    await ensureNeo4j();
+    // Initialize services
     await Promise.all([initializeNeo4jSchema(), initializeNaturalLanguageService()]);
 };
 /**
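ensureNeo4j consumes the result of ensureNeo4jRunning() from the new cli/neo4j-docker.js, which appears in the file list but is not shown here. The contract implied by this usage (an inference from this diff, not the module's declared type):

```js
/**
 * Inferred from how ensureNeo4j uses the value above; cli/neo4j-docker.js may
 * define additional fields or action values not visible in this diff.
 *
 * @typedef {Object} EnsureNeo4jResult
 * @property {boolean} success                      false when Neo4j could not be started
 * @property {string} [error]                       populated when success is false
 * @property {'created'|'started'|string} [action]  'created' = container created and started,
 *                                                  'started' = existing container started;
 *                                                  other values are only echoed via debugLog
 */
```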
package/dist/mcp/services/watch-manager.js
CHANGED
@@ -26,9 +26,12 @@ class WatchManager {
      * Send a notification via MCP logging (if supported)
      */
     sendNotification(notification) {
+        debugLog('sendNotification called', { type: notification.type, projectId: notification.projectId });
         if (!this.mcpServer) {
+            debugLog('sendNotification: no MCP server, skipping', {});
             return;
         }
+        debugLog('sendNotification: sending to MCP', { type: notification.type });
         // sendLoggingMessage returns a Promise - use .catch() to handle rejection
         this.mcpServer
             .sendLoggingMessage({
@@ -36,9 +39,16 @@ class WatchManager {
             logger: 'file-watcher',
             data: notification,
         })
-.
-
+            .then(() => {
+            debugLog('sendNotification: MCP message sent successfully', { type: notification.type });
+        })
+            .catch((error) => {
+            // MCP logging not supported - log but don't crash
             // This is expected if the client doesn't support logging capability
+            debugLog('sendNotification: MCP message failed (expected if client lacks logging)', {
+                type: notification.type,
+                error: String(error)
+            });
         });
     }
     /**
@@ -128,7 +138,7 @@ class WatchManager {
      * Handle a file system event
      */
     handleFileEvent(state, type, filePath) {
-        debugLog('
+        debugLog('handleFileEvent START', { type, filePath, projectId: state.projectId, status: state.status, isStopping: state.isStopping });
         // Ignore events if watcher is stopping or not active
         if (state.isStopping || state.status !== 'active') {
             debugLog('Ignoring event - watcher not active or stopping', {
@@ -166,20 +176,35 @@ class WatchManager {
             timestamp: new Date().toISOString(),
         });
         // Set new debounce timer
+        debugLog('handleFileEvent: setting debounce timer', { debounceMs: state.config.debounceMs });
         state.debounceTimer = setTimeout(() => {
+            debugLog('handleFileEvent: debounce timer fired, calling processEvents', { projectId: state.projectId });
             this.processEvents(state).catch((error) => {
+                debugLog('handleFileEvent: processEvents error', { error: String(error) });
                 console.error('[WatchManager] Error in processEvents:', error);
             });
         }, state.config.debounceMs);
+        debugLog('handleFileEvent END', { pendingCount: state.pendingEvents.length });
     }
     /**
      * Process accumulated file events after debounce period
      */
     async processEvents(state) {
+        await debugLog('processEvents START', {
+            projectId: state.projectId,
+            isProcessing: state.isProcessing,
+            pendingCount: state.pendingEvents.length,
+            isStopping: state.isStopping
+        });
         // Don't process if already processing, no events, or watcher is stopping
-        if (state.isProcessing || state.pendingEvents.length === 0 || state.isStopping)
+        if (state.isProcessing || state.pendingEvents.length === 0 || state.isStopping) {
+            await debugLog('processEvents: early return', {
+                reason: state.isProcessing ? 'already processing' : state.pendingEvents.length === 0 ? 'no events' : 'stopping'
+            });
             return;
+        }
         state.isProcessing = true;
+        await debugLog('processEvents: set isProcessing=true', {});
         const events = [...state.pendingEvents];
         state.pendingEvents = [];
         state.debounceTimer = null;
@@ -199,7 +224,15 @@ class WatchManager {
             if (!this.incrementalParseHandler) {
                 throw new Error('Incremental parse handler not configured');
             }
+            await debugLog('processEvents: calling incrementalParseHandler', {
+                projectPath: state.projectPath,
+                projectId: state.projectId
+            });
             const result = await this.incrementalParseHandler(state.projectPath, state.projectId, state.tsconfigPath);
+            await debugLog('processEvents: incrementalParseHandler returned', {
+                nodesUpdated: result.nodesUpdated,
+                edgesUpdated: result.edgesUpdated
+            });
             state.lastUpdateTime = new Date();
             const elapsedMs = Date.now() - startTime;
             this.sendNotification({
@@ -221,6 +254,7 @@ class WatchManager {
         }
         catch (error) {
             const errorMessage = error instanceof Error ? error.message : String(error);
+            await debugLog('processEvents: error caught', { error: errorMessage });
             this.sendNotification({
                 type: 'incremental_parse_failed',
                 projectId: state.projectId,
@@ -235,6 +269,7 @@ class WatchManager {
         }
         finally {
             state.isProcessing = false;
+            await debugLog('processEvents END', { projectId: state.projectId, isProcessing: state.isProcessing });
         }
     }
     /**
@@ -243,9 +278,14 @@ class WatchManager {
     handleWatcherError(state, error) {
         state.status = 'error';
         state.errorMessage = error instanceof Error ? error.message : String(error);
-        debugLog('
+        debugLog('handleWatcherError START', { projectId: state.projectId, error: state.errorMessage });
         // Clean up the failed watcher to prevent it from staying in error state indefinitely
-        this.stopWatching(state.projectId)
+        this.stopWatching(state.projectId)
+            .then(() => {
+            debugLog('handleWatcherError: cleanup succeeded', { projectId: state.projectId });
+        })
+            .catch((cleanupError) => {
+            debugLog('handleWatcherError: cleanup failed', { projectId: state.projectId, cleanupError: String(cleanupError) });
             console.error(`[WatchManager] Failed to cleanup errored watcher ${state.projectId}:`, cleanupError);
         });
     }
@@ -255,23 +295,33 @@ class WatchManager {
      * Promise is tracked on state to allow cleanup during stop
      */
     syncMissedChanges(state) {
-
+        debugLog('syncMissedChanges START', { projectId: state.projectId });
+        if (!this.incrementalParseHandler) {
+            debugLog('syncMissedChanges: no handler, skipping', {});
             return;
+        }
         // Track the promise on state so stopWatching can wait for it
         state.syncPromise = this.incrementalParseHandler(state.projectPath, state.projectId, state.tsconfigPath)
             .then((result) => {
+            debugLog('syncMissedChanges: completed', {
+                projectId: state.projectId,
+                nodesUpdated: result.nodesUpdated,
+                edgesUpdated: result.edgesUpdated
+            });
             if (result.nodesUpdated > 0 || result.edgesUpdated > 0) {
                 console.log(`[WatchManager] Synced missed changes for ${state.projectId}: ` +
                     `${result.nodesUpdated} nodes, ${result.edgesUpdated} edges`);
             }
         })
             .catch((error) => {
+            debugLog('syncMissedChanges: error', { projectId: state.projectId, error: String(error), isStopping: state.isStopping });
             // Only log if watcher hasn't been stopped
             if (!state.isStopping) {
                 console.error(`[WatchManager] Failed to sync missed changes for ${state.projectId}:`, error);
             }
         })
             .finally(() => {
+            debugLog('syncMissedChanges END', { projectId: state.projectId });
             state.syncPromise = undefined;
         });
     }
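handleFileEvent and processEvents together implement a debounce-and-drain pattern: events accumulate in pendingEvents, each new event restarts the debounce timer, and the isProcessing / isStopping flags keep drains from overlapping. A stripped-down sketch of that pattern (illustrative names, not the WatchManager class itself):

```js
// Minimal debounce-and-drain sketch mirroring the shape of the WatchManager
// logic above; only the structure matches, none of the names are the package's.
class DebouncedQueue {
  constructor(handler, debounceMs) {
    this.handler = handler;       // e.g. an incremental-parse callback
    this.debounceMs = debounceMs;
    this.pending = [];
    this.timer = null;
    this.isProcessing = false;
  }

  push(event) {
    this.pending.push(event);
    if (this.timer) clearTimeout(this.timer); // restart the debounce window
    this.timer = setTimeout(() => {
      this.drain().catch((err) => console.error('drain failed:', err));
    }, this.debounceMs);
  }

  async drain() {
    // Same guards as processEvents: skip if busy or nothing to do.
    if (this.isProcessing || this.pending.length === 0) return;
    this.isProcessing = true;
    const events = this.pending;
    this.pending = [];
    this.timer = null;
    try {
      await this.handler(events);
    } finally {
      this.isProcessing = false; // always release, mirroring the finally block above
    }
  }
}
```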
package/dist/mcp/tools/hello.tool.js
CHANGED
@@ -3,11 +3,25 @@
  * Simple test tool to verify MCP connection
  */
 import { TOOL_NAMES, TOOL_METADATA, MESSAGES } from '../constants.js';
-import { createSuccessResponse } from '../utils.js';
+import { createSuccessResponse, debugLog } from '../utils.js';
+import { logToolCallStart, logToolCallEnd } from './index.js';
 export const createHelloTool = (server) => {
     server.registerTool(TOOL_NAMES.hello, {
         title: TOOL_METADATA[TOOL_NAMES.hello].title,
         description: TOOL_METADATA[TOOL_NAMES.hello].description,
         inputSchema: {},
-    }, async () =>
+    }, async () => {
+        const startTime = Date.now();
+        const callId = await logToolCallStart('hello');
+        try {
+            const result = createSuccessResponse(MESSAGES.success.hello);
+            await logToolCallEnd('hello', callId, true, Date.now() - startTime);
+            return result;
+        }
+        catch (error) {
+            await debugLog('Hello tool error', { error: String(error) });
+            await logToolCallEnd('hello', callId, false, Date.now() - startTime);
+            throw error;
+        }
+    });
 };
package/dist/mcp/tools/index.js
CHANGED
@@ -2,6 +2,7 @@
  * MCP Tool Factory
  * Centralized tool creation and registration
  */
+import { debugLog } from '../utils.js';
 import { createCheckParseStatusTool } from './check-parse-status.tool.js';
 import { createDetectDeadCodeTool } from './detect-dead-code.tool.js';
 import { createDetectDuplicateCodeTool } from './detect-duplicate-code.tool.js';
@@ -14,8 +15,36 @@ import { createParseTypescriptProjectTool } from './parse-typescript-project.too
 import { createSearchCodebaseTool } from './search-codebase.tool.js';
 import { createStartWatchProjectTool } from './start-watch-project.tool.js';
 import { createStopWatchProjectTool } from './stop-watch-project.tool.js';
+import { createSwarmCleanupTool } from './swarm-cleanup.tool.js';
+import { createSwarmPheromoneTool } from './swarm-pheromone.tool.js';
+import { createSwarmSenseTool } from './swarm-sense.tool.js';
 import { createTestNeo4jConnectionTool } from './test-neo4j-connection.tool.js';
 import { createTraverseFromNodeTool } from './traverse-from-node.tool.js';
+// Track tool calls for debugging
+let globalToolCallCount = 0;
+/**
+ * Log tool call start (exported for use by individual tools)
+ */
+export const logToolCallStart = async (toolName, params) => {
+    globalToolCallCount++;
+    const callId = globalToolCallCount;
+    await debugLog(`Tool call START: ${toolName}`, {
+        callId,
+        totalCalls: globalToolCallCount,
+        params: params ? JSON.stringify(params).substring(0, 500) : 'none',
+    });
+    return callId;
+};
+/**
+ * Log tool call end (exported for use by individual tools)
+ */
+export const logToolCallEnd = async (toolName, callId, success, duration) => {
+    await debugLog(`Tool call END: ${toolName}`, {
+        callId,
+        success,
+        duration: duration ? `${duration}ms` : 'unknown',
+    });
+};
 /**
  * Register all MCP tools with the server
  */
@@ -39,4 +68,8 @@ export const registerAllTools = (server) => {
     createStartWatchProjectTool(server);
     createStopWatchProjectTool(server);
     createListWatchersTool(server);
+    // Register swarm coordination tools
+    createSwarmPheromoneTool(server);
+    createSwarmSenseTool(server);
+    createSwarmCleanupTool(server);
 };