code-graph-context 2.2.0 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -30,6 +30,7 @@ A Model Context Protocol (MCP) server that builds rich code graphs to provide de
30
30
  - **Impact Analysis**: Assess refactoring risk with dependency analysis (LOW/MEDIUM/HIGH/CRITICAL scoring)
31
31
  - **Dead Code Detection**: Find unreferenced exports, uncalled private methods, unused interfaces with confidence scoring
32
32
  - **Duplicate Code Detection**: Identify structural duplicates (identical AST) and semantic duplicates (similar logic via embeddings)
33
+ - **Swarm Coordination**: Multi-agent stigmergic coordination through pheromone markers with exponential decay
33
34
  - **High Performance**: Optimized Neo4j storage with vector indexing for fast retrieval
34
35
  - **MCP Integration**: Seamless integration with Claude Code and other MCP-compatible tools
35
36
 
@@ -255,6 +256,9 @@ npm run build
255
256
  | `natural_language_to_cypher` | Convert natural language to Cypher | **Advanced queries** - complex graph queries |
256
257
  | `detect_dead_code` | Find unreferenced exports, uncalled methods, unused interfaces | **Code cleanup** - identify potentially removable code |
257
258
  | `detect_duplicate_code` | Find structural and semantic code duplicates | **Refactoring** - identify DRY violations |
259
+ | `swarm_pheromone` | Leave pheromone markers on code nodes | **Multi-agent** - stigmergic coordination |
260
+ | `swarm_sense` | Query pheromones in the code graph | **Multi-agent** - sense what other agents are doing |
261
+ | `swarm_cleanup` | Bulk delete pheromones | **Multi-agent** - cleanup after swarm completion |
258
262
  | `test_neo4j_connection` | Verify database connectivity | **Health check** - troubleshooting |
259
263
 
260
264
  > **Note**: All query tools (`search_codebase`, `traverse_from_node`, `impact_analysis`, `natural_language_to_cypher`) require a `projectId` parameter. Use `list_projects` to discover available projects.
@@ -755,6 +759,67 @@ await mcp.call('stop_watch_project', {
755
759
  - 1000 pending events per watcher
756
760
  - Graceful cleanup on server shutdown
757
761
 
762
+ #### 8. Swarm Coordination Tools
763
+ **Purpose**: Enable multiple parallel agents to coordinate work through stigmergic pheromone markers in the code graph—no direct messaging needed.
764
+
765
+ **Core Concepts:**
766
+ - **Pheromones**: Markers attached to graph nodes that decay over time
767
+ - **swarmId**: Groups related agents for bulk cleanup when done
768
+ - **Workflow States**: `exploring`, `claiming`, `modifying`, `completed`, `blocked` (mutually exclusive per agent+node)
769
+ - **Flags**: `warning`, `proposal`, `needs_review` (can coexist with workflow states)
770
+
771
+ **Pheromone Types & Decay:**
772
+ | Type | Half-Life | Use |
773
+ |------|-----------|-----|
774
+ | `exploring` | 2 min | Browsing/reading |
775
+ | `modifying` | 10 min | Active work |
776
+ | `claiming` | 1 hour | Ownership |
777
+ | `completed` | 24 hours | Done |
778
+ | `warning` | Never | Danger |
779
+ | `blocked` | 5 min | Stuck |
780
+ | `proposal` | 1 hour | Awaiting approval |
781
+ | `needs_review` | 30 min | Review requested |
782
+
783
+ ```typescript
784
+ // Orchestrator: Generate swarm ID and spawn agents
785
+ const swarmId = `swarm_${Date.now()}`;
786
+
787
+ // Agent: Check what's claimed before working
788
+ await mcp.call('swarm_sense', {
789
+ projectId: 'my-backend',
790
+ swarmId,
791
+ types: ['claiming', 'modifying']
792
+ });
793
+
794
+ // Agent: Claim a node before working
795
+ await mcp.call('swarm_pheromone', {
796
+ projectId: 'my-backend',
797
+ nodeId: 'proj_xxx:ClassDeclaration:abc123', // From search_codebase or traverse_from_node
798
+ type: 'claiming',
799
+ agentId: 'agent_1',
800
+ swarmId
801
+ });
802
+
803
+ // Agent: Mark complete when done
804
+ await mcp.call('swarm_pheromone', {
805
+ projectId: 'my-backend',
806
+ nodeId: 'proj_xxx:ClassDeclaration:abc123',
807
+ type: 'completed',
808
+ agentId: 'agent_1',
809
+ swarmId,
810
+ data: { summary: 'Added soft delete support' }
811
+ });
812
+
813
+ // Orchestrator: Clean up when swarm is done
814
+ await mcp.call('swarm_cleanup', {
815
+ projectId: 'my-backend',
816
+ swarmId,
817
+ keepTypes: ['warning'] // Preserve warnings
818
+ });
819
+ ```
820
+
821
+ **Important**: Node IDs must come from graph tool responses (`search_codebase`, `traverse_from_node`). Never fabricate node IDs—they are hash-based strings like `proj_xxx:ClassDeclaration:abc123`.
822
+
758
823
  ### Workflow Examples
759
824
 
760
825
  #### Example 1: Understanding Authentication Flow
@@ -31,6 +31,9 @@ export const TOOL_NAMES = {
31
31
  listWatchers: 'list_watchers',
32
32
  detectDeadCode: 'detect_dead_code',
33
33
  detectDuplicateCode: 'detect_duplicate_code',
34
+ swarmPheromone: 'swarm_pheromone',
35
+ swarmSense: 'swarm_sense',
36
+ swarmCleanup: 'swarm_cleanup',
34
37
  };
35
38
  // Tool Metadata
36
39
  export const TOOL_METADATA = {
@@ -354,6 +357,123 @@ Parameters:
354
357
 
355
358
  Use this to identify refactoring opportunities and reduce code duplication.`,
356
359
  },
360
+ [TOOL_NAMES.swarmPheromone]: {
361
+ title: 'Swarm Pheromone',
362
+ description: `Leave a pheromone marker on a code node for stigmergic coordination between agents.
363
+
364
+ **What is Stigmergy?**
365
+ Agents coordinate indirectly by leaving markers (pheromones) on code nodes. Other agents sense these markers and adapt their behavior. No direct messaging needed.
366
+
367
+ **Pheromone Types:**
368
+ - exploring: "I'm looking at this" (2 min half-life)
369
+ - modifying: "I'm actively working on this" (10 min half-life)
370
+ - claiming: "This is my territory" (1 hour half-life)
371
+ - completed: "I finished work here" (24 hour half-life)
372
+ - warning: "Danger - don't touch" (never decays)
373
+ - blocked: "I'm stuck on this" (5 min half-life)
374
+ - proposal: "Proposed artifact awaiting approval" (1 hour half-life)
375
+ - needs_review: "Someone should check this" (30 min half-life)
376
+
377
+ **Parameters:**
378
+ - nodeId: The code node ID to mark
379
+ - type: Type of pheromone (see above)
380
+ - agentId: Your unique agent identifier
381
+ - swarmId: Swarm ID from orchestrator (for bulk cleanup)
382
+ - intensity: 0.0-1.0, how strong the signal (default: 1.0)
383
+ - data: Optional metadata (summary, reason, etc.)
384
+ - remove: Set true to remove the pheromone
385
+
386
+ **Workflow states** (exploring, claiming, modifying, completed, blocked) are mutually exclusive per agent+node. Setting one automatically removes others.
387
+
388
+ **Usage Pattern:**
389
+ 1. Before starting work: swarm_sense to check what's claimed
390
+ 2. Claim your target: swarm_pheromone({ nodeId, type: "claiming", agentId, swarmId })
391
+ 3. Refresh periodically if working long
392
+ 4. Mark complete: swarm_pheromone({ nodeId, type: "completed", agentId, swarmId, data: { summary: "..." } })
393
+
394
+ **Decay:**
395
+ Pheromones automatically fade over time. If an agent dies, its markers decay and work becomes available again.`,
396
+ },
397
+ [TOOL_NAMES.swarmSense]: {
398
+ title: 'Swarm Sense',
399
+ description: `Query pheromones in the code graph to sense what other agents are doing.
400
+
401
+ **What This Does:**
402
+ Returns active pheromones with their current intensity (after decay). Use this to:
403
+ - See what nodes are being worked on
404
+ - Avoid conflicts with other agents
405
+ - Find unclaimed work
406
+ - Check if your dependencies are being modified
407
+
408
+ **Parameters:**
409
+ - swarmId: Filter by swarm ID (see only this swarm's pheromones)
410
+ - types: Filter by pheromone types (e.g., ["modifying", "claiming"])
411
+ - nodeIds: Check specific nodes
412
+ - agentIds: Filter by specific agents
413
+ - excludeAgentId: Exclude your own pheromones (see what OTHERS are doing)
414
+ - minIntensity: Minimum intensity after decay (default: 0.3)
415
+ - limit: Max results (default: 50)
416
+ - includeStats: Get summary statistics by type
417
+ - cleanup: Remove fully decayed pheromones (intensity < 0.01)
418
+
419
+ **Usage Pattern:**
420
+ \`\`\`
421
+ // Before starting work, check what's taken
422
+ swarm_sense({
423
+ types: ["modifying", "claiming"],
424
+ minIntensity: 0.3
425
+ })
426
+
427
+ // Check a specific node before modifying
428
+ swarm_sense({
429
+ nodeIds: ["proj_xxx:Service:UserService"],
430
+ types: ["modifying", "warning"]
431
+ })
432
+
433
+ // See what other agents are doing (exclude self)
434
+ swarm_sense({
435
+ excludeAgentId: "my-agent-id",
436
+ types: ["exploring", "modifying"]
437
+ })
438
+ \`\`\`
439
+
440
+ **Decay:**
441
+ Intensity decreases over time (exponential decay). A pheromone with intensity 0.25 is almost gone. Below minIntensity threshold, it's not returned.`,
442
+ },
443
+ [TOOL_NAMES.swarmCleanup]: {
444
+ title: 'Swarm Cleanup',
445
+ description: `Bulk delete pheromones after a swarm completes.
446
+
447
+ **When to use:**
448
+ Call this when a swarm finishes to clean up all its pheromones. Prevents pollution for future swarms.
449
+
450
+ **Parameters:**
451
+ - projectId: Required - the project
452
+ - swarmId: Delete all pheromones from this swarm
453
+ - agentId: Delete all pheromones from this specific agent
454
+ - all: Set true to delete ALL pheromones in project (use with caution)
455
+ - keepTypes: Pheromone types to preserve (default: ["warning"])
456
+ - dryRun: Preview what would be deleted without deleting
457
+
458
+ **Must specify one of:** swarmId, agentId, or all=true
459
+
460
+ **Examples:**
461
+ \`\`\`
462
+ // Clean up after a swarm completes
463
+ swarm_cleanup({ projectId: "backend", swarmId: "swarm_abc123" })
464
+
465
+ // Preview what would be deleted
466
+ swarm_cleanup({ projectId: "backend", swarmId: "swarm_abc123", dryRun: true })
467
+
468
+ // Clean up a specific agent's pheromones
469
+ swarm_cleanup({ projectId: "backend", agentId: "swarm_abc123_auth" })
470
+
471
+ // Nuclear option: delete all (except warnings)
472
+ swarm_cleanup({ projectId: "backend", all: true })
473
+ \`\`\`
474
+
475
+ **Note:** \`warning\` pheromones are preserved by default. Pass \`keepTypes: []\` to delete everything.`,
476
+ },
357
477
  };
358
478
  // Default Values
359
479
  export const DEFAULTS = {
@@ -37,6 +37,7 @@ export const performIncrementalParse = async (projectPath, projectId, tsconfigPa
37
37
  });
38
38
  // If no changes, return early
39
39
  if (filesToReparse.length === 0 && filesToDelete.length === 0) {
40
+ await debugLog('Incremental parse: no changes, returning early', {});
40
41
  return {
41
42
  nodesUpdated: 0,
42
43
  edgesUpdated: 0,
@@ -44,24 +45,35 @@ export const performIncrementalParse = async (projectPath, projectId, tsconfigPa
44
45
  filesDeleted: filesToDelete.length,
45
46
  };
46
47
  }
48
+ await debugLog('Incremental parse: changes detected, continuing', { filesToReparse: filesToReparse.length });
47
49
  let savedCrossFileEdges = [];
48
50
  const filesToRemoveFromGraph = [...filesToDelete, ...filesToReparse];
49
51
  if (filesToRemoveFromGraph.length > 0) {
52
+ await debugLog('Incremental parse: getting cross-file edges', { count: filesToRemoveFromGraph.length });
50
53
  // Save cross-file edges before deletion
51
54
  savedCrossFileEdges = await getCrossFileEdges(neo4jService, filesToRemoveFromGraph, resolvedId);
55
+ await debugLog('Incremental parse: got cross-file edges', { savedCount: savedCrossFileEdges.length });
56
+ await debugLog('Incremental parse: deleting old subgraphs', {});
52
57
  // Delete old subgraphs
53
58
  await deleteSourceFileSubgraphs(neo4jService, filesToRemoveFromGraph, resolvedId);
59
+ await debugLog('Incremental parse: deleted old subgraphs', {});
54
60
  }
55
61
  let nodesImported = 0;
56
62
  let edgesImported = 0;
57
63
  if (filesToReparse.length > 0) {
64
+ await debugLog('Incremental parse: loading existing nodes', {});
58
65
  // Load existing nodes for edge detection
59
66
  const existingNodes = await loadExistingNodesForEdgeDetection(neo4jService, filesToRemoveFromGraph, resolvedId);
67
+ await debugLog('Incremental parse: loaded existing nodes', { count: existingNodes.length });
60
68
  parser.setExistingNodes(existingNodes);
69
+ await debugLog('Incremental parse: parsing workspace', { fileCount: filesToReparse.length });
61
70
  // Parse only changed files
62
71
  await parser.parseWorkspace(filesToReparse);
72
+ await debugLog('Incremental parse: parsed workspace', {});
63
73
  // Export graph data
74
+ await debugLog('Incremental parse: exporting to JSON', {});
64
75
  const { nodes, edges } = parser.exportToJson();
76
+ await debugLog('Incremental parse: exported to JSON', { nodeCount: nodes.length, edgeCount: edges.length });
65
77
  // Get framework schemas if available (use unknown as intermediate to access private property)
66
78
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
67
79
  const parserAny = parser;
@@ -80,26 +92,33 @@ export const performIncrementalParse = async (projectPath, projectId, tsconfigPa
80
92
  },
81
93
  };
82
94
  // Write to JSON file (required by GraphGeneratorHandler)
95
+ await debugLog('Incremental parse: writing JSON file', {});
83
96
  const outputPath = join(projectPath, FILE_PATHS.graphOutput);
84
97
  writeFileSync(outputPath, JSON.stringify(graphData, null, LOG_CONFIG.jsonIndentation));
98
+ await debugLog('Incremental parse: wrote JSON file', { outputPath });
85
99
  // Update Project node
100
+ await debugLog('Incremental parse: updating project node', {});
86
101
  await neo4jService.run(UPSERT_PROJECT_QUERY, {
87
102
  projectId: resolvedId,
88
103
  path: projectPath,
89
104
  name: projectName,
90
105
  status: 'complete',
91
106
  });
107
+ await debugLog('Incremental parse: updated project node', {});
92
108
  // Import nodes and edges (clearExisting = false for incremental)
109
+ await debugLog('Incremental parse: starting graph import', {});
93
110
  graphHandler.setProjectId(resolvedId);
94
111
  try {
95
112
  const result = await graphHandler.generateGraph(outputPath, DEFAULTS.batchSize, false);
96
113
  nodesImported = result.nodesImported;
97
114
  edgesImported = result.edgesImported;
115
+ await debugLog('Incremental parse: graph import completed', { nodesImported, edgesImported });
98
116
  }
99
117
  finally {
100
118
  // Clean up temporary graph.json file
101
119
  try {
102
120
  unlinkSync(outputPath);
121
+ await debugLog('Incremental parse: cleaned up temp file', {});
103
122
  }
104
123
  catch {
105
124
  // Ignore cleanup errors - file may not exist or be inaccessible
@@ -22,11 +22,34 @@ import { initializeServices } from './service-init.js';
22
22
  import { watchManager } from './services/watch-manager.js';
23
23
  import { registerAllTools } from './tools/index.js';
24
24
  import { debugLog } from './utils.js';
25
+ // Track server state for debugging
26
+ let serverStartTime;
27
+ let toolCallCount = 0;
28
+ let lastToolCall = null;
29
+ /**
30
+ * Log memory usage and server stats
31
+ */
32
+ const logServerStats = async (context) => {
33
+ const mem = process.memoryUsage();
34
+ await debugLog(`Server stats [${context}]`, {
35
+ uptime: serverStartTime ? `${Math.round((Date.now() - serverStartTime.getTime()) / 1000)}s` : 'not started',
36
+ toolCallCount,
37
+ lastToolCall,
38
+ memory: {
39
+ heapUsed: `${Math.round(mem.heapUsed / 1024 / 1024)}MB`,
40
+ heapTotal: `${Math.round(mem.heapTotal / 1024 / 1024)}MB`,
41
+ rss: `${Math.round(mem.rss / 1024 / 1024)}MB`,
42
+ },
43
+ pid: process.pid,
44
+ });
45
+ };
25
46
  /**
26
47
  * Main server initialization and startup
27
48
  */
28
49
  const startServer = async () => {
50
+ serverStartTime = new Date();
29
51
  console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.starting }));
52
+ await debugLog('Server starting', { pid: process.pid, startTime: serverStartTime.toISOString() });
30
53
  // Create MCP server instance
31
54
  const server = new McpServer({
32
55
  name: MCP_SERVER_CONFIG.name,
@@ -34,6 +57,7 @@ const startServer = async () => {
34
57
  });
35
58
  // Register all tools
36
59
  registerAllTools(server);
60
+ await debugLog('Tools registered', { toolCount: 15 });
37
61
  // Configure watch manager with incremental parse handler and MCP server
38
62
  watchManager.setIncrementalParseHandler(performIncrementalParse);
39
63
  watchManager.setMcpServer(server.server);
@@ -50,22 +74,46 @@ const startServer = async () => {
50
74
  });
51
75
  // Create and connect transport
52
76
  console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.creatingTransport }));
77
+ await debugLog('Creating stdio transport', {});
53
78
  const transport = new StdioServerTransport();
79
+ // Add transport event logging
80
+ process.stdin.on('close', async () => {
81
+ await debugLog('STDIN closed - client disconnected', {});
82
+ await logServerStats('stdin-close');
83
+ });
84
+ process.stdin.on('end', async () => {
85
+ await debugLog('STDIN ended', {});
86
+ await logServerStats('stdin-end');
87
+ });
88
+ process.stdin.on('error', async (err) => {
89
+ await debugLog('STDIN error', { error: err.message, stack: err.stack });
90
+ });
91
+ process.stdout.on('close', async () => {
92
+ await debugLog('STDOUT closed', {});
93
+ });
94
+ process.stdout.on('error', async (err) => {
95
+ await debugLog('STDOUT error', { error: err.message, stack: err.stack });
96
+ });
54
97
  console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.connectingTransport }));
98
+ await debugLog('Connecting transport', {});
55
99
  await server.connect(transport);
56
100
  console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.connected }));
101
+ await debugLog('Server connected and ready', { pid: process.pid });
102
+ await logServerStats('server-ready');
57
103
  };
58
104
  /**
59
105
  * Graceful shutdown handler
60
106
  */
61
107
  const shutdown = async (signal) => {
62
108
  console.error(JSON.stringify({ level: 'info', message: `Received ${signal}, shutting down...` }));
109
+ await logServerStats(`shutdown-${signal}`);
63
110
  try {
64
111
  await watchManager.stopAllWatchers();
65
112
  await debugLog('Shutdown complete', { signal });
66
113
  }
67
114
  catch (error) {
68
115
  console.error(JSON.stringify({ level: 'error', message: 'Error during shutdown', error: String(error) }));
116
+ await debugLog('Error during shutdown', { signal, error: String(error) });
69
117
  }
70
118
  process.exit(0);
71
119
  };
@@ -73,14 +121,41 @@ const shutdown = async (signal) => {
73
121
  process.on('uncaughtException', async (error) => {
74
122
  console.error(JSON.stringify({ level: 'error', message: 'Uncaught exception', error: String(error), stack: error.stack }));
75
123
  await debugLog('Uncaught exception', { error: String(error), stack: error.stack });
124
+ await logServerStats('uncaught-exception');
76
125
  });
77
126
  process.on('unhandledRejection', async (reason) => {
78
127
  console.error(JSON.stringify({ level: 'error', message: 'Unhandled rejection', reason: String(reason) }));
79
128
  await debugLog('Unhandled rejection', { reason: String(reason) });
129
+ await logServerStats('unhandled-rejection');
130
+ });
131
+ // Log other process events that might indicate issues
132
+ process.on('warning', async (warning) => {
133
+ await debugLog('Process warning', { name: warning.name, message: warning.message, stack: warning.stack });
134
+ });
135
+ process.on('beforeExit', async (code) => {
136
+ await debugLog('Process beforeExit', { code });
137
+ await logServerStats('before-exit');
138
+ });
139
+ process.on('exit', (code) => {
140
+ // Note: Can't use async here, exit is synchronous
141
+ console.error(JSON.stringify({ level: 'info', message: `Process exiting with code ${code}` }));
80
142
  });
81
143
  // Register shutdown handlers
82
- process.on('SIGINT', () => shutdown('SIGINT'));
144
+ // NOTE: Only handle SIGTERM for graceful shutdown. SIGINT is ignored because
145
+ // Claude Code may propagate SIGINT to child processes when spawning agents,
146
+ // which would incorrectly kill the MCP server. The MCP server lifecycle is
147
+ // managed by Claude Code via stdio transport closure.
83
148
  process.on('SIGTERM', () => shutdown('SIGTERM'));
149
+ // Log SIGINT but don't exit - Claude Code manages our lifecycle
150
+ process.on('SIGINT', async () => {
151
+ await debugLog('SIGINT received but ignored - lifecycle managed by Claude Code', {});
152
+ await logServerStats('sigint-ignored');
153
+ });
154
+ // Also ignore SIGHUP which can be sent during terminal operations
155
+ process.on('SIGHUP', async () => {
156
+ await debugLog('SIGHUP received but ignored', {});
157
+ await logServerStats('sighup-ignored');
158
+ });
84
159
  // Start the server
85
160
  console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.startingServer }));
86
161
  await startServer();
@@ -26,9 +26,12 @@ class WatchManager {
26
26
  * Send a notification via MCP logging (if supported)
27
27
  */
28
28
  sendNotification(notification) {
29
+ debugLog('sendNotification called', { type: notification.type, projectId: notification.projectId });
29
30
  if (!this.mcpServer) {
31
+ debugLog('sendNotification: no MCP server, skipping', {});
30
32
  return;
31
33
  }
34
+ debugLog('sendNotification: sending to MCP', { type: notification.type });
32
35
  // sendLoggingMessage returns a Promise - use .catch() to handle rejection
33
36
  this.mcpServer
34
37
  .sendLoggingMessage({
@@ -36,9 +39,16 @@ class WatchManager {
36
39
  logger: 'file-watcher',
37
40
  data: notification,
38
41
  })
39
- .catch(() => {
40
- // MCP logging not supported - silently ignore
42
+ .then(() => {
43
+ debugLog('sendNotification: MCP message sent successfully', { type: notification.type });
44
+ })
45
+ .catch((error) => {
46
+ // MCP logging not supported - log but don't crash
41
47
  // This is expected if the client doesn't support logging capability
48
+ debugLog('sendNotification: MCP message failed (expected if client lacks logging)', {
49
+ type: notification.type,
50
+ error: String(error)
51
+ });
42
52
  });
43
53
  }
44
54
  /**
@@ -128,7 +138,7 @@ class WatchManager {
128
138
  * Handle a file system event
129
139
  */
130
140
  handleFileEvent(state, type, filePath) {
131
- debugLog('File event received', { type, filePath, projectId: state.projectId, status: state.status });
141
+ debugLog('handleFileEvent START', { type, filePath, projectId: state.projectId, status: state.status, isStopping: state.isStopping });
132
142
  // Ignore events if watcher is stopping or not active
133
143
  if (state.isStopping || state.status !== 'active') {
134
144
  debugLog('Ignoring event - watcher not active or stopping', {
@@ -166,20 +176,35 @@ class WatchManager {
166
176
  timestamp: new Date().toISOString(),
167
177
  });
168
178
  // Set new debounce timer
179
+ debugLog('handleFileEvent: setting debounce timer', { debounceMs: state.config.debounceMs });
169
180
  state.debounceTimer = setTimeout(() => {
181
+ debugLog('handleFileEvent: debounce timer fired, calling processEvents', { projectId: state.projectId });
170
182
  this.processEvents(state).catch((error) => {
183
+ debugLog('handleFileEvent: processEvents error', { error: String(error) });
171
184
  console.error('[WatchManager] Error in processEvents:', error);
172
185
  });
173
186
  }, state.config.debounceMs);
187
+ debugLog('handleFileEvent END', { pendingCount: state.pendingEvents.length });
174
188
  }
175
189
  /**
176
190
  * Process accumulated file events after debounce period
177
191
  */
178
192
  async processEvents(state) {
193
+ await debugLog('processEvents START', {
194
+ projectId: state.projectId,
195
+ isProcessing: state.isProcessing,
196
+ pendingCount: state.pendingEvents.length,
197
+ isStopping: state.isStopping
198
+ });
179
199
  // Don't process if already processing, no events, or watcher is stopping
180
- if (state.isProcessing || state.pendingEvents.length === 0 || state.isStopping)
200
+ if (state.isProcessing || state.pendingEvents.length === 0 || state.isStopping) {
201
+ await debugLog('processEvents: early return', {
202
+ reason: state.isProcessing ? 'already processing' : state.pendingEvents.length === 0 ? 'no events' : 'stopping'
203
+ });
181
204
  return;
205
+ }
182
206
  state.isProcessing = true;
207
+ await debugLog('processEvents: set isProcessing=true', {});
183
208
  const events = [...state.pendingEvents];
184
209
  state.pendingEvents = [];
185
210
  state.debounceTimer = null;
@@ -199,7 +224,15 @@ class WatchManager {
199
224
  if (!this.incrementalParseHandler) {
200
225
  throw new Error('Incremental parse handler not configured');
201
226
  }
227
+ await debugLog('processEvents: calling incrementalParseHandler', {
228
+ projectPath: state.projectPath,
229
+ projectId: state.projectId
230
+ });
202
231
  const result = await this.incrementalParseHandler(state.projectPath, state.projectId, state.tsconfigPath);
232
+ await debugLog('processEvents: incrementalParseHandler returned', {
233
+ nodesUpdated: result.nodesUpdated,
234
+ edgesUpdated: result.edgesUpdated
235
+ });
203
236
  state.lastUpdateTime = new Date();
204
237
  const elapsedMs = Date.now() - startTime;
205
238
  this.sendNotification({
@@ -221,6 +254,7 @@ class WatchManager {
221
254
  }
222
255
  catch (error) {
223
256
  const errorMessage = error instanceof Error ? error.message : String(error);
257
+ await debugLog('processEvents: error caught', { error: errorMessage });
224
258
  this.sendNotification({
225
259
  type: 'incremental_parse_failed',
226
260
  projectId: state.projectId,
@@ -235,6 +269,7 @@ class WatchManager {
235
269
  }
236
270
  finally {
237
271
  state.isProcessing = false;
272
+ await debugLog('processEvents END', { projectId: state.projectId, isProcessing: state.isProcessing });
238
273
  }
239
274
  }
240
275
  /**
@@ -243,9 +278,14 @@ class WatchManager {
243
278
  handleWatcherError(state, error) {
244
279
  state.status = 'error';
245
280
  state.errorMessage = error instanceof Error ? error.message : String(error);
246
- debugLog('Watcher error', { projectId: state.projectId, error: state.errorMessage });
281
+ debugLog('handleWatcherError START', { projectId: state.projectId, error: state.errorMessage });
247
282
  // Clean up the failed watcher to prevent it from staying in error state indefinitely
248
- this.stopWatching(state.projectId).catch((cleanupError) => {
283
+ this.stopWatching(state.projectId)
284
+ .then(() => {
285
+ debugLog('handleWatcherError: cleanup succeeded', { projectId: state.projectId });
286
+ })
287
+ .catch((cleanupError) => {
288
+ debugLog('handleWatcherError: cleanup failed', { projectId: state.projectId, cleanupError: String(cleanupError) });
249
289
  console.error(`[WatchManager] Failed to cleanup errored watcher ${state.projectId}:`, cleanupError);
250
290
  });
251
291
  }
@@ -255,23 +295,33 @@ class WatchManager {
255
295
  * Promise is tracked on state to allow cleanup during stop
256
296
  */
257
297
  syncMissedChanges(state) {
258
- if (!this.incrementalParseHandler)
298
+ debugLog('syncMissedChanges START', { projectId: state.projectId });
299
+ if (!this.incrementalParseHandler) {
300
+ debugLog('syncMissedChanges: no handler, skipping', {});
259
301
  return;
302
+ }
260
303
  // Track the promise on state so stopWatching can wait for it
261
304
  state.syncPromise = this.incrementalParseHandler(state.projectPath, state.projectId, state.tsconfigPath)
262
305
  .then((result) => {
306
+ debugLog('syncMissedChanges: completed', {
307
+ projectId: state.projectId,
308
+ nodesUpdated: result.nodesUpdated,
309
+ edgesUpdated: result.edgesUpdated
310
+ });
263
311
  if (result.nodesUpdated > 0 || result.edgesUpdated > 0) {
264
312
  console.log(`[WatchManager] Synced missed changes for ${state.projectId}: ` +
265
313
  `${result.nodesUpdated} nodes, ${result.edgesUpdated} edges`);
266
314
  }
267
315
  })
268
316
  .catch((error) => {
317
+ debugLog('syncMissedChanges: error', { projectId: state.projectId, error: String(error), isStopping: state.isStopping });
269
318
  // Only log if watcher hasn't been stopped
270
319
  if (!state.isStopping) {
271
320
  console.error(`[WatchManager] Failed to sync missed changes for ${state.projectId}:`, error);
272
321
  }
273
322
  })
274
323
  .finally(() => {
324
+ debugLog('syncMissedChanges END', { projectId: state.projectId });
275
325
  state.syncPromise = undefined;
276
326
  });
277
327
  }
@@ -3,11 +3,25 @@
3
3
  * Simple test tool to verify MCP connection
4
4
  */
5
5
  import { TOOL_NAMES, TOOL_METADATA, MESSAGES } from '../constants.js';
6
- import { createSuccessResponse } from '../utils.js';
6
+ import { createSuccessResponse, debugLog } from '../utils.js';
7
+ import { logToolCallStart, logToolCallEnd } from './index.js';
7
8
  export const createHelloTool = (server) => {
8
9
  server.registerTool(TOOL_NAMES.hello, {
9
10
  title: TOOL_METADATA[TOOL_NAMES.hello].title,
10
11
  description: TOOL_METADATA[TOOL_NAMES.hello].description,
11
12
  inputSchema: {},
12
- }, async () => createSuccessResponse(MESSAGES.success.hello));
13
+ }, async () => {
14
+ const startTime = Date.now();
15
+ const callId = await logToolCallStart('hello');
16
+ try {
17
+ const result = createSuccessResponse(MESSAGES.success.hello);
18
+ await logToolCallEnd('hello', callId, true, Date.now() - startTime);
19
+ return result;
20
+ }
21
+ catch (error) {
22
+ await debugLog('Hello tool error', { error: String(error) });
23
+ await logToolCallEnd('hello', callId, false, Date.now() - startTime);
24
+ throw error;
25
+ }
26
+ });
13
27
  };
@@ -2,6 +2,7 @@
2
2
  * MCP Tool Factory
3
3
  * Centralized tool creation and registration
4
4
  */
5
+ import { debugLog } from '../utils.js';
5
6
  import { createCheckParseStatusTool } from './check-parse-status.tool.js';
6
7
  import { createDetectDeadCodeTool } from './detect-dead-code.tool.js';
7
8
  import { createDetectDuplicateCodeTool } from './detect-duplicate-code.tool.js';
@@ -14,8 +15,36 @@ import { createParseTypescriptProjectTool } from './parse-typescript-project.too
14
15
  import { createSearchCodebaseTool } from './search-codebase.tool.js';
15
16
  import { createStartWatchProjectTool } from './start-watch-project.tool.js';
16
17
  import { createStopWatchProjectTool } from './stop-watch-project.tool.js';
18
+ import { createSwarmCleanupTool } from './swarm-cleanup.tool.js';
19
+ import { createSwarmPheromoneTool } from './swarm-pheromone.tool.js';
20
+ import { createSwarmSenseTool } from './swarm-sense.tool.js';
17
21
  import { createTestNeo4jConnectionTool } from './test-neo4j-connection.tool.js';
18
22
  import { createTraverseFromNodeTool } from './traverse-from-node.tool.js';
23
+ // Track tool calls for debugging
24
+ let globalToolCallCount = 0;
25
+ /**
26
+ * Log tool call start (exported for use by individual tools)
27
+ */
28
+ export const logToolCallStart = async (toolName, params) => {
29
+ globalToolCallCount++;
30
+ const callId = globalToolCallCount;
31
+ await debugLog(`Tool call START: ${toolName}`, {
32
+ callId,
33
+ totalCalls: globalToolCallCount,
34
+ params: params ? JSON.stringify(params).substring(0, 500) : 'none',
35
+ });
36
+ return callId;
37
+ };
38
+ /**
39
+ * Log tool call end (exported for use by individual tools)
40
+ */
41
+ export const logToolCallEnd = async (toolName, callId, success, duration) => {
42
+ await debugLog(`Tool call END: ${toolName}`, {
43
+ callId,
44
+ success,
45
+ duration: duration ? `${duration}ms` : 'unknown',
46
+ });
47
+ };
19
48
  /**
20
49
  * Register all MCP tools with the server
21
50
  */
@@ -39,4 +68,8 @@ export const registerAllTools = (server) => {
39
68
  createStartWatchProjectTool(server);
40
69
  createStopWatchProjectTool(server);
41
70
  createListWatchersTool(server);
71
+ // Register swarm coordination tools
72
+ createSwarmPheromoneTool(server);
73
+ createSwarmSenseTool(server);
74
+ createSwarmCleanupTool(server);
42
75
  };
@@ -0,0 +1,157 @@
1
/**
 * Swarm Cleanup Tool
 * Bulk delete pheromones after a swarm completes
 */
import { z } from 'zod';
import { Neo4jService } from '../../storage/neo4j/neo4j.service.js';
import { TOOL_NAMES, TOOL_METADATA } from '../constants.js';
import { createErrorResponse, createSuccessResponse, resolveProjectIdOrError, debugLog } from '../utils.js';
/**
 * Neo4j query to delete pheromones by swarm ID.
 * Types listed in $keepTypes (default: ["warning"]) are preserved.
 * The WITH clause captures agentId/type BEFORE the DETACH DELETE so the
 * aggregates in RETURN can still report what was removed (a deleted node's
 * properties are no longer readable after DELETE).
 */
const CLEANUP_BY_SWARM_QUERY = `
MATCH (p:Pheromone)
WHERE p.projectId = $projectId
AND p.swarmId = $swarmId
AND NOT p.type IN $keepTypes
WITH p, p.agentId as agentId, p.type as type
DETACH DELETE p
RETURN count(p) as deleted, collect(DISTINCT agentId) as agents, collect(DISTINCT type) as types
`;
/**
 * Neo4j query to delete pheromones by agent ID.
 * Same capture-before-delete pattern as CLEANUP_BY_SWARM_QUERY, scoped to a
 * single agent across all swarms.
 */
const CLEANUP_BY_AGENT_QUERY = `
MATCH (p:Pheromone)
WHERE p.projectId = $projectId
AND p.agentId = $agentId
AND NOT p.type IN $keepTypes
WITH p, p.swarmId as swarmId, p.type as type
DETACH DELETE p
RETURN count(p) as deleted, collect(DISTINCT swarmId) as swarms, collect(DISTINCT type) as types
`;
/**
 * Neo4j query to delete all pheromones in a project
 * (still honoring $keepTypes).
 */
const CLEANUP_ALL_QUERY = `
MATCH (p:Pheromone)
WHERE p.projectId = $projectId
AND NOT p.type IN $keepTypes
WITH p, p.agentId as agentId, p.swarmId as swarmId, p.type as type
DETACH DELETE p
RETURN count(p) as deleted, collect(DISTINCT agentId) as agents, collect(DISTINCT swarmId) as swarms, collect(DISTINCT type) as types
`;
/**
 * Count queries for dry run.
 * Each mirrors the WHERE clause of the matching CLEANUP_* query above but
 * only aggregates, so a dryRun preview reports exactly what the destructive
 * query would delete. count() results may be returned as neo4j Integer
 * objects; callers normalize them with toNumber() before serializing.
 */
const COUNT_BY_SWARM_QUERY = `
MATCH (p:Pheromone)
WHERE p.projectId = $projectId AND p.swarmId = $swarmId AND NOT p.type IN $keepTypes
RETURN count(p) as count, collect(DISTINCT p.agentId) as agents, collect(DISTINCT p.type) as types
`;
const COUNT_BY_AGENT_QUERY = `
MATCH (p:Pheromone)
WHERE p.projectId = $projectId AND p.agentId = $agentId AND NOT p.type IN $keepTypes
RETURN count(p) as count, collect(DISTINCT p.swarmId) as swarms, collect(DISTINCT p.type) as types
`;
const COUNT_ALL_QUERY = `
MATCH (p:Pheromone)
WHERE p.projectId = $projectId AND NOT p.type IN $keepTypes
RETURN count(p) as count, collect(DISTINCT p.agentId) as agents, collect(DISTINCT p.swarmId) as swarms, collect(DISTINCT p.type) as types
`;
62
/**
 * Convert a Neo4j Integer (or plain number) to a JS number.
 * count() aggregates may come back as neo4j Integer objects.
 */
const toJsNumber = (value) =>
  value !== null && typeof value === 'object' && 'toNumber' in value ? value.toNumber() : value;
/**
 * Register the swarm_cleanup MCP tool.
 *
 * Bulk-deletes pheromone nodes scoped by swarm, by agent, or project-wide,
 * always preserving the types listed in keepTypes. dryRun=true previews the
 * deletion (counts only) without touching data.
 */
export const createSwarmCleanupTool = (server) => {
  server.registerTool(TOOL_NAMES.swarmCleanup, {
    title: TOOL_METADATA[TOOL_NAMES.swarmCleanup].title,
    description: TOOL_METADATA[TOOL_NAMES.swarmCleanup].description,
    inputSchema: {
      projectId: z.string().describe('Project ID, name, or path'),
      swarmId: z.string().optional().describe('Delete all pheromones from this swarm'),
      agentId: z.string().optional().describe('Delete all pheromones from this agent'),
      all: z.boolean().optional().default(false).describe('Delete ALL pheromones in project (use with caution)'),
      keepTypes: z
        .array(z.string())
        .optional()
        .default(['warning'])
        .describe('Pheromone types to preserve (default: ["warning"])'),
      dryRun: z.boolean().optional().default(false).describe('Preview what would be deleted without deleting'),
    },
  }, async ({ projectId, swarmId, agentId, all = false, keepTypes = ['warning'], dryRun = false }) => {
    // Validate arguments BEFORE opening a DB connection or resolving the
    // project: an ambiguous request should not cost a round-trip.
    if (!swarmId && !agentId && !all) {
      return createErrorResponse('Must specify one of: swarmId, agentId, or all=true. Use dryRun=true to preview.');
    }
    const neo4jService = new Neo4jService();
    // Resolve project ID (accepts id, name, or path)
    const projectResult = await resolveProjectIdOrError(projectId, neo4jService);
    if (!projectResult.success) {
      await neo4jService.close();
      return projectResult.error;
    }
    const resolvedProjectId = projectResult.projectId;
    try {
      await debugLog('Swarm cleanup operation', {
        projectId: resolvedProjectId,
        swarmId,
        agentId,
        all,
        keepTypes,
        dryRun,
      });
      const params = { projectId: resolvedProjectId, keepTypes };
      let deleteQuery;
      let countQuery;
      let mode;
      // Precedence when several scopes are given: swarmId > agentId > all
      if (swarmId) {
        params.swarmId = swarmId;
        deleteQuery = CLEANUP_BY_SWARM_QUERY;
        countQuery = COUNT_BY_SWARM_QUERY;
        mode = 'swarm';
      }
      else if (agentId) {
        params.agentId = agentId;
        deleteQuery = CLEANUP_BY_AGENT_QUERY;
        countQuery = COUNT_BY_AGENT_QUERY;
        mode = 'agent';
      }
      else {
        deleteQuery = CLEANUP_ALL_QUERY;
        countQuery = COUNT_ALL_QUERY;
        mode = 'all';
      }
      if (dryRun) {
        // Count-only preview: nothing is deleted.
        const result = await neo4jService.run(countQuery, params);
        return createSuccessResponse(JSON.stringify({
          success: true,
          dryRun: true,
          mode,
          wouldDelete: toJsNumber(result[0]?.count ?? 0),
          agents: result[0]?.agents ?? [],
          swarms: result[0]?.swarms ?? [],
          types: result[0]?.types ?? [],
          keepTypes,
          projectId: resolvedProjectId,
        }));
      }
      const result = await neo4jService.run(deleteQuery, params);
      return createSuccessResponse(JSON.stringify({
        success: true,
        mode,
        deleted: toJsNumber(result[0]?.deleted ?? 0),
        agents: result[0]?.agents ?? [],
        swarms: result[0]?.swarms ?? [],
        types: result[0]?.types ?? [],
        keepTypes,
        projectId: resolvedProjectId,
      }));
    }
    catch (error) {
      await debugLog('Swarm cleanup error', { error: String(error) });
      return createErrorResponse(error instanceof Error ? error : String(error));
    }
    finally {
      await neo4jService.close();
    }
  });
};
@@ -0,0 +1,35 @@
1
/**
 * Shared constants for swarm coordination tools
 */
/**
 * Pheromone types and their half-lives in milliseconds.
 * Half-life determines decay rate - after one half-life, intensity drops to 50%.
 * A halfLife of -1 means the pheromone never decays (e.g. 'warning').
 * Frozen so the shared configuration cannot be mutated at runtime by any tool.
 */
export const PHEROMONE_CONFIG = Object.freeze({
  exploring: Object.freeze({ halfLife: 2 * 60 * 1000, description: 'Browsing/reading' }),
  modifying: Object.freeze({ halfLife: 10 * 60 * 1000, description: 'Active work' }),
  claiming: Object.freeze({ halfLife: 60 * 60 * 1000, description: 'Ownership' }),
  completed: Object.freeze({ halfLife: 24 * 60 * 60 * 1000, description: 'Done' }),
  warning: Object.freeze({ halfLife: -1, description: 'Never decays' }),
  blocked: Object.freeze({ halfLife: 5 * 60 * 1000, description: 'Stuck' }),
  proposal: Object.freeze({ halfLife: 60 * 60 * 1000, description: 'Awaiting approval' }),
  needs_review: Object.freeze({ halfLife: 30 * 60 * 1000, description: 'Review requested' }),
});
/**
 * All known pheromone type names, derived from PHEROMONE_CONFIG.
 */
export const PHEROMONE_TYPES = Object.freeze(Object.keys(PHEROMONE_CONFIG));
/**
 * Get half-life for a pheromone type.
 * Returns -1 for types that never decay (e.g., warning).
 * Unknown types fall back to the short 'exploring' half-life, so a typo
 * yields a quickly-decaying marker rather than a long-lived one.
 */
export const getHalfLife = (type) => {
  return PHEROMONE_CONFIG[type]?.halfLife ?? PHEROMONE_CONFIG.exploring.halfLife;
};
/**
 * Workflow states are mutually exclusive per agent+node.
 * Setting one removes others in this group.
 * Flags (warning, proposal, needs_review) can coexist with workflow states.
 */
export const WORKFLOW_STATES = Object.freeze(['exploring', 'claiming', 'modifying', 'completed', 'blocked']);
/**
 * Flags can coexist with workflow states.
 */
export const FLAG_TYPES = Object.freeze(['warning', 'proposal', 'needs_review']);
@@ -0,0 +1,196 @@
1
/**
 * Swarm Pheromone Tool
 * Leave a pheromone marker on a code node for stigmergic coordination
 */
import { z } from 'zod';
import { Neo4jService } from '../../storage/neo4j/neo4j.service.js';
import { TOOL_NAMES, TOOL_METADATA } from '../constants.js';
import { createErrorResponse, createSuccessResponse, resolveProjectIdOrError, debugLog } from '../utils.js';
import { PHEROMONE_TYPES, WORKFLOW_STATES, getHalfLife } from './swarm-constants.js';
/**
 * Neo4j query to clean up other workflow states before setting a new one.
 * Only runs for workflow state pheromones, not flags.
 * Scoped to the same project+node+agent+swarm, and excludes the type being
 * set ($newType) so an idempotent re-set is not counted as a transition.
 */
const CLEANUP_WORKFLOW_STATES_QUERY = `
MATCH (p:Pheromone)
WHERE p.projectId = $projectId
AND p.nodeId = $nodeId
AND p.agentId = $agentId
AND p.swarmId = $swarmId
AND p.type IN $workflowStates
AND p.type <> $newType
DETACH DELETE p
RETURN count(p) as cleaned
`;
/**
 * Neo4j query to create or update a pheromone.
 * MERGE is keyed on (projectId, nodeId, agentId, swarmId, type), so repeating
 * the same marker refreshes intensity/timestamp/data rather than duplicating.
 * If no node with $nodeId exists in the project, the initial MATCH yields no
 * rows and the query returns nothing; callers treat an empty result as
 * "node not found".
 * NOTE(review): halfLife is set only ON CREATE — if PHEROMONE_CONFIG changes,
 * existing pheromones keep their original half-life. Confirm this is intended.
 */
const CREATE_PHEROMONE_QUERY = `
// Find the target code node (exclude other pheromones)
MATCH (target)
WHERE target.id = $nodeId
AND target.projectId = $projectId
AND NOT target:Pheromone
WITH target
LIMIT 1

// Create or update pheromone (scoped to project)
MERGE (p:Pheromone {projectId: $projectId, nodeId: $nodeId, agentId: $agentId, swarmId: $swarmId, type: $type})
ON CREATE SET
p.id = randomUUID(),
p.intensity = $intensity,
p.timestamp = timestamp(),
p.data = $data,
p.halfLife = $halfLife
ON MATCH SET
p.intensity = $intensity,
p.timestamp = timestamp(),
p.data = $data

// Create relationship to target node if it exists
WITH p, target
WHERE target IS NOT NULL
MERGE (p)-[:MARKS]->(target)

RETURN p.id as id, p.nodeId as nodeId, p.projectId as projectId, p.type as type, p.intensity as intensity,
p.timestamp as timestamp, p.agentId as agentId, p.swarmId as swarmId,
CASE WHEN target IS NOT NULL THEN true ELSE false END as linkedToNode
`;
/**
 * Neo4j query to delete a pheromone (exact match on the MERGE key above).
 */
const DELETE_PHEROMONE_QUERY = `
MATCH (p:Pheromone {projectId: $projectId, nodeId: $nodeId, agentId: $agentId, swarmId: $swarmId, type: $type})
DETACH DELETE p
RETURN count(p) as deleted
`;
67
/**
 * Convert a Neo4j Integer (or plain number) to a JS number.
 * count() aggregates may come back as neo4j Integer objects, and comparing an
 * Integer object with `> 0` coerces it to NaN — the comparison is then always
 * false, silently taking the wrong branch.
 */
const toJsNumber = (value) =>
  value !== null && typeof value === 'object' && 'toNumber' in value ? value.toNumber() : value;
/**
 * Register the swarm_pheromone MCP tool.
 *
 * Creates, refreshes, or removes a pheromone marker on a code node. Workflow
 * states (exploring/claiming/modifying/completed/blocked) are mutually
 * exclusive per agent+node+swarm: setting one removes the others first.
 * Flags (warning/proposal/needs_review) can coexist with workflow states.
 */
export const createSwarmPheromoneTool = (server) => {
  server.registerTool(TOOL_NAMES.swarmPheromone, {
    title: TOOL_METADATA[TOOL_NAMES.swarmPheromone].title,
    description: TOOL_METADATA[TOOL_NAMES.swarmPheromone].description,
    inputSchema: {
      projectId: z.string().describe('Project ID, name, or path (e.g., "backend" or "proj_a1b2c3d4e5f6")'),
      nodeId: z.string().describe('The code node ID to mark with a pheromone'),
      type: z
        .enum(PHEROMONE_TYPES)
        .describe('Type of pheromone: exploring (browsing), modifying (active work), claiming (ownership), completed (done), warning (danger), blocked (stuck), proposal (awaiting approval), needs_review (review request)'),
      intensity: z
        .number()
        .min(0)
        .max(1)
        .optional()
        .default(1.0)
        .describe('Pheromone intensity from 0.0 to 1.0 (default: 1.0)'),
      agentId: z.string().describe('Unique identifier for the agent leaving the pheromone'),
      swarmId: z.string().describe('Swarm ID for grouping related agents (e.g., "swarm_xyz")'),
      data: z
        .record(z.unknown())
        .optional()
        .describe('Optional metadata to attach to the pheromone (e.g., summary, reason)'),
      remove: z
        .boolean()
        .optional()
        .default(false)
        .describe('If true, removes the pheromone instead of creating/updating it'),
    },
  }, async ({ projectId, nodeId, type, intensity = 1.0, agentId, swarmId, data, remove = false }) => {
    const neo4jService = new Neo4jService();
    // Resolve project ID (accepts id, name, or path)
    const projectResult = await resolveProjectIdOrError(projectId, neo4jService);
    if (!projectResult.success) {
      await neo4jService.close();
      return projectResult.error;
    }
    const resolvedProjectId = projectResult.projectId;
    try {
      if (remove) {
        const result = await neo4jService.run(DELETE_PHEROMONE_QUERY, {
          projectId: resolvedProjectId,
          nodeId,
          agentId,
          swarmId,
          type,
        });
        // Normalize before comparing: a raw neo4j Integer would make
        // `deleted > 0` false even when a pheromone was actually deleted.
        const deleted = toJsNumber(result[0]?.deleted ?? 0);
        if (deleted > 0) {
          return createSuccessResponse(JSON.stringify({
            success: true,
            action: 'removed',
            projectId: resolvedProjectId,
            nodeId,
            type,
            agentId,
            swarmId,
          }));
        }
        return createSuccessResponse(JSON.stringify({
          success: true,
          action: 'not_found',
          message: 'No matching pheromone found to remove',
          projectId: resolvedProjectId,
          nodeId,
          type,
          agentId,
          swarmId,
        }));
      }
      // Create or update pheromone
      const halfLife = getHalfLife(type);
      const dataJson = data ? JSON.stringify(data) : null;
      // If setting a workflow state, clean up other workflow states first
      // (flags may coexist and are left untouched).
      let cleanedStates = 0;
      if (WORKFLOW_STATES.includes(type)) {
        const cleanupResult = await neo4jService.run(CLEANUP_WORKFLOW_STATES_QUERY, {
          projectId: resolvedProjectId,
          nodeId,
          agentId,
          swarmId,
          workflowStates: WORKFLOW_STATES,
          newType: type,
        });
        // Same Integer normalization: drives the 'transitioned' vs 'created'
        // action below and the previousStatesRemoved count.
        cleanedStates = toJsNumber(cleanupResult[0]?.cleaned ?? 0);
      }
      const result = await neo4jService.run(CREATE_PHEROMONE_QUERY, {
        projectId: resolvedProjectId,
        nodeId,
        type,
        intensity,
        agentId,
        swarmId,
        data: dataJson,
        halfLife,
      });
      // The create query only yields a row when the target node matched.
      if (result.length === 0) {
        return createErrorResponse(`Failed to create pheromone. Node ${nodeId} may not exist in the graph.`);
      }
      const pheromone = result[0];
      return createSuccessResponse(JSON.stringify({
        success: true,
        action: cleanedStates > 0 ? 'transitioned' : 'created',
        previousStatesRemoved: cleanedStates,
        pheromone: {
          id: pheromone.id,
          projectId: pheromone.projectId,
          nodeId: pheromone.nodeId,
          type: pheromone.type,
          intensity: pheromone.intensity,
          agentId: pheromone.agentId,
          swarmId: pheromone.swarmId,
          timestamp: pheromone.timestamp,
          linkedToNode: pheromone.linkedToNode,
          halfLifeMs: halfLife,
          expiresIn: halfLife < 0 ? 'never' : `${Math.round(halfLife / 60000)} minutes`,
        },
      }));
    }
    catch (error) {
      await debugLog('Swarm pheromone error', { error: String(error) });
      return createErrorResponse(error instanceof Error ? error : String(error));
    }
    finally {
      await neo4jService.close();
    }
  });
};
@@ -0,0 +1,212 @@
1
/**
 * Swarm Sense Tool
 * Query pheromones in the code graph for stigmergic coordination
 */
import { z } from 'zod';
import { Neo4jService } from '../../storage/neo4j/neo4j.service.js';
import { TOOL_NAMES, TOOL_METADATA } from '../constants.js';
import { createErrorResponse, createSuccessResponse, resolveProjectIdOrError, debugLog } from '../utils.js';
import { PHEROMONE_TYPES } from './swarm-constants.js';
/**
 * Neo4j query to sense pheromones with decay calculation.
 * Uses nodeId-based matching (self-healing) instead of [:MARKS] relationship.
 * This survives graph rebuilds since nodeIds are deterministic.
 *
 * Decay model: currentIntensity = intensity * exp(-ln(2) * age / halfLife);
 * the literal 0.693147 is ln(2), so intensity halves once per half-life.
 * Pheromones with halfLife null or <= 0 never decay.
 * Every $filter parameter is optional: null (or an empty list) means
 * "no filter on this dimension".
 */
const SENSE_PHEROMONES_QUERY = `
// Match pheromones scoped to project, optionally filtering by type
MATCH (p:Pheromone)
WHERE p.projectId = $projectId
AND ($types IS NULL OR size($types) = 0 OR p.type IN $types)
AND ($nodeIds IS NULL OR size($nodeIds) = 0 OR p.nodeId IN $nodeIds)
AND ($agentIds IS NULL OR size($agentIds) = 0 OR p.agentId IN $agentIds)
AND ($swarmId IS NULL OR p.swarmId = $swarmId)
AND ($excludeAgentId IS NULL OR p.agentId <> $excludeAgentId)

// Calculate current intensity with exponential decay
WITH p,
CASE
WHEN p.halfLife IS NULL OR p.halfLife <= 0 THEN p.intensity
ELSE p.intensity * exp(-0.693147 * (timestamp() - p.timestamp) / p.halfLife)
END AS currentIntensity

// Filter by minimum intensity
WHERE currentIntensity >= $minIntensity

// Find target by nodeId (self-healing - survives graph rebuilds)
OPTIONAL MATCH (target)
WHERE target.id = p.nodeId AND target.projectId = p.projectId

// Return pheromone data
RETURN
p.id AS id,
p.projectId AS projectId,
p.nodeId AS nodeId,
p.type AS type,
p.intensity AS originalIntensity,
currentIntensity,
p.agentId AS agentId,
p.swarmId AS swarmId,
p.timestamp AS timestamp,
p.data AS data,
p.halfLife AS halfLifeMs,
CASE WHEN target IS NOT NULL THEN labels(target)[0] ELSE null END AS targetType,
CASE WHEN target IS NOT NULL THEN target.name ELSE null END AS targetName,
CASE WHEN target IS NOT NULL THEN target.filePath ELSE null END AS targetFilePath

ORDER BY currentIntensity DESC, p.timestamp DESC
LIMIT toInteger($limit)
`;
/**
 * Neo4j query to get pheromone summary statistics: count, average decayed
 * intensity, and contributing agents per type. Uses the same decay model and
 * $minIntensity cutoff as SENSE_PHEROMONES_QUERY so the stats match the list.
 */
const PHEROMONE_STATS_QUERY = `
MATCH (p:Pheromone)
WHERE p.projectId = $projectId
WITH p,
CASE
WHEN p.halfLife IS NULL OR p.halfLife <= 0 THEN p.intensity
ELSE p.intensity * exp(-0.693147 * (timestamp() - p.timestamp) / p.halfLife)
END AS currentIntensity
WHERE currentIntensity >= $minIntensity

RETURN
p.type AS type,
count(p) AS count,
avg(currentIntensity) AS avgIntensity,
collect(DISTINCT p.agentId) AS agents
ORDER BY count DESC
`;
/**
 * Neo4j query to clean up fully decayed pheromones for a project.
 * Deletes markers whose decayed intensity has dropped below 0.01; pheromones
 * that never decay (halfLife null or <= 0, e.g. 'warning') are never removed.
 */
const CLEANUP_DECAYED_QUERY = `
MATCH (p:Pheromone)
WHERE p.projectId = $projectId
AND p.halfLife IS NOT NULL
AND p.halfLife > 0
AND p.intensity * exp(-0.693147 * (timestamp() - p.timestamp) / p.halfLife) < 0.01
DETACH DELETE p
RETURN count(p) AS cleaned
`;
91
/**
 * Convert a Neo4j Integer (or plain number) to a JS number.
 * Without this, JSON.stringify renders an Integer as `{low, high}`.
 */
const asNumber = (value) =>
  value !== null && typeof value === 'object' && 'toNumber' in value ? value.toNumber() : value;
/**
 * Parse pheromone metadata JSON defensively: one pheromone with a corrupt
 * `data` property must not fail the entire sense response.
 */
const parsePheromoneData = (raw) => {
  if (!raw) {
    return null;
  }
  try {
    return JSON.parse(raw);
  } catch {
    // Surface the raw payload instead of throwing.
    return { unparsedData: raw };
  }
};
/**
 * Register the swarm_sense MCP tool.
 *
 * Queries pheromone markers (with exponential decay applied), optionally
 * filtered by type/node/agent/swarm, with summary statistics and an optional
 * cleanup pass for fully decayed markers.
 */
export const createSwarmSenseTool = (server) => {
  server.registerTool(TOOL_NAMES.swarmSense, {
    title: TOOL_METADATA[TOOL_NAMES.swarmSense].title,
    description: TOOL_METADATA[TOOL_NAMES.swarmSense].description,
    inputSchema: {
      projectId: z.string().describe('Project ID, name, or path (e.g., "backend" or "proj_a1b2c3d4e5f6")'),
      types: z
        .array(z.enum(PHEROMONE_TYPES))
        .optional()
        .describe('Filter by pheromone types. If empty, returns all types. Options: exploring, modifying, claiming, completed, warning, blocked, proposal, needs_review'),
      nodeIds: z.array(z.string()).optional().describe('Filter by specific node IDs. If empty, searches all nodes.'),
      agentIds: z
        .array(z.string())
        .optional()
        .describe('Filter by specific agent IDs. If empty, returns pheromones from all agents.'),
      swarmId: z.string().optional().describe('Filter by swarm ID. If empty, returns pheromones from all swarms.'),
      excludeAgentId: z
        .string()
        .optional()
        .describe('Exclude pheromones from this agent ID (useful for seeing what OTHER agents are doing)'),
      minIntensity: z
        .number()
        .min(0)
        .max(1)
        .optional()
        .default(0.3)
        .describe('Minimum effective intensity after decay (0.0-1.0, default: 0.3)'),
      limit: z
        .number()
        .int()
        .min(1)
        .max(500)
        .optional()
        .default(50)
        .describe('Maximum number of pheromones to return (default: 50, max: 500)'),
      includeStats: z.boolean().optional().default(false).describe('Include summary statistics by pheromone type'),
      cleanup: z
        .boolean()
        .optional()
        .default(false)
        .describe('Run cleanup of fully decayed pheromones (intensity < 0.01)'),
    },
  }, async ({ projectId, types, nodeIds, agentIds, swarmId, excludeAgentId, minIntensity = 0.3, limit = 50, includeStats = false, cleanup = false, }) => {
    const neo4jService = new Neo4jService();
    // Resolve project ID (accepts id, name, or path)
    const projectResult = await resolveProjectIdOrError(projectId, neo4jService);
    if (!projectResult.success) {
      await neo4jService.close();
      return projectResult.error;
    }
    const resolvedProjectId = projectResult.projectId;
    try {
      const result = {
        pheromones: [],
        projectId: resolvedProjectId,
        query: {
          types: types ?? null,
          minIntensity,
          limit,
        },
      };
      // Run cleanup if requested; normalize the count so the JSON response
      // carries a plain number rather than a serialized neo4j Integer.
      if (cleanup) {
        const cleanupResult = await neo4jService.run(CLEANUP_DECAYED_QUERY, { projectId: resolvedProjectId });
        result.cleaned = asNumber(cleanupResult[0]?.cleaned ?? 0);
      }
      // Query pheromones (ensure limit is an integer for the Cypher LIMIT clause)
      const pheromones = await neo4jService.run(SENSE_PHEROMONES_QUERY, {
        projectId: resolvedProjectId,
        types: types ?? null,
        nodeIds: nodeIds ?? null,
        agentIds: agentIds ?? null,
        swarmId: swarmId ?? null,
        excludeAgentId: excludeAgentId ?? null,
        minIntensity,
        limit: Math.floor(limit),
      });
      result.pheromones = pheromones.map((p) => {
        // Convert Neo4j Integer timestamp to a JS number for age math.
        const ts = asNumber(p.timestamp);
        return {
          id: p.id,
          projectId: p.projectId,
          nodeId: p.nodeId,
          type: p.type,
          intensity: Math.round(p.currentIntensity * 1000) / 1000, // Round to 3 decimals
          originalIntensity: p.originalIntensity,
          agentId: p.agentId,
          swarmId: p.swarmId,
          timestamp: ts,
          age: ts ? `${Math.round((Date.now() - ts) / 1000)}s ago` : null,
          data: parsePheromoneData(p.data),
          target: p.targetType
            ? {
                type: p.targetType,
                name: p.targetName,
                filePath: p.targetFilePath,
              }
            : null,
        };
      });
      // Include stats if requested
      if (includeStats) {
        const stats = await neo4jService.run(PHEROMONE_STATS_QUERY, { projectId: resolvedProjectId, minIntensity });
        result.stats = stats.map((s) => ({
          type: s.type,
          count: asNumber(s.count),
          avgIntensity: Math.round(s.avgIntensity * 1000) / 1000,
          activeAgents: s.agents,
        }));
      }
      return createSuccessResponse(JSON.stringify(result, null, 2));
    }
    catch (error) {
      await debugLog('Swarm sense error', { error: String(error) });
      return createErrorResponse(error instanceof Error ? error : String(error));
    }
    finally {
      await neo4jService.close();
    }
  });
};
@@ -215,15 +215,19 @@ export const QUERIES = {
215
215
  // The previous query (WHERE startNode(r) IS NULL OR endNode(r) IS NULL) could never match anything
216
216
  // Get existing nodes (excluding files being reparsed) for edge target matching
217
217
  // Returns minimal info needed for edge detection: id, name, coreType, semanticType
218
+ // NOTE: Using property-based query instead of path traversal to avoid Cartesian explosion
219
+ // The old query `MATCH (sf:SourceFile)-[*]->(n)` caused OOM with large graphs
218
220
  GET_EXISTING_NODES_FOR_EDGE_DETECTION: `
219
- MATCH (sf:SourceFile)-[*]->(n)
220
- WHERE NOT sf.filePath IN $excludeFilePaths AND sf.projectId = $projectId
221
- RETURN n.id AS id,
221
+ MATCH (n)
222
+ WHERE n.projectId = $projectId
223
+ AND n.filePath IS NOT NULL
224
+ AND NOT n.filePath IN $excludeFilePaths
225
+ RETURN DISTINCT n.id AS id,
222
226
  n.name AS name,
223
227
  n.coreType AS coreType,
224
228
  n.semanticType AS semanticType,
225
229
  labels(n) AS labels,
226
- sf.filePath AS filePath
230
+ n.filePath AS filePath
227
231
  `,
228
232
  EXPLORE_ALL_CONNECTIONS: (maxDepth = MAX_TRAVERSAL_DEPTH, direction = 'BOTH', relationshipTypes) => {
229
233
  const safeMaxDepth = Math.min(Math.max(maxDepth, 1), MAX_TRAVERSAL_DEPTH);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "code-graph-context",
3
- "version": "2.2.0",
3
+ "version": "2.3.0",
4
4
  "description": "MCP server that builds code graphs to provide rich context to LLMs",
5
5
  "type": "module",
6
6
  "homepage": "https://github.com/drewdrewH/code-graph-context#readme",