code-graph-context 2.0.1 → 2.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +221 -2
- package/dist/constants.js +167 -0
- package/dist/core/config/fairsquare-framework-schema.js +9 -7
- package/dist/core/config/schema.js +41 -2
- package/dist/core/embeddings/natural-language-to-cypher.service.js +166 -110
- package/dist/core/parsers/typescript-parser.js +1039 -742
- package/dist/core/parsers/workspace-parser.js +175 -193
- package/dist/core/utils/code-normalizer.js +299 -0
- package/dist/core/utils/file-change-detection.js +17 -2
- package/dist/core/utils/file-utils.js +40 -5
- package/dist/core/utils/graph-factory.js +161 -0
- package/dist/core/utils/shared-utils.js +79 -0
- package/dist/core/workspace/workspace-detector.js +59 -5
- package/dist/mcp/constants.js +261 -8
- package/dist/mcp/handlers/graph-generator.handler.js +1 -0
- package/dist/mcp/handlers/incremental-parse.handler.js +22 -6
- package/dist/mcp/handlers/parallel-import.handler.js +136 -0
- package/dist/mcp/handlers/streaming-import.handler.js +14 -59
- package/dist/mcp/mcp.server.js +77 -2
- package/dist/mcp/services/job-manager.js +5 -8
- package/dist/mcp/services/watch-manager.js +64 -25
- package/dist/mcp/tools/detect-dead-code.tool.js +413 -0
- package/dist/mcp/tools/detect-duplicate-code.tool.js +450 -0
- package/dist/mcp/tools/hello.tool.js +16 -2
- package/dist/mcp/tools/impact-analysis.tool.js +20 -4
- package/dist/mcp/tools/index.js +37 -0
- package/dist/mcp/tools/parse-typescript-project.tool.js +15 -14
- package/dist/mcp/tools/swarm-cleanup.tool.js +157 -0
- package/dist/mcp/tools/swarm-constants.js +35 -0
- package/dist/mcp/tools/swarm-pheromone.tool.js +196 -0
- package/dist/mcp/tools/swarm-sense.tool.js +212 -0
- package/dist/mcp/workers/chunk-worker-pool.js +196 -0
- package/dist/mcp/workers/chunk-worker.types.js +4 -0
- package/dist/mcp/workers/chunk.worker.js +89 -0
- package/dist/mcp/workers/parse-coordinator.js +183 -0
- package/dist/mcp/workers/worker.pool.js +54 -0
- package/dist/storage/neo4j/neo4j.service.js +198 -14
- package/package.json +1 -1
package/dist/mcp/handlers/streaming-import.handler.js
CHANGED

@@ -33,36 +33,33 @@ export class StreamingImportHandler {
         if (config.onProgress) {
             this.progressReporter.setCallback(config.onProgress);
         }
-        // Set project ID on graph generator
-        this.graphGeneratorHandler.setProjectId(config.projectId);
-        // Phase 1: Get discovered files (already discovered by worker, this returns cached result)
         const allFilePaths = await parser.discoverSourceFiles();
-        console.log(`Found ${allFilePaths.length} files to parse`);
         await debugLog('Streaming import started', {
             totalFiles: allFilePaths.length,
             chunkSize: config.chunkSize,
         });
-
+        this.progressReporter.report({
+            phase: 'parsing',
+            current: 0,
+            total: allFilePaths.length,
+            message: `Starting streaming import of ${allFilePaths.length} files in chunks of ~${config.chunkSize}`,
+        });
         const chunks = [];
         for (let i = 0; i < allFilePaths.length; i += config.chunkSize) {
             chunks.push(allFilePaths.slice(i, i + config.chunkSize));
         }
-        console.log(`📦 Split into ${chunks.length} chunks of ~${config.chunkSize} files each`);
         let totalNodesImported = 0;
         let totalEdgesImported = 0;
-        // Phase 2: Parse and import chunks
         for (let chunkIndex = 0; chunkIndex < chunks.length; chunkIndex++) {
             const chunk = chunks[chunkIndex];
             const filesProcessed = chunkIndex * config.chunkSize + chunk.length;
-            console.log(`\nProcessing chunk ${chunkIndex + 1}/${chunks.length} (${chunk.length} files)`);
             try {
-                //
+                // Skip edge resolution during chunk parsing - resolve after all chunks complete
                 const { nodes, edges } = await parser.parseChunk(chunk, true);
-                //
+                // Accumulate nodes for cross-chunk edge resolution
                 parser.addExistingNodesFromChunk(nodes);
-                // Import to Neo4j if we have data
                 if (nodes.length > 0 || edges.length > 0) {
-                    await debugLog('Importing chunk
+                    await debugLog('Importing chunk', {
                         chunkIndex: chunkIndex + 1,
                         totalChunks: chunks.length,
                         nodeCount: nodes.length,

@@ -72,14 +69,12 @@ export class StreamingImportHandler {
                    totalEdgesImported += edges.length;
                }
                else {
-                    console.warn(`⚠️ Chunk ${chunkIndex + 1} produced 0 nodes/edges from ${chunk.length} files`);
                    await debugLog('Empty chunk result', {
                        chunkIndex: chunkIndex + 1,
                        fileCount: chunk.length,
                        sampleFiles: chunk.slice(0, 3),
                    });
                }
-                // Report progress with all relevant data
                await this.progressReporter.report({
                    phase: 'importing',
                    current: filesProcessed,

@@ -94,10 +89,8 @@ export class StreamingImportHandler {
                        totalChunks: chunks.length,
                    },
                });
-                console.log(`✅ Chunk ${chunkIndex + 1}: ${nodes.length} nodes, ${edges.length} edges imported`);
            }
            catch (chunkError) {
-                console.error(`❌ Error processing chunk ${chunkIndex + 1}:`, chunkError);
                await debugLog('Chunk processing error', {
                    chunkIndex: chunkIndex + 1,
                    fileCount: chunk.length,

@@ -105,40 +98,24 @@ export class StreamingImportHandler {
                    error: chunkError instanceof Error ? chunkError.message : String(chunkError),
                    stack: chunkError instanceof Error ? chunkError.stack : undefined,
                });
-                // Re-throw to fail the entire import - don't silently continue
                throw chunkError;
            }
-            // Note: Don't clear parsed data during streaming - we need accumulated nodes for cross-chunk edge resolution
-            // Memory usage is bounded because we only keep Neo4jNode references (not full AST)
        }
-        // Phase 3: Resolve cross-chunk deferred edges
        await this.progressReporter.reportResolving(0, totalEdgesImported);
-
-        const resolvedEdges = await parser.resolveDeferredEdgesManually();
+        const resolvedEdges = await parser.resolveDeferredEdges();
        if (resolvedEdges.length > 0) {
            await this.importEdgesToNeo4j(resolvedEdges);
            totalEdgesImported += resolvedEdges.length;
-
-        }
-        else {
-            console.log('ℹ️ No cross-chunk edges to resolve');
+            await debugLog(`Resolved ${resolvedEdges.length} cross-chunk edges`);
        }
-        // Phase 3b: Apply edge enhancements on all accumulated nodes
-        // This catches context-dependent edges (like INTERNAL_API_CALL) that span chunks
-        console.log('\nApplying edge enhancements on all nodes...');
        const enhancedEdges = await parser.applyEdgeEnhancementsManually();
        if (enhancedEdges.length > 0) {
            await this.importEdgesToNeo4j(enhancedEdges);
            totalEdgesImported += enhancedEdges.length;
-
+            await debugLog(`Created ${enhancedEdges.length} edges from edge enhancements`);
        }
-        else {
-            console.log('ℹ️ No edges from edge enhancements');
-        }
-        // Clear accumulated data now that edge resolution is complete
        parser.clearParsedData();
        await this.progressReporter.reportResolving(resolvedEdges.length, resolvedEdges.length);
-        // Phase 4: Complete
        const elapsedMs = Date.now() - startTime;
        await this.progressReporter.reportComplete(totalNodesImported, totalEdgesImported);
        const result = {

@@ -148,32 +125,17 @@ export class StreamingImportHandler {
            chunksProcessed: chunks.length,
            elapsedMs,
        };
-        console.log(`\nStreaming import complete!`);
-        console.log(` Files: ${allFilePaths.length}`);
-        console.log(` Nodes: ${totalNodesImported}`);
-        console.log(` Edges: ${totalEdgesImported}`);
-        console.log(` Time: ${(elapsedMs / 1000).toFixed(2)}s`);
        await debugLog('Streaming import completed', result);
        return result;
    }
-    /**
-     * Import a chunk of nodes and edges to Neo4j using the graph generator handler
-     */
    async importChunkToNeo4j(nodes, edges) {
-        // Write to temporary JSON and use existing import mechanism
-        // This reuses the batched embedding and import logic
        const tempPath = generateTempPath('chunk');
        const fs = await import('fs/promises');
        try {
-            await fs.writeFile(tempPath, JSON.stringify({
-                nodes,
-                edges,
-                metadata: { chunked: true },
-            }));
+            await fs.writeFile(tempPath, JSON.stringify({ nodes, edges, metadata: { chunked: true } }));
            await this.graphGeneratorHandler.generateGraph(tempPath, DEFAULTS.batchSize, false);
        }
        finally {
-            // Clean up temp file
            try {
                await fs.unlink(tempPath);
            }

@@ -182,20 +144,13 @@ export class StreamingImportHandler {
            }
        }
    }
-    /**
-     * Import resolved edges to Neo4j
-     */
    async importEdgesToNeo4j(edges) {
        if (edges.length === 0)
            return;
        const tempPath = generateTempPath('edges');
        const fs = await import('fs/promises');
        try {
-            await fs.writeFile(tempPath, JSON.stringify({
-                nodes: [],
-                edges,
-                metadata: { edgesOnly: true },
-            }));
+            await fs.writeFile(tempPath, JSON.stringify({ nodes: [], edges, metadata: { edgesOnly: true } }));
            await this.graphGeneratorHandler.generateGraph(tempPath, DEFAULTS.batchSize, false);
        }
        finally {
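
The reshaped control flow is easy to miss among the deletions: each chunk is parsed with edge resolution deferred (the `true` flag to `parseChunk`), node references accumulate across chunks, and edges that cross chunk boundaries are resolved in a single pass at the end. A minimal sketch of that pattern, reusing the parser method names visible above but with a hypothetical `importChunk` callback standing in for the Neo4j handler:

    // Sketch only: `parser` follows the methods shown in the diff above;
    // `importChunk(nodes, edges)` is a hypothetical stand-in for importChunkToNeo4j.
    async function chunkedImport(parser, importChunk, files, chunkSize) {
        const chunks = [];
        for (let i = 0; i < files.length; i += chunkSize) {
            chunks.push(files.slice(i, i + chunkSize));
        }
        let totalEdges = 0;
        for (const chunk of chunks) {
            // Defer edge resolution: an edge may target a node in a later chunk.
            const { nodes, edges } = await parser.parseChunk(chunk, true);
            parser.addExistingNodesFromChunk(nodes); // keep node refs, not full ASTs
            await importChunk(nodes, edges);
            totalEdges += edges.length;
        }
        // Only after every chunk's nodes are known can deferred edges resolve.
        const crossChunk = await parser.resolveDeferredEdges();
        await importChunk([], crossChunk);
        parser.clearParsedData(); // safe now that resolution is complete
        return totalEdges + crossChunk.length;
    }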
package/dist/mcp/mcp.server.js
CHANGED
@@ -18,15 +18,38 @@ import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
 import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
 import { MCP_SERVER_CONFIG, MESSAGES } from './constants.js';
 import { performIncrementalParse } from './handlers/incremental-parse.handler.js';
-import { watchManager } from './services/watch-manager.js';
 import { initializeServices } from './service-init.js';
+import { watchManager } from './services/watch-manager.js';
 import { registerAllTools } from './tools/index.js';
 import { debugLog } from './utils.js';
+// Track server state for debugging
+let serverStartTime;
+let toolCallCount = 0;
+let lastToolCall = null;
+/**
+ * Log memory usage and server stats
+ */
+const logServerStats = async (context) => {
+    const mem = process.memoryUsage();
+    await debugLog(`Server stats [${context}]`, {
+        uptime: serverStartTime ? `${Math.round((Date.now() - serverStartTime.getTime()) / 1000)}s` : 'not started',
+        toolCallCount,
+        lastToolCall,
+        memory: {
+            heapUsed: `${Math.round(mem.heapUsed / 1024 / 1024)}MB`,
+            heapTotal: `${Math.round(mem.heapTotal / 1024 / 1024)}MB`,
+            rss: `${Math.round(mem.rss / 1024 / 1024)}MB`,
+        },
+        pid: process.pid,
+    });
+};
 /**
  * Main server initialization and startup
  */
 const startServer = async () => {
+    serverStartTime = new Date();
     console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.starting }));
+    await debugLog('Server starting', { pid: process.pid, startTime: serverStartTime.toISOString() });
     // Create MCP server instance
     const server = new McpServer({
         name: MCP_SERVER_CONFIG.name,

@@ -34,6 +57,7 @@ const startServer = async () => {
     });
     // Register all tools
     registerAllTools(server);
+    await debugLog('Tools registered', { toolCount: 15 });
     // Configure watch manager with incremental parse handler and MCP server
     watchManager.setIncrementalParseHandler(performIncrementalParse);
     watchManager.setMcpServer(server.server);

@@ -50,22 +74,46 @@ const startServer = async () => {
     });
     // Create and connect transport
     console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.creatingTransport }));
+    await debugLog('Creating stdio transport', {});
     const transport = new StdioServerTransport();
+    // Add transport event logging
+    process.stdin.on('close', async () => {
+        await debugLog('STDIN closed - client disconnected', {});
+        await logServerStats('stdin-close');
+    });
+    process.stdin.on('end', async () => {
+        await debugLog('STDIN ended', {});
+        await logServerStats('stdin-end');
+    });
+    process.stdin.on('error', async (err) => {
+        await debugLog('STDIN error', { error: err.message, stack: err.stack });
+    });
+    process.stdout.on('close', async () => {
+        await debugLog('STDOUT closed', {});
+    });
+    process.stdout.on('error', async (err) => {
+        await debugLog('STDOUT error', { error: err.message, stack: err.stack });
+    });
     console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.connectingTransport }));
+    await debugLog('Connecting transport', {});
     await server.connect(transport);
     console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.connected }));
+    await debugLog('Server connected and ready', { pid: process.pid });
+    await logServerStats('server-ready');
 };
 /**
  * Graceful shutdown handler
  */
 const shutdown = async (signal) => {
     console.error(JSON.stringify({ level: 'info', message: `Received ${signal}, shutting down...` }));
+    await logServerStats(`shutdown-${signal}`);
     try {
         await watchManager.stopAllWatchers();
         await debugLog('Shutdown complete', { signal });
     }
     catch (error) {
         console.error(JSON.stringify({ level: 'error', message: 'Error during shutdown', error: String(error) }));
+        await debugLog('Error during shutdown', { signal, error: String(error) });
     }
     process.exit(0);
 };

@@ -73,14 +121,41 @@ const shutdown = async (signal) => {
 process.on('uncaughtException', async (error) => {
     console.error(JSON.stringify({ level: 'error', message: 'Uncaught exception', error: String(error), stack: error.stack }));
     await debugLog('Uncaught exception', { error: String(error), stack: error.stack });
+    await logServerStats('uncaught-exception');
 });
 process.on('unhandledRejection', async (reason) => {
     console.error(JSON.stringify({ level: 'error', message: 'Unhandled rejection', reason: String(reason) }));
     await debugLog('Unhandled rejection', { reason: String(reason) });
+    await logServerStats('unhandled-rejection');
+});
+// Log other process events that might indicate issues
+process.on('warning', async (warning) => {
+    await debugLog('Process warning', { name: warning.name, message: warning.message, stack: warning.stack });
+});
+process.on('beforeExit', async (code) => {
+    await debugLog('Process beforeExit', { code });
+    await logServerStats('before-exit');
+});
+process.on('exit', (code) => {
+    // Note: Can't use async here, exit is synchronous
+    console.error(JSON.stringify({ level: 'info', message: `Process exiting with code ${code}` }));
 });
 // Register shutdown handlers
-
+// NOTE: Only handle SIGTERM for graceful shutdown. SIGINT is ignored because
+// Claude Code may propagate SIGINT to child processes when spawning agents,
+// which would incorrectly kill the MCP server. The MCP server lifecycle is
+// managed by Claude Code via stdio transport closure.
 process.on('SIGTERM', () => shutdown('SIGTERM'));
+// Log SIGINT but don't exit - Claude Code manages our lifecycle
+process.on('SIGINT', async () => {
+    await debugLog('SIGINT received but ignored - lifecycle managed by Claude Code', {});
+    await logServerStats('sigint-ignored');
+});
+// Also ignore SIGHUP which can be sent during terminal operations
+process.on('SIGHUP', async () => {
+    await debugLog('SIGHUP received but ignored', {});
+    await logServerStats('sighup-ignored');
+});
 // Start the server
 console.error(JSON.stringify({ level: 'info', message: MESSAGES.server.startingServer }));
 await startServer();
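
Most of this hunk is lifecycle forensics for a stdio-hosted server: stdin/stdout events, process warnings, and ignored signals all snapshot uptime and memory so unexpected exits can be diagnosed from the debug log. A self-contained sketch of that stats snapshot, using only Node built-ins (`serverStats` and `startedAt` are illustrative names, not the package's API):

    // Illustrative stand-alone version of the logServerStats idea.
    const startedAt = Date.now();
    const mb = (bytes) => `${Math.round(bytes / 1024 / 1024)}MB`;
    function serverStats(context) {
        const mem = process.memoryUsage();
        return {
            context,
            uptime: `${Math.round((Date.now() - startedAt) / 1000)}s`,
            memory: { heapUsed: mb(mem.heapUsed), heapTotal: mb(mem.heapTotal), rss: mb(mem.rss) },
            pid: process.pid,
        };
    }
    // e.g. at one of the points the diff instruments:
    process.stdin.on('close', () => console.error(JSON.stringify(serverStats('stdin-close'))));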
package/dist/mcp/services/job-manager.js
CHANGED

@@ -3,6 +3,7 @@
  * Tracks background parsing jobs for async mode
  */
 import { randomBytes } from 'crypto';
+import { JOBS } from '../constants.js';
 const generateJobId = () => {
     return `job_${randomBytes(8).toString('hex')}`;
 };

@@ -15,10 +16,6 @@ const createInitialProgress = () => ({
     currentChunk: 0,
     totalChunks: 0,
 });
-// Cleanup interval: 5 minutes
-const CLEANUP_INTERVAL_MS = 5 * 60 * 1000;
-// Maximum concurrent jobs to prevent memory exhaustion
-const MAX_JOBS = 100;
 class JobManager {
     jobs = new Map();
     cleanupInterval = null;

@@ -38,7 +35,7 @@ class JobManager {
            if (cleaned > 0) {
                console.log(`[JobManager] Cleaned up ${cleaned} old jobs`);
            }
-        }, CLEANUP_INTERVAL_MS);
+        }, JOBS.cleanupIntervalMs);
        // Don't prevent Node.js from exiting if this is the only timer
        this.cleanupInterval.unref();
    }

@@ -57,11 +54,11 @@ class JobManager {
     */
    createJob(projectPath, projectId) {
        // SECURITY: Enforce maximum job limit to prevent memory exhaustion
-        if (this.jobs.size >= MAX_JOBS) {
+        if (this.jobs.size >= JOBS.maxJobs) {
            // Try to cleanup old jobs first
            const cleaned = this.cleanupOldJobs(0); // Remove all completed/failed jobs
-            if (this.jobs.size >= MAX_JOBS) {
-                throw new Error(`Maximum job limit (${MAX_JOBS}) reached. ` +
+            if (this.jobs.size >= JOBS.maxJobs) {
+                throw new Error(`Maximum job limit (${JOBS.maxJobs}) reached. ` +
                    `${this.listJobs('running').length} jobs are currently running. ` +
                    `Please wait for jobs to complete or cancel existing jobs.`);
            }
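
The change here is mechanical but deliberate: the magic numbers that previously lived in this file (cleanup every 5 minutes, at most 100 jobs) now come from a shared `JOBS` object in `mcp/constants.js`, so limits are defined once and imported everywhere. A plausible shape for that entry, inferred from the removed locals (the `Object.freeze` wrapper is an assumption, not confirmed by this diff):

    // Hypothetical sketch of the JOBS entry in mcp/constants.js;
    // values match the constants removed above.
    export const JOBS = Object.freeze({
        cleanupIntervalMs: 5 * 60 * 1000, // formerly local CLEANUP_INTERVAL_MS
        maxJobs: 100,                     // formerly local MAX_JOBS
    });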
package/dist/mcp/services/watch-manager.js
CHANGED

@@ -4,19 +4,8 @@
  * Uses @parcel/watcher for high-performance file watching
  */
 import * as watcher from '@parcel/watcher';
+import { WATCH } from '../constants.js';
 import { debugLog } from '../utils.js';
-const DEFAULT_EXCLUDE_PATTERNS = [
-    '**/node_modules/**',
-    '**/dist/**',
-    '**/build/**',
-    '**/.git/**',
-    '**/*.d.ts',
-    '**/*.js.map',
-    '**/*.js',
-];
-const DEFAULT_DEBOUNCE_MS = 1000;
-const MAX_WATCHERS = 10;
-const MAX_PENDING_EVENTS = 1000;
 class WatchManager {
     watchers = new Map();
     mcpServer = null;

@@ -37,9 +26,12 @@ class WatchManager {
     * Send a notification via MCP logging (if supported)
     */
    sendNotification(notification) {
+        debugLog('sendNotification called', { type: notification.type, projectId: notification.projectId });
        if (!this.mcpServer) {
+            debugLog('sendNotification: no MCP server, skipping', {});
            return;
        }
+        debugLog('sendNotification: sending to MCP', { type: notification.type });
        // sendLoggingMessage returns a Promise - use .catch() to handle rejection
        this.mcpServer
            .sendLoggingMessage({

@@ -47,9 +39,16 @@ class WatchManager {
            logger: 'file-watcher',
            data: notification,
        })
-            .catch(() => {
-
+            .then(() => {
+            debugLog('sendNotification: MCP message sent successfully', { type: notification.type });
+        })
+            .catch((error) => {
+            // MCP logging not supported - log but don't crash
            // This is expected if the client doesn't support logging capability
+            debugLog('sendNotification: MCP message failed (expected if client lacks logging)', {
+                type: notification.type,
+                error: String(error)
+            });
        });
    }
    /**

@@ -62,15 +61,15 @@ class WatchManager {
            return this.getWatcherInfoFromState(existing);
        }
        // Enforce maximum watcher limit
-        if (this.watchers.size >= MAX_WATCHERS) {
-            throw new Error(`Maximum watcher limit (${MAX_WATCHERS}) reached. ` + `Stop an existing watcher before starting a new one.`);
+        if (this.watchers.size >= WATCH.maxWatchers) {
+            throw new Error(`Maximum watcher limit (${WATCH.maxWatchers}) reached. ` + `Stop an existing watcher before starting a new one.`);
        }
        const fullConfig = {
            projectPath: config.projectPath,
            projectId: config.projectId,
            tsconfigPath: config.tsconfigPath,
-            debounceMs: config.debounceMs ?? DEFAULT_DEBOUNCE_MS,
-            excludePatterns: config.excludePatterns ?? DEFAULT_EXCLUDE_PATTERNS,
+            debounceMs: config.debounceMs ?? WATCH.defaultDebounceMs,
+            excludePatterns: config.excludePatterns ?? [...WATCH.excludePatterns],
        };
        await debugLog('Creating @parcel/watcher subscription', {
            watchPath: fullConfig.projectPath,

@@ -139,7 +138,7 @@ class WatchManager {
     * Handle a file system event
     */
    handleFileEvent(state, type, filePath) {
-        debugLog('
+        debugLog('handleFileEvent START', { type, filePath, projectId: state.projectId, status: state.status, isStopping: state.isStopping });
        // Ignore events if watcher is stopping or not active
        if (state.isStopping || state.status !== 'active') {
            debugLog('Ignoring event - watcher not active or stopping', {

@@ -154,9 +153,9 @@ class WatchManager {
            timestamp: Date.now(),
        };
        // Prevent unbounded event accumulation - drop oldest events if buffer is full
-        if (state.pendingEvents.length >= MAX_PENDING_EVENTS) {
+        if (state.pendingEvents.length >= WATCH.maxPendingEvents) {
            debugLog('Event buffer full, dropping oldest events', { projectId: state.projectId });
-            state.pendingEvents = state.pendingEvents.slice(-Math.floor(
+            state.pendingEvents = state.pendingEvents.slice(-Math.floor(WATCH.maxPendingEvents / 2));
        }
        state.pendingEvents.push(event);
        debugLog('Event added to pending', { pendingCount: state.pendingEvents.length });

@@ -177,20 +176,35 @@ class WatchManager {
            timestamp: new Date().toISOString(),
        });
        // Set new debounce timer
+        debugLog('handleFileEvent: setting debounce timer', { debounceMs: state.config.debounceMs });
        state.debounceTimer = setTimeout(() => {
+            debugLog('handleFileEvent: debounce timer fired, calling processEvents', { projectId: state.projectId });
            this.processEvents(state).catch((error) => {
+                debugLog('handleFileEvent: processEvents error', { error: String(error) });
                console.error('[WatchManager] Error in processEvents:', error);
            });
        }, state.config.debounceMs);
+        debugLog('handleFileEvent END', { pendingCount: state.pendingEvents.length });
    }
    /**
     * Process accumulated file events after debounce period
     */
    async processEvents(state) {
+        await debugLog('processEvents START', {
+            projectId: state.projectId,
+            isProcessing: state.isProcessing,
+            pendingCount: state.pendingEvents.length,
+            isStopping: state.isStopping
+        });
        // Don't process if already processing, no events, or watcher is stopping
-        if (state.isProcessing || state.pendingEvents.length === 0 || state.isStopping)
+        if (state.isProcessing || state.pendingEvents.length === 0 || state.isStopping) {
+            await debugLog('processEvents: early return', {
+                reason: state.isProcessing ? 'already processing' : state.pendingEvents.length === 0 ? 'no events' : 'stopping'
+            });
            return;
+        }
        state.isProcessing = true;
+        await debugLog('processEvents: set isProcessing=true', {});
        const events = [...state.pendingEvents];
        state.pendingEvents = [];
        state.debounceTimer = null;

@@ -210,7 +224,15 @@ class WatchManager {
        if (!this.incrementalParseHandler) {
            throw new Error('Incremental parse handler not configured');
        }
+        await debugLog('processEvents: calling incrementalParseHandler', {
+            projectPath: state.projectPath,
+            projectId: state.projectId
+        });
        const result = await this.incrementalParseHandler(state.projectPath, state.projectId, state.tsconfigPath);
+        await debugLog('processEvents: incrementalParseHandler returned', {
+            nodesUpdated: result.nodesUpdated,
+            edgesUpdated: result.edgesUpdated
+        });
        state.lastUpdateTime = new Date();
        const elapsedMs = Date.now() - startTime;
        this.sendNotification({

@@ -232,6 +254,7 @@ class WatchManager {
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
+            await debugLog('processEvents: error caught', { error: errorMessage });
            this.sendNotification({
                type: 'incremental_parse_failed',
                projectId: state.projectId,

@@ -246,6 +269,7 @@ class WatchManager {
        }
        finally {
            state.isProcessing = false;
+            await debugLog('processEvents END', { projectId: state.projectId, isProcessing: state.isProcessing });
        }
    }
    /**

@@ -254,9 +278,14 @@ class WatchManager {
    handleWatcherError(state, error) {
        state.status = 'error';
        state.errorMessage = error instanceof Error ? error.message : String(error);
-        debugLog('
+        debugLog('handleWatcherError START', { projectId: state.projectId, error: state.errorMessage });
        // Clean up the failed watcher to prevent it from staying in error state indefinitely
-        this.stopWatching(state.projectId)
+        this.stopWatching(state.projectId)
+            .then(() => {
+            debugLog('handleWatcherError: cleanup succeeded', { projectId: state.projectId });
+        })
+            .catch((cleanupError) => {
+            debugLog('handleWatcherError: cleanup failed', { projectId: state.projectId, cleanupError: String(cleanupError) });
            console.error(`[WatchManager] Failed to cleanup errored watcher ${state.projectId}:`, cleanupError);
        });
    }

@@ -266,23 +295,33 @@ class WatchManager {
     * Promise is tracked on state to allow cleanup during stop
     */
    syncMissedChanges(state) {
-        if (!this.incrementalParseHandler)
+        debugLog('syncMissedChanges START', { projectId: state.projectId });
+        if (!this.incrementalParseHandler) {
+            debugLog('syncMissedChanges: no handler, skipping', {});
            return;
+        }
        // Track the promise on state so stopWatching can wait for it
        state.syncPromise = this.incrementalParseHandler(state.projectPath, state.projectId, state.tsconfigPath)
            .then((result) => {
+            debugLog('syncMissedChanges: completed', {
+                projectId: state.projectId,
+                nodesUpdated: result.nodesUpdated,
+                edgesUpdated: result.edgesUpdated
+            });
            if (result.nodesUpdated > 0 || result.edgesUpdated > 0) {
                console.log(`[WatchManager] Synced missed changes for ${state.projectId}: ` +
                    `${result.nodesUpdated} nodes, ${result.edgesUpdated} edges`);
            }
        })
            .catch((error) => {
+            debugLog('syncMissedChanges: error', { projectId: state.projectId, error: String(error), isStopping: state.isStopping });
            // Only log if watcher hasn't been stopped
            if (!state.isStopping) {
                console.error(`[WatchManager] Failed to sync missed changes for ${state.projectId}:`, error);
            }
        })
            .finally(() => {
+            debugLog('syncMissedChanges END', { projectId: state.projectId });
            state.syncPromise = undefined;
        });
    }