code-graph-context 2.4.5 → 2.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +466 -620
- package/dist/mcp/constants.js +274 -0
- package/dist/mcp/handlers/swarm-worker.handler.js +251 -0
- package/dist/mcp/handlers/task-decomposition.handler.js +294 -0
- package/dist/mcp/tools/index.js +13 -1
- package/dist/mcp/tools/swarm-claim-task.tool.js +331 -0
- package/dist/mcp/tools/swarm-complete-task.tool.js +421 -0
- package/dist/mcp/tools/swarm-constants.js +113 -0
- package/dist/mcp/tools/swarm-get-tasks.tool.js +419 -0
- package/dist/mcp/tools/swarm-orchestrate.tool.js +389 -0
- package/dist/mcp/tools/swarm-post-task.tool.js +220 -0
- package/package.json +1 -1
|
@@ -0,0 +1,294 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Task Decomposition Handler
|
|
3
|
+
* Transforms high-level natural language tasks into atomic, dependency-ordered SwarmTasks
|
|
4
|
+
*/
|
|
5
|
+
import path from 'path';
|
|
6
|
+
import { TASK_PRIORITIES, TASK_INFERENCE_PATTERNS, generateTaskId, } from '../tools/swarm-constants.js';
|
|
7
|
+
import { debugLog } from '../utils.js';
|
|
8
|
+
/**
 * TaskDecompositionHandler - Breaks down complex tasks into atomic units.
 *
 * Given a natural-language task description, the code-graph nodes it affects,
 * and an impact analysis, produces one atomic task per affected file, wires up
 * inter-task dependencies, and computes an execution order plus a
 * parallelization/complexity summary.
 */
export class TaskDecompositionHandler {
    /**
     * Decompose a high-level task into atomic, dependency-ordered tasks.
     *
     * @param {string} taskDescription - Natural-language description of the work.
     * @param {Array<object>} affectedNodes - Code-graph nodes touched by the task.
     *   Each node is read for { id, filePath, name, semanticType?, coreType? }.
     * @param {Map<string, object>} impactMap - nodeId -> impact info; read for
     *   { riskLevel, affectedFiles }.
     * @param {string} [basePriority='normal'] - Priority before impact adjustment.
     * @returns {Promise<object>} { tasks, dependencyGraph, executionOrder, summary }.
     */
    async decomposeTask(taskDescription, affectedNodes, impactMap, basePriority = 'normal') {
        await debugLog('Decomposing task', {
            task: taskDescription,
            nodeCount: affectedNodes.length,
        });
        // Step 1: Infer task type from description
        const taskType = this.inferTaskType(taskDescription);
        // Step 2: Group nodes by file (one atomic task per file)
        const fileGroups = this.groupNodesByFile(affectedNodes);
        // Step 3: Create atomic tasks for each file.
        // (A taskIdsByFile map was previously built here but never read; removed.)
        const tasks = [];
        for (const [filePath, nodes] of fileGroups.entries()) {
            // The highest impact level among this file's nodes drives the priority bump.
            const impactLevel = this.getHighestImpactLevel(nodes, impactMap);
            const adjustedPriority = this.adjustPriorityByImpact(basePriority, impactLevel);
            tasks.push({
                id: generateTaskId(),
                title: this.generateTaskTitle(taskDescription, filePath, nodes),
                description: this.generateTaskDescription(taskDescription, nodes),
                type: taskType,
                priority: adjustedPriority,
                priorityScore: TASK_PRIORITIES[adjustedPriority],
                impactLevel,
                nodeIds: nodes.map((n) => n.id),
                filePath,
                dependencies: [], // Filled in after all tasks exist (step 5)
                affectedNodes: nodes,
                metadata: {
                    nodeCount: nodes.length,
                    nodeTypes: this.getNodeTypeSummary(nodes),
                },
            });
        }
        // Step 4: Calculate dependencies based on impact analysis
        const dependencyGraph = this.calculateDependencies(tasks, impactMap);
        // Step 5: Copy dependency lists onto the tasks themselves
        for (const task of tasks) {
            task.dependencies = dependencyGraph.get(task.id) ?? [];
        }
        // Step 6: Topological sort for execution order
        const executionOrder = this.topologicalSort(tasks, dependencyGraph);
        // Step 7: Parallelization potential: dependency-free tasks can start immediately
        const parallelizable = tasks.filter((t) => t.dependencies.length === 0).length;
        const sequential = tasks.length - parallelizable;
        const result = {
            tasks,
            dependencyGraph,
            executionOrder,
            summary: {
                totalTasks: tasks.length,
                parallelizable,
                sequential,
                estimatedComplexity: this.estimateComplexity(tasks, impactMap),
            },
        };
        await debugLog('Task decomposition complete', {
            totalTasks: tasks.length,
            parallelizable,
            sequential,
        });
        return result;
    }
    /**
     * Infer a task type from a natural-language description by keyword
     * matching against TASK_INFERENCE_PATTERNS.
     * @param {string} description - Task description to classify.
     * @returns {string} Matched task type, or 'implement' if nothing matches.
     */
    inferTaskType(description) {
        const lowerDesc = description.toLowerCase();
        for (const [, pattern] of Object.entries(TASK_INFERENCE_PATTERNS)) {
            for (const keyword of pattern.keywords) {
                if (lowerDesc.includes(keyword)) {
                    return pattern.taskType;
                }
            }
        }
        // Default to 'implement' if no pattern matches
        return 'implement';
    }
    /**
     * Group nodes by their file path.
     * @param {Array<object>} nodes - Nodes carrying a filePath property.
     * @returns {Map<string, Array<object>>} filePath -> nodes in that file
     *   (insertion order preserved).
     */
    groupNodesByFile(nodes) {
        const groups = new Map();
        for (const node of nodes) {
            const bucket = groups.get(node.filePath);
            if (bucket) {
                bucket.push(node);
            }
            else {
                groups.set(node.filePath, [node]);
            }
        }
        return groups;
    }
    /**
     * Get the highest impact level among a set of nodes.
     * Nodes absent from the impact map — or with an unrecognized riskLevel
     * (indexOf yields -1, never beating the running maximum) — are ignored;
     * with no recognized levels the result is 'LOW'.
     */
    getHighestImpactLevel(nodes, impactMap) {
        const levels = ['LOW', 'MEDIUM', 'HIGH', 'CRITICAL'];
        let highestIndex = 0;
        for (const node of nodes) {
            const impact = impactMap.get(node.id);
            if (impact) {
                const index = levels.indexOf(impact.riskLevel);
                if (index > highestIndex) {
                    highestIndex = index;
                }
            }
        }
        return levels[highestIndex];
    }
    /**
     * Adjust priority based on impact level: CRITICAL impact bumps two levels,
     * HIGH bumps one, both capped at 'critical'.
     *
     * Fix: an unrecognized basePriority is now returned unchanged. Previously
     * indexOf() === -1 passed the `currentIndex < 4` guard, so a
     * CRITICAL-impact task with an unknown priority was silently remapped to
     * 'low' (priorityOrder[Math.min(-1 + 2, 4)]).
     */
    adjustPriorityByImpact(basePriority, impactLevel) {
        const priorityOrder = ['backlog', 'low', 'normal', 'high', 'critical'];
        const currentIndex = priorityOrder.indexOf(basePriority);
        if (currentIndex === -1) {
            // Unknown priority string: don't bump from index -1.
            return basePriority;
        }
        // Bump priority for high-impact tasks (they need more attention)
        if (impactLevel === 'CRITICAL' && currentIndex < 4) {
            return priorityOrder[Math.min(currentIndex + 2, 4)];
        }
        if (impactLevel === 'HIGH' && currentIndex < 3) {
            return priorityOrder[Math.min(currentIndex + 1, 3)];
        }
        return basePriority;
    }
    /**
     * Generate a concise task title such as "Refactor login in auth.js".
     * Uses the first word of the task description as the action verb,
     * falling back to 'Update' when the description starts with a non-word
     * character.
     */
    generateTaskTitle(taskDescription, filePath, nodes) {
        const fileName = path.basename(filePath);
        const primaryNode = nodes[0];
        const nodeType = primaryNode?.semanticType ?? primaryNode?.coreType ?? 'code';
        // Extract action word from task description
        const actionMatch = taskDescription.match(/^(\w+)/i);
        const action = actionMatch ? actionMatch[1] : 'Update';
        if (nodes.length === 1) {
            return `${action} ${primaryNode.name} in ${fileName}`;
        }
        return `${action} ${nodes.length} ${nodeType}s in ${fileName}`;
    }
    /**
     * Generate a detailed task description: the original description followed
     * by a bullet list of up to five affected nodes (plus a "... and N more"
     * suffix when truncated).
     */
    generateTaskDescription(taskDescription, nodes) {
        const nodeList = nodes
            .slice(0, 5)
            .map((n) => `- ${n.name} (${n.semanticType ?? n.coreType})`)
            .join('\n');
        const moreText = nodes.length > 5 ? `\n... and ${nodes.length - 5} more` : '';
        return `${taskDescription}

Affected code elements:
${nodeList}${moreText}`;
    }
    /**
     * Get a count of node types, e.g. { function: 3, class: 1 }.
     * semanticType wins over coreType when both are present.
     */
    getNodeTypeSummary(nodes) {
        const summary = {};
        for (const node of nodes) {
            const type = node.semanticType ?? node.coreType;
            summary[type] = (summary[type] ?? 0) + 1;
        }
        return summary;
    }
    /**
     * Calculate dependencies between tasks based on impact analysis.
     *
     * Logic: If file A depends on file B (B is in A's affected files),
     * then the task for A should wait for B's task to complete.
     * This ensures changes propagate correctly through the dependency chain.
     *
     * @returns {Map<string, string[]>} taskId -> prerequisite task IDs
     *   (deduplicated; never includes the task itself).
     */
    calculateDependencies(tasks, impactMap) {
        const taskByFile = new Map();
        for (const task of tasks) {
            taskByFile.set(task.filePath, task);
        }
        const dependencies = new Map();
        for (const task of tasks) {
            const deps = [];
            // Check each node in this task for dependencies
            for (const nodeId of task.nodeIds) {
                const impact = impactMap.get(nodeId);
                if (!impact)
                    continue;
                // If this node depends on files that have their own tasks,
                // those tasks should complete first
                for (const affectedFile of impact.affectedFiles) {
                    const depTask = taskByFile.get(affectedFile);
                    if (depTask && depTask.id !== task.id && !deps.includes(depTask.id)) {
                        deps.push(depTask.id);
                    }
                }
            }
            dependencies.set(task.id, deps);
        }
        return dependencies;
    }
    /**
     * Topological sort (depth-first) to determine execution order.
     * Returns task IDs with every task appearing after its dependencies.
     * Cycles are broken by skipping the back-edge, so the sort always
     * terminates; tasks in a cycle may then run in either order.
     */
    topologicalSort(tasks, dependencyGraph) {
        const result = [];
        const visited = new Set();
        const visiting = new Set();
        const taskMap = new Map();
        for (const task of tasks) {
            taskMap.set(task.id, task);
        }
        const visit = (taskId) => {
            if (visited.has(taskId))
                return;
            if (visiting.has(taskId)) {
                // Cycle detected - skip to avoid infinite loop
                return;
            }
            visiting.add(taskId);
            const deps = dependencyGraph.get(taskId) ?? [];
            for (const depId of deps) {
                visit(depId);
            }
            visiting.delete(taskId);
            visited.add(taskId);
            result.push(taskId);
        };
        // Visit all tasks, prioritizing those with fewer dependencies
        const sortedTasks = [...tasks].sort((a, b) => (dependencyGraph.get(a.id)?.length ?? 0) - (dependencyGraph.get(b.id)?.length ?? 0));
        for (const task of sortedTasks) {
            visit(task.id);
        }
        return result;
    }
    /**
     * Estimate overall complexity ('LOW' | 'MEDIUM' | 'HIGH') of the
     * decomposed tasks from task count, impact levels, and dependency fan-in.
     * @param {Array<object>} tasks - Decomposed tasks (impactLevel, dependencies).
     * @param {Map} impactMap - Accepted for interface symmetry; not read here.
     */
    estimateComplexity(tasks, impactMap) {
        const taskCount = tasks.length;
        const criticalCount = tasks.filter((t) => t.impactLevel === 'CRITICAL').length;
        const highCount = tasks.filter((t) => t.impactLevel === 'HIGH').length;
        // Widest direct-dependency fan-in across tasks. Note: this is the max
        // dependency *count* per task, not the depth of the dependency chain
        // (the previous comment claimed chain depth, which the code never computed).
        let maxFanIn = 0;
        for (const task of tasks) {
            maxFanIn = Math.max(maxFanIn, task.dependencies.length);
        }
        // Scoring
        let score = 0;
        score += Math.min(taskCount / 10, 3); // Up to 3 points for task count
        score += criticalCount * 1.5; // 1.5 points per critical task
        score += highCount * 0.5; // 0.5 points per high-impact task
        score += maxFanIn * 0.3; // 0.3 points per direct dependency at the widest task
        if (score >= 5)
            return 'HIGH';
        if (score >= 2)
            return 'MEDIUM';
        return 'LOW';
    }
    /**
     * Get tasks that can run in parallel right now: not yet completed, and
     * with every dependency already in completedTaskIds.
     * @param {Array<object>} allTasks
     * @param {Set<string>} completedTaskIds
     */
    getParallelizableTasks(allTasks, completedTaskIds) {
        return allTasks.filter((task) => {
            // Already completed
            if (completedTaskIds.has(task.id))
                return false;
            // Check if all dependencies are completed
            const deps = task.dependencies;
            return deps.every((depId) => completedTaskIds.has(depId));
        });
    }
}
|
|
291
|
+
/**
 * Shared singleton instance of TaskDecompositionHandler.
 * The class keeps no instance state (every method works only on its
 * arguments), so one module-level instance is safe to share.
 */
export const taskDecompositionHandler = new TaskDecompositionHandler();
|
package/dist/mcp/tools/index.js
CHANGED
|
@@ -15,8 +15,13 @@ import { createParseTypescriptProjectTool } from './parse-typescript-project.too
|
|
|
15
15
|
import { createSearchCodebaseTool } from './search-codebase.tool.js';
|
|
16
16
|
import { createStartWatchProjectTool } from './start-watch-project.tool.js';
|
|
17
17
|
import { createStopWatchProjectTool } from './stop-watch-project.tool.js';
|
|
18
|
+
import { createSwarmClaimTaskTool } from './swarm-claim-task.tool.js';
|
|
18
19
|
import { createSwarmCleanupTool } from './swarm-cleanup.tool.js';
|
|
20
|
+
import { createSwarmCompleteTaskTool } from './swarm-complete-task.tool.js';
|
|
21
|
+
import { createSwarmGetTasksTool } from './swarm-get-tasks.tool.js';
|
|
22
|
+
import { createSwarmOrchestrateTool } from './swarm-orchestrate.tool.js';
|
|
19
23
|
import { createSwarmPheromoneTool } from './swarm-pheromone.tool.js';
|
|
24
|
+
import { createSwarmPostTaskTool } from './swarm-post-task.tool.js';
|
|
20
25
|
import { createSwarmSenseTool } from './swarm-sense.tool.js';
|
|
21
26
|
import { createTestNeo4jConnectionTool } from './test-neo4j-connection.tool.js';
|
|
22
27
|
import { createTraverseFromNodeTool } from './traverse-from-node.tool.js';
|
|
@@ -68,8 +73,15 @@ export const registerAllTools = (server) => {
|
|
|
68
73
|
createStartWatchProjectTool(server);
|
|
69
74
|
createStopWatchProjectTool(server);
|
|
70
75
|
createListWatchersTool(server);
|
|
71
|
-
// Register swarm coordination tools
|
|
76
|
+
// Register swarm coordination tools (pheromones for indirect coordination)
|
|
72
77
|
createSwarmPheromoneTool(server);
|
|
73
78
|
createSwarmSenseTool(server);
|
|
74
79
|
createSwarmCleanupTool(server);
|
|
80
|
+
// Register swarm task tools (blackboard for explicit task management)
|
|
81
|
+
createSwarmPostTaskTool(server);
|
|
82
|
+
createSwarmClaimTaskTool(server);
|
|
83
|
+
createSwarmCompleteTaskTool(server);
|
|
84
|
+
createSwarmGetTasksTool(server);
|
|
85
|
+
// Register swarm orchestration tool (meta-tool for coordinating multi-agent work)
|
|
86
|
+
createSwarmOrchestrateTool(server);
|
|
75
87
|
};
|
|
@@ -0,0 +1,331 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Swarm Claim Task Tool
|
|
3
|
+
* Allow an agent to claim an available task from the blackboard
|
|
4
|
+
*/
|
|
5
|
+
import { z } from 'zod';
|
|
6
|
+
import { Neo4jService } from '../../storage/neo4j/neo4j.service.js';
|
|
7
|
+
import { TOOL_NAMES, TOOL_METADATA } from '../constants.js';
|
|
8
|
+
import { createErrorResponse, createSuccessResponse, resolveProjectIdOrError, debugLog } from '../utils.js';
|
|
9
|
+
import { TASK_TYPES, TASK_PRIORITIES } from './swarm-constants.js';
|
|
10
|
+
/**
 * Query to claim a specific task by ID.
 * Uses atomic update to prevent race conditions: the status/dependency check
 * and the SET happen inside one Cypher statement, so a task that was claimed
 * concurrently simply matches zero rows.
 *
 * NOTE(review): the gate `incompleteDeps = 0 OR t.status = 'available'`
 * allows an 'available' task to be claimed even while it still has
 * incomplete DEPENDS_ON tasks; only 'blocked' tasks are actually held back
 * by the dependency check. Confirm this asymmetry is intended.
 */
const CLAIM_TASK_BY_ID_QUERY = `
MATCH (t:SwarmTask {id: $taskId, projectId: $projectId})
WHERE t.status IN ['available', 'blocked']

// Check if dependencies are complete
OPTIONAL MATCH (t)-[:DEPENDS_ON]->(dep:SwarmTask)
WHERE dep.status <> 'completed'
WITH t, count(dep) as incompleteDeps

// Only claim if no incomplete dependencies (or task was already available)
WHERE incompleteDeps = 0 OR t.status = 'available'

// Atomic claim
SET t.status = 'claimed',
    t.claimedBy = $agentId,
    t.claimedAt = timestamp(),
    t.updatedAt = timestamp()

// Return task details with target info
WITH t
OPTIONAL MATCH (t)-[:TARGETS]->(target)
RETURN t.id as id,
       t.projectId as projectId,
       t.swarmId as swarmId,
       t.title as title,
       t.description as description,
       t.type as type,
       t.priority as priority,
       t.priorityScore as priorityScore,
       t.status as status,
       t.targetNodeIds as targetNodeIds,
       t.targetFilePaths as targetFilePaths,
       t.dependencies as dependencies,
       t.claimedBy as claimedBy,
       t.claimedAt as claimedAt,
       t.createdBy as createdBy,
       t.metadata as metadata,
       collect(DISTINCT {
         id: target.id,
         type: labels(target)[0],
         name: target.name,
         filePath: target.filePath
       }) as targets
`;
/**
 * Query to claim the highest priority available task matching criteria.
 * Filters by optional $types / $minPriority, excludes tasks with incomplete
 * dependencies, picks one task (highest priorityScore, then oldest), and
 * claims it in the same statement.
 *
 * NOTE(review): within the WITH clause, `WHERE` appears before
 * `ORDER BY ... LIMIT` — verify this subclause order against the target
 * Neo4j version's Cypher grammar.
 */
const CLAIM_NEXT_TASK_QUERY = `
// Find available tasks not blocked by dependencies
MATCH (t:SwarmTask {projectId: $projectId, swarmId: $swarmId})
WHERE t.status = 'available'
  AND ($types IS NULL OR size($types) = 0 OR t.type IN $types)
  AND ($minPriority IS NULL OR t.priorityScore >= $minPriority)

// Exclude tasks with incomplete dependencies
OPTIONAL MATCH (t)-[:DEPENDS_ON]->(dep:SwarmTask)
WHERE dep.status <> 'completed'
WITH t, count(dep) as incompleteDeps
WHERE incompleteDeps = 0

// Order by priority (highest first), then by creation time (oldest first)
ORDER BY t.priorityScore DESC, t.createdAt ASC
LIMIT 1

// Atomic claim
SET t.status = 'claimed',
    t.claimedBy = $agentId,
    t.claimedAt = timestamp(),
    t.updatedAt = timestamp()

// Return task details with target info
WITH t
OPTIONAL MATCH (t)-[:TARGETS]->(target)
RETURN t.id as id,
       t.projectId as projectId,
       t.swarmId as swarmId,
       t.title as title,
       t.description as description,
       t.type as type,
       t.priority as priority,
       t.priorityScore as priorityScore,
       t.status as status,
       t.targetNodeIds as targetNodeIds,
       t.targetFilePaths as targetFilePaths,
       t.dependencies as dependencies,
       t.claimedBy as claimedBy,
       t.claimedAt as claimedAt,
       t.createdBy as createdBy,
       t.metadata as metadata,
       collect(DISTINCT {
         id: target.id,
         type: labels(target)[0],
         name: target.name,
         filePath: target.filePath
       }) as targets
`;
/**
 * Query to start working on a claimed task (transition to in_progress).
 * Matches zero rows — and therefore updates nothing — unless the task is
 * currently 'claimed' by this exact $agentId.
 */
const START_TASK_QUERY = `
MATCH (t:SwarmTask {id: $taskId, projectId: $projectId})
WHERE t.status = 'claimed' AND t.claimedBy = $agentId

SET t.status = 'in_progress',
    t.startedAt = timestamp(),
    t.updatedAt = timestamp()

RETURN t.id as id,
       t.status as status,
       t.claimedBy as claimedBy,
       t.startedAt as startedAt
`;
/**
 * Query to release a claimed task (unclaim it).
 * Only the owning agent may release; clears claim/start timestamps, records
 * $reason on t.releaseReason, and returns the task to 'available'.
 */
const RELEASE_TASK_QUERY = `
MATCH (t:SwarmTask {id: $taskId, projectId: $projectId})
WHERE t.status IN ['claimed', 'in_progress'] AND t.claimedBy = $agentId

SET t.status = 'available',
    t.claimedBy = null,
    t.claimedAt = null,
    t.startedAt = null,
    t.updatedAt = timestamp(),
    t.releaseReason = $reason

RETURN t.id as id,
       t.title as title,
       t.status as status
`;
|
|
144
|
+
/**
 * Register the swarm claim-task MCP tool on the given server.
 *
 * Supports three actions on the SwarmTask blackboard in Neo4j:
 *   - 'claim' (default): reserve a specific task by ID, or auto-select the
 *     highest-priority claimable task in the swarm when taskId is omitted;
 *   - 'start': transition a task this agent has claimed to in_progress;
 *   - 'release': return a claimed/in_progress task to the available pool.
 *
 * Each transition is a single Cypher write (see the query constants), so a
 * losing racer simply gets zero rows back and an error response.
 *
 * @param {object} server - MCP server exposing registerTool().
 */
export const createSwarmClaimTaskTool = (server) => {
    server.registerTool(TOOL_NAMES.swarmClaimTask, {
        title: TOOL_METADATA[TOOL_NAMES.swarmClaimTask].title,
        description: TOOL_METADATA[TOOL_NAMES.swarmClaimTask].description,
        inputSchema: {
            projectId: z.string().describe('Project ID, name, or path'),
            swarmId: z.string().describe('Swarm ID to find tasks in'),
            agentId: z.string().describe('Your unique agent identifier'),
            taskId: z
                .string()
                .optional()
                .describe('Specific task ID to claim (if omitted, claims highest priority available task)'),
            types: z
                .array(z.enum(TASK_TYPES))
                .optional()
                .describe('Filter by task types when auto-selecting (e.g., ["implement", "fix"])'),
            minPriority: z
                .enum(Object.keys(TASK_PRIORITIES))
                .optional()
                .describe('Minimum priority level when auto-selecting'),
            action: z
                .enum(['claim', 'start', 'release'])
                .optional()
                .default('claim')
                .describe('Action: claim (reserve task), start (begin work), release (give up task)'),
            releaseReason: z
                .string()
                .optional()
                .describe('Reason for releasing the task (required if action=release)'),
        },
    }, async ({ projectId, swarmId, agentId, taskId, types, minPriority, action = 'claim', releaseReason, }) => {
        const neo4jService = new Neo4jService();
        // Resolve project ID (accepts ID, name, or path). On failure the
        // service is closed here because we return before entering the
        // try/finally below.
        const projectResult = await resolveProjectIdOrError(projectId, neo4jService);
        if (!projectResult.success) {
            await neo4jService.close();
            return projectResult.error;
        }
        const resolvedProjectId = projectResult.projectId;
        try {
            await debugLog('Swarm claim task', {
                action,
                projectId: resolvedProjectId,
                swarmId,
                agentId,
                taskId,
                types,
                minPriority,
            });
            // Handle release action: only the owning agent can release;
            // zero rows back means wrong owner, wrong status, or no such task.
            if (action === 'release') {
                if (!taskId) {
                    return createErrorResponse('taskId is required for release action');
                }
                const result = await neo4jService.run(RELEASE_TASK_QUERY, {
                    taskId,
                    projectId: resolvedProjectId,
                    agentId,
                    reason: releaseReason || 'No reason provided',
                });
                if (result.length === 0) {
                    return createErrorResponse(`Cannot release task ${taskId}. Either it doesn't exist, isn't claimed/in_progress, or you don't own it.`);
                }
                return createSuccessResponse(JSON.stringify({
                    success: true,
                    action: 'released',
                    task: {
                        id: result[0].id,
                        title: result[0].title,
                        status: result[0].status,
                    },
                    message: `Task released and now available for other agents`,
                }));
            }
            // Handle start action: claimed -> in_progress, owner-only.
            if (action === 'start') {
                if (!taskId) {
                    return createErrorResponse('taskId is required for start action');
                }
                const result = await neo4jService.run(START_TASK_QUERY, {
                    taskId,
                    projectId: resolvedProjectId,
                    agentId,
                });
                if (result.length === 0) {
                    return createErrorResponse(`Cannot start task ${taskId}. Either it doesn't exist, isn't claimed, or you don't own it.`);
                }
                return createSuccessResponse(JSON.stringify({
                    success: true,
                    action: 'started',
                    task: {
                        id: result[0].id,
                        status: result[0].status,
                        claimedBy: result[0].claimedBy,
                        // Neo4j timestamps may come back as Integer objects;
                        // normalize to a plain JS number.
                        startedAt: typeof result[0].startedAt === 'object'
                            ? result[0].startedAt.toNumber()
                            : result[0].startedAt,
                    },
                    message: 'Task is now in progress',
                }));
            }
            // Handle claim action
            let result;
            if (taskId) {
                // Claim specific task
                result = await neo4jService.run(CLAIM_TASK_BY_ID_QUERY, {
                    taskId,
                    projectId: resolvedProjectId,
                    agentId,
                });
                if (result.length === 0) {
                    return createErrorResponse(`Cannot claim task ${taskId}. It may not exist, already be claimed, or have incomplete dependencies.`);
                }
            }
            else {
                // Auto-select highest priority available task.
                // minPriority arrives as a level name; the query compares
                // numeric priorityScore, so translate via TASK_PRIORITIES.
                const minPriorityScore = minPriority
                    ? TASK_PRIORITIES[minPriority]
                    : null;
                result = await neo4jService.run(CLAIM_NEXT_TASK_QUERY, {
                    projectId: resolvedProjectId,
                    swarmId,
                    agentId,
                    types: types || null,
                    minPriority: minPriorityScore,
                });
                if (result.length === 0) {
                    // Nothing claimable is a normal outcome for a worker loop,
                    // so respond with success + 'no_tasks' rather than an error.
                    return createSuccessResponse(JSON.stringify({
                        success: true,
                        action: 'no_tasks',
                        message: 'No available tasks matching criteria. All tasks may be claimed, blocked, or completed.',
                        filters: {
                            swarmId,
                            types: types || 'any',
                            minPriority: minPriority || 'any',
                        },
                    }));
                }
            }
            const task = result[0];
            // Parse metadata if present; on malformed JSON fall back to the
            // raw stored string rather than failing the claim.
            let metadata = null;
            if (task.metadata) {
                try {
                    metadata = JSON.parse(task.metadata);
                }
                catch {
                    metadata = task.metadata;
                }
            }
            // Filter out the single null-field placeholder that
            // collect(DISTINCT {...}) produces when OPTIONAL MATCH found no targets.
            const targets = (task.targets || []).filter((t) => t.id !== null);
            return createSuccessResponse(JSON.stringify({
                success: true,
                action: 'claimed',
                task: {
                    id: task.id,
                    projectId: task.projectId,
                    swarmId: task.swarmId,
                    title: task.title,
                    description: task.description,
                    type: task.type,
                    priority: task.priority,
                    priorityScore: task.priorityScore,
                    status: task.status,
                    targetNodeIds: task.targetNodeIds,
                    targetFilePaths: task.targetFilePaths,
                    dependencies: task.dependencies,
                    claimedBy: task.claimedBy,
                    // Normalize Neo4j Integer timestamps to plain numbers.
                    claimedAt: typeof task.claimedAt === 'object'
                        ? task.claimedAt.toNumber()
                        : task.claimedAt,
                    createdBy: task.createdBy,
                    metadata,
                    targets,
                },
                message: 'Task claimed successfully. Use action="start" when you begin working.',
            }));
        }
        catch (error) {
            await debugLog('Swarm claim task error', { error: String(error) });
            return createErrorResponse(error instanceof Error ? error : String(error));
        }
        finally {
            // Runs on every path inside the try (including early returns),
            // so the driver connection is always released.
            await neo4jService.close();
        }
    });
};