mcp-coordinator 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +846 -835
- package/dashboard/Dockerfile +19 -19
- package/dashboard/public/index.html +1178 -1178
- package/dist/cli/dashboard.js +9 -5
- package/dist/cli/server/start.js +24 -1
- package/dist/cli/server/status.js +16 -23
- package/dist/src/agent-activity.js +6 -6
- package/dist/src/agent-registry.js +6 -6
- package/dist/src/announce-workflow.d.ts +52 -0
- package/dist/src/announce-workflow.js +91 -0
- package/dist/src/consultation.d.ts +14 -0
- package/dist/src/consultation.js +110 -45
- package/dist/src/database.js +126 -126
- package/dist/src/dependency-map.js +3 -3
- package/dist/src/file-tracker.js +8 -8
- package/dist/src/http/handle-rest.d.ts +23 -0
- package/dist/src/http/handle-rest.js +374 -0
- package/dist/src/http/utils.d.ts +15 -0
- package/dist/src/http/utils.js +39 -0
- package/dist/src/introspection.js +1 -1
- package/dist/src/mqtt-bridge.d.ts +2 -0
- package/dist/src/mqtt-bridge.js +2 -0
- package/dist/src/mqtt-broker.d.ts +16 -0
- package/dist/src/mqtt-broker.js +16 -1
- package/dist/src/path-guard.d.ts +14 -0
- package/dist/src/path-guard.js +44 -0
- package/dist/src/reset-guard.d.ts +16 -0
- package/dist/src/reset-guard.js +24 -0
- package/dist/src/serve-http.d.ts +31 -1
- package/dist/src/serve-http.js +154 -445
- package/dist/src/server-setup.js +15 -364
- package/dist/src/tools/agents-tools.d.ts +8 -0
- package/dist/src/tools/agents-tools.js +46 -0
- package/dist/src/tools/consultation-tools.d.ts +21 -0
- package/dist/src/tools/consultation-tools.js +170 -0
- package/dist/src/tools/dependencies-tools.d.ts +8 -0
- package/dist/src/tools/dependencies-tools.js +27 -0
- package/dist/src/tools/files-tools.d.ts +8 -0
- package/dist/src/tools/files-tools.js +28 -0
- package/dist/src/tools/mqtt-tools.d.ts +9 -0
- package/dist/src/tools/mqtt-tools.js +33 -0
- package/dist/src/tools/status-tools.d.ts +8 -0
- package/dist/src/tools/status-tools.js +63 -0
- package/package.json +81 -80
package/dist/src/server-setup.js
CHANGED
|
@@ -1,6 +1,11 @@
|
|
|
1
1
|
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
2
|
-
import { z } from "zod";
|
|
3
2
|
import { initDatabase } from "./database.js";
|
|
3
|
+
import { registerConsultationTools } from "./tools/consultation-tools.js";
|
|
4
|
+
import { registerAgentTools } from "./tools/agents-tools.js";
|
|
5
|
+
import { registerFilesTools } from "./tools/files-tools.js";
|
|
6
|
+
import { registerDependenciesTools } from "./tools/dependencies-tools.js";
|
|
7
|
+
import { registerStatusTools } from "./tools/status-tools.js";
|
|
8
|
+
import { registerMqttTools } from "./tools/mqtt-tools.js";
|
|
4
9
|
import { AgentRegistry } from "./agent-registry.js";
|
|
5
10
|
import { Consultation } from "./consultation.js";
|
|
6
11
|
import { ConflictDetector } from "./conflict-detector.js";
|
|
@@ -11,7 +16,6 @@ import { SummaryContextProvider } from "./context-provider.js";
|
|
|
11
16
|
import { IntrospectionManager } from "./introspection.js";
|
|
12
17
|
import { SseEmitter } from "./sse-emitter.js";
|
|
13
18
|
import { MqttBridge } from "./mqtt-bridge.js";
|
|
14
|
-
import { assessPlanQuality } from "./plan-quality.js";
|
|
15
19
|
import { AgentActivityTracker } from "./agent-activity.js";
|
|
16
20
|
import { QuotaCache } from "./quota/quota-cache.js";
|
|
17
21
|
import { createLogger } from "./logger.js";
|
|
@@ -87,367 +91,14 @@ export function createMcpServer(services) {
|
|
|
87
91
|
name: "mcp-coordinator-v3",
|
|
88
92
|
version: VERSION,
|
|
89
93
|
});
|
|
90
|
-
//
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
mqttBridge.registerAgent(agent_id, name);
|
|
100
|
-
return { content: [{ type: "text", text: JSON.stringify(agent) }] };
|
|
101
|
-
});
|
|
102
|
-
server.tool("list_agents", "List registered agents", {
|
|
103
|
-
online_only: z.boolean().optional(),
|
|
104
|
-
}, async ({ online_only }) => {
|
|
105
|
-
const agents = online_only ? registry.listOnline() : registry.listAll();
|
|
106
|
-
return { content: [{ type: "text", text: JSON.stringify(agents) }] };
|
|
107
|
-
});
|
|
108
|
-
server.tool("heartbeat", "Update agent activity status and last seen timestamp", {
|
|
109
|
-
agent_id: z.string(),
|
|
110
|
-
current_file: z.string().optional(),
|
|
111
|
-
current_thread: z.string().optional(),
|
|
112
|
-
}, async ({ agent_id, current_file, current_thread }) => {
|
|
113
|
-
registry.heartbeat(agent_id);
|
|
114
|
-
activityTracker.heartbeat(agent_id, {
|
|
115
|
-
currentFile: current_file || null,
|
|
116
|
-
currentThread: current_thread || null,
|
|
117
|
-
});
|
|
118
|
-
const activity = activityTracker.getActivity(agent_id);
|
|
119
|
-
sseEmitter.emit("agent_activity", {
|
|
120
|
-
agent_id, activity_status: activity.activity_status,
|
|
121
|
-
current_file: activity.current_file, current_thread: activity.current_thread,
|
|
122
|
-
});
|
|
123
|
-
return { content: [{ type: "text", text: JSON.stringify(activity) }] };
|
|
124
|
-
});
|
|
125
|
-
server.tool("agent_activity", "Get activity status for all online agents", {}, async () => {
|
|
126
|
-
const activities = activityTracker.listAll({ idleAfterMinutes: 5 });
|
|
127
|
-
return { content: [{ type: "text", text: JSON.stringify(activities) }] };
|
|
128
|
-
});
|
|
129
|
-
// ── CONSULTATION TOOLS ──
|
|
130
|
-
server.tool("announce_work", "Open a consultation thread before starting work", {
|
|
131
|
-
agent_id: z.string(),
|
|
132
|
-
subject: z.string(),
|
|
133
|
-
plan: z.string().optional(),
|
|
134
|
-
target_modules: z.array(z.string()),
|
|
135
|
-
target_files: z.array(z.string()),
|
|
136
|
-
depends_on_files: z.array(z.string()).optional(),
|
|
137
|
-
exports_affected: z.array(z.string()).optional(),
|
|
138
|
-
keep_open: z.boolean().optional().describe("Keep thread open even if no agents are concerned (for manual coordination like games or debates)"),
|
|
139
|
-
assigned_to: z.string().optional().describe("Directed-dispatch: only this agent_id will be allowed to claim the thread. Use for lead→worker handoffs in maitre/chaine/relais presets. Implies keep_open=true."),
|
|
140
|
-
}, async ({ agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open, assigned_to }) => {
|
|
141
|
-
mcpLog.info({ tool: "announce_work", agent_id, subject, target_modules, target_files, assigned_to }, "Tool called");
|
|
142
|
-
// Quality gate on plan
|
|
143
|
-
const planQuality = assessPlanQuality(plan);
|
|
144
|
-
const effectiveMode = planQuality.mode;
|
|
145
|
-
const conflicts = conflictDetector.detect({ agent_id, target_modules, target_files });
|
|
146
|
-
const thread = consultation.announceWork({
|
|
147
|
-
agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open, assigned_to,
|
|
148
|
-
});
|
|
149
|
-
// Store conflicts on thread
|
|
150
|
-
if (conflicts.length > 0) {
|
|
151
|
-
const db = (await import("./database.js")).getDb();
|
|
152
|
-
db.prepare("UPDATE threads SET conflicts = ? WHERE id = ?")
|
|
153
|
-
.run(JSON.stringify(conflicts), thread.id);
|
|
154
|
-
}
|
|
155
|
-
// Impact scoring: categorize all online agents
|
|
156
|
-
const categorized = impactScorer.categorize({
|
|
157
|
-
agent_id, target_modules, target_files, depends_on_files, exports_affected,
|
|
158
|
-
});
|
|
159
|
-
// Override expected_respondents with concerned agents from scorer
|
|
160
|
-
{
|
|
161
|
-
const db = (await import("./database.js")).getDb();
|
|
162
|
-
const concernedIds = categorized.concerned.map(s => s.agent_id);
|
|
163
|
-
db.prepare("UPDATE threads SET expected_respondents = ? WHERE id = ?")
|
|
164
|
-
.run(JSON.stringify(concernedIds), thread.id);
|
|
165
|
-
// Only auto-resolve when truly alone — no other online agents.
|
|
166
|
-
// If peers are online but not yet concerned, keep the thread open so
|
|
167
|
-
// a subsequent announce can still match via Layer 0. Thread timeouts
|
|
168
|
-
// naturally if no one joins.
|
|
169
|
-
const otherOnlineCount = registry.listOnline().filter((a) => a.id !== agent_id).length;
|
|
170
|
-
const shouldAutoResolve = concernedIds.length === 0 && otherOnlineCount === 0;
|
|
171
|
-
if (shouldAutoResolve && thread.status === "open" && !keep_open) {
|
|
172
|
-
db.prepare("UPDATE threads SET status = 'resolved', resolved_at = ? WHERE id = ?")
|
|
173
|
-
.run(new Date().toISOString(), thread.id);
|
|
174
|
-
consultation.emitResolution(thread.id, "auto_resolved");
|
|
175
|
-
}
|
|
176
|
-
}
|
|
177
|
-
// Emit impact_scored SSE events for all agents
|
|
178
|
-
for (const s of [...categorized.concerned, ...categorized.gray_zone, ...categorized.pass]) {
|
|
179
|
-
sseEmitter.emit("impact_scored", {
|
|
180
|
-
thread_id: thread.id, agent_id: s.agent_id, agent_name: s.agent_name,
|
|
181
|
-
score: s.score, reasons: s.reasons, category: s.score >= 90 ? "concerned" : s.score >= 30 ? "gray_zone" : "pass",
|
|
182
|
-
});
|
|
183
|
-
}
|
|
184
|
-
// Create introspection records and emit introspection_requested for gray_zone agents
|
|
185
|
-
for (const s of categorized.gray_zone) {
|
|
186
|
-
introspection.create({ thread_id: thread.id, agent_id: s.agent_id, score: s.score, reasons: s.reasons });
|
|
187
|
-
sseEmitter.emit("introspection_requested", {
|
|
188
|
-
thread_id: thread.id, agent_id: s.agent_id, agent_name: s.agent_name, score: s.score, reasons: s.reasons,
|
|
189
|
-
});
|
|
190
|
-
}
|
|
191
|
-
// Emit downgrade event when plan is provided but quality is insufficient
|
|
192
|
-
if (plan && effectiveMode === "discovery") {
|
|
193
|
-
sseEmitter.emit("impact_scored", {
|
|
194
|
-
thread_id: thread.id,
|
|
195
|
-
agent_id: agent_id,
|
|
196
|
-
agent_name: registry.get(agent_id)?.name || agent_id,
|
|
197
|
-
score: planQuality.score,
|
|
198
|
-
reasons: [`plan downgraded: score ${planQuality.score}/3 — ${!planQuality.checks.mentions_files ? 'no files' : ''} ${!planQuality.checks.concrete_approach ? 'vague approach' : ''} ${!planQuality.checks.sufficient_detail ? 'too short' : ''}`.trim()],
|
|
199
|
-
category: "plan_quality",
|
|
200
|
-
});
|
|
201
|
-
}
|
|
202
|
-
const updated = consultation.getThread(thread.id);
|
|
203
|
-
const respondents = JSON.parse(updated.expected_respondents || "[]");
|
|
204
|
-
sseEmitter.emit("thread_opened", {
|
|
205
|
-
thread_id: thread.id, initiator: agent_id, subject, target_modules, conflicts,
|
|
206
|
-
expected_respondents: respondents,
|
|
207
|
-
mode: effectiveMode,
|
|
208
|
-
plan: plan || null,
|
|
209
|
-
plan_quality: planQuality,
|
|
210
|
-
});
|
|
211
|
-
mqttBridge.publishConsultation(thread.id, agent_id, subject, target_modules);
|
|
212
|
-
// Gather context from concerned agents for the initiator
|
|
213
|
-
const contextForInitiator = respondents.map((rid) => contextProvider.getRelevantContext(rid, { thread_id: updated.id, subject, target_modules, target_files })).filter((ctx) => ctx.modules.length > 0);
|
|
214
|
-
return {
|
|
215
|
-
content: [{
|
|
216
|
-
type: "text",
|
|
217
|
-
text: JSON.stringify({ thread: updated, conflicts, context: contextForInitiator, impact: categorized }),
|
|
218
|
-
}],
|
|
219
|
-
};
|
|
220
|
-
});
|
|
221
|
-
server.tool("post_to_thread", "Post a message to a consultation thread", {
|
|
222
|
-
thread_id: z.string(),
|
|
223
|
-
agent_id: z.string(),
|
|
224
|
-
agent_name: z.string().optional(),
|
|
225
|
-
type: z.enum(["context", "suggestion", "warning"]),
|
|
226
|
-
content: z.string(),
|
|
227
|
-
context_snapshot: z.string().optional(),
|
|
228
|
-
in_reply_to: z.string().optional(),
|
|
229
|
-
}, async ({ thread_id, agent_id, agent_name, type, content, context_snapshot, in_reply_to }) => {
|
|
230
|
-
mcpLog.info({ tool: "post_to_thread", thread_id, agent_id, type }, "Tool called");
|
|
231
|
-
const msg = consultation.postToThread({
|
|
232
|
-
thread_id, agent_id, agent_name, type, content, context_snapshot, in_reply_to,
|
|
233
|
-
});
|
|
234
|
-
const thread = consultation.getThread(thread_id);
|
|
235
|
-
sseEmitter.emit("message_posted", {
|
|
236
|
-
thread_id, agent_id, agent_name: agent_name || agent_id,
|
|
237
|
-
type, content, round: thread?.round || 1,
|
|
238
|
-
token_estimate: msg.token_estimate || 0,
|
|
239
|
-
});
|
|
240
|
-
mqttBridge.publishMessage(thread_id, agent_id, type, content);
|
|
241
|
-
return { content: [{ type: "text", text: JSON.stringify(msg) }] };
|
|
242
|
-
});
|
|
243
|
-
server.tool("propose_resolution", "Propose a resolution for the consultation", {
|
|
244
|
-
thread_id: z.string(),
|
|
245
|
-
agent_id: z.string(),
|
|
246
|
-
summary: z.string(),
|
|
247
|
-
plan: z.string().optional(),
|
|
248
|
-
}, async ({ thread_id, agent_id, summary, plan }) => {
|
|
249
|
-
mcpLog.info({ tool: "propose_resolution", thread_id, agent_id }, "Tool called");
|
|
250
|
-
consultation.proposeResolution(thread_id, agent_id, summary);
|
|
251
|
-
sseEmitter.emit("resolution_proposed", { thread_id, agent_id, summary });
|
|
252
|
-
mqttBridge.publishResolution(thread_id, "resolving", summary);
|
|
253
|
-
const thread = consultation.getThread(thread_id);
|
|
254
|
-
return { content: [{ type: "text", text: JSON.stringify(thread) }] };
|
|
255
|
-
});
|
|
256
|
-
server.tool("approve_resolution", "Approve the proposed resolution", {
|
|
257
|
-
thread_id: z.string(),
|
|
258
|
-
agent_id: z.string(),
|
|
259
|
-
}, async ({ thread_id, agent_id }) => {
|
|
260
|
-
mcpLog.info({ tool: "approve_resolution", thread_id, agent_id }, "Tool called");
|
|
261
|
-
const agentInfo = registry.get(agent_id);
|
|
262
|
-
consultation.approveResolution(thread_id, agent_id, agentInfo?.name);
|
|
263
|
-
const thread = consultation.getThread(thread_id);
|
|
264
|
-
return { content: [{ type: "text", text: JSON.stringify(thread) }] };
|
|
265
|
-
});
|
|
266
|
-
server.tool("contest_resolution", "Contest the proposed resolution", {
|
|
267
|
-
thread_id: z.string(),
|
|
268
|
-
agent_id: z.string(),
|
|
269
|
-
reason: z.string(),
|
|
270
|
-
}, async ({ thread_id, agent_id, reason }) => {
|
|
271
|
-
mcpLog.info({ tool: "contest_resolution", thread_id, agent_id }, "Tool called");
|
|
272
|
-
consultation.contestResolution(thread_id, agent_id, reason);
|
|
273
|
-
const thread = consultation.getThread(thread_id);
|
|
274
|
-
return { content: [{ type: "text", text: JSON.stringify(thread) }] };
|
|
275
|
-
});
|
|
276
|
-
server.tool("close_thread", "Close a consultation thread", {
|
|
277
|
-
thread_id: z.string(),
|
|
278
|
-
agent_id: z.string(),
|
|
279
|
-
summary: z.string(),
|
|
280
|
-
}, async ({ thread_id, agent_id, summary }) => {
|
|
281
|
-
mcpLog.info({ tool: "close_thread", thread_id, agent_id }, "Tool called");
|
|
282
|
-
consultation.closeThread(thread_id, agent_id, summary);
|
|
283
|
-
return { content: [{ type: "text", text: "closed" }] };
|
|
284
|
-
});
|
|
285
|
-
server.tool("cancel_thread", "Cancel a consultation thread", {
|
|
286
|
-
thread_id: z.string(),
|
|
287
|
-
agent_id: z.string(),
|
|
288
|
-
reason: z.string().optional(),
|
|
289
|
-
}, async ({ thread_id, agent_id, reason }) => {
|
|
290
|
-
mcpLog.info({ tool: "cancel_thread", thread_id, agent_id }, "Tool called");
|
|
291
|
-
consultation.cancelThread(thread_id, agent_id, reason);
|
|
292
|
-
sseEmitter.emit("thread_cancelled", { thread_id, reason });
|
|
293
|
-
return { content: [{ type: "text", text: "cancelled" }] };
|
|
294
|
-
});
|
|
295
|
-
server.tool("get_thread", "Get a thread with all messages", {
|
|
296
|
-
thread_id: z.string(),
|
|
297
|
-
}, async ({ thread_id }) => {
|
|
298
|
-
const result = consultation.getThreadWithMessages(thread_id);
|
|
299
|
-
mcpLog.debug({ tool: "get_thread", thread_id, message_count: result?.messages.length }, "Tool called");
|
|
300
|
-
return { content: [{ type: "text", text: JSON.stringify(result) }] };
|
|
301
|
-
});
|
|
302
|
-
server.tool("get_thread_updates", "Get new messages since timestamp", {
|
|
303
|
-
agent_id: z.string(),
|
|
304
|
-
since: z.string().optional(),
|
|
305
|
-
}, async ({ agent_id, since }) => {
|
|
306
|
-
const updates = consultation.getThreadUpdates(agent_id, since);
|
|
307
|
-
return { content: [{ type: "text", text: JSON.stringify(updates) }] };
|
|
308
|
-
});
|
|
309
|
-
server.tool("list_threads", "List consultation threads", {
|
|
310
|
-
status: z.string().optional(),
|
|
311
|
-
agent_id: z.string().optional(),
|
|
312
|
-
module: z.string().optional(),
|
|
313
|
-
assigned_to_me: z.string().optional().describe("Filter to threads claimable by this agent_id: open pool (assigned_to NULL) OR directed to me. Use for worker agents receiving directed dispatches."),
|
|
314
|
-
}, async ({ status, agent_id, module, assigned_to_me }) => {
|
|
315
|
-
const threads = consultation.listThreads({ status, agent_id, module, assigned_to_me });
|
|
316
|
-
mcpLog.debug({ tool: "list_threads", status, agent_id, module, assigned_to_me, result_count: threads.length }, "Tool called");
|
|
317
|
-
return { content: [{ type: "text", text: JSON.stringify(threads) }] };
|
|
318
|
-
});
|
|
319
|
-
server.tool("log_action_summary", "Log a one-liner summary of an action", {
|
|
320
|
-
session_id: z.string(),
|
|
321
|
-
agent_id: z.string(),
|
|
322
|
-
file_path: z.string().optional(),
|
|
323
|
-
summary: z.string(),
|
|
324
|
-
}, async ({ session_id, agent_id, file_path, summary }) => {
|
|
325
|
-
const result = consultation.logActionSummary({ session_id, agent_id, file_path, summary });
|
|
326
|
-
sseEmitter.emit("action_summary", { agent_id, file_path, summary });
|
|
327
|
-
return { content: [{ type: "text", text: JSON.stringify(result) }] };
|
|
328
|
-
});
|
|
329
|
-
// ── FILE TRACKING TOOLS ──
|
|
330
|
-
server.tool("hot_files", "List files modified by multiple agents", {
|
|
331
|
-
since_minutes: z.number().optional(),
|
|
332
|
-
}, async ({ since_minutes }) => {
|
|
333
|
-
const files = fileTracker.getHotFiles(since_minutes || 30);
|
|
334
|
-
return { content: [{ type: "text", text: JSON.stringify(files) }] };
|
|
335
|
-
});
|
|
336
|
-
server.tool("get_session_files", "Get files modified in a session", {
|
|
337
|
-
session_id: z.string(),
|
|
338
|
-
}, async ({ session_id }) => {
|
|
339
|
-
const files = fileTracker.getBySession(session_id);
|
|
340
|
-
return { content: [{ type: "text", text: JSON.stringify(files) }] };
|
|
341
|
-
});
|
|
342
|
-
server.tool("check_file_conflict", "Check if another agent is editing a file", {
|
|
343
|
-
file_path: z.string(),
|
|
344
|
-
agent_id: z.string(),
|
|
345
|
-
within_minutes: z.number().optional(),
|
|
346
|
-
}, async ({ file_path, agent_id, within_minutes }) => {
|
|
347
|
-
const result = fileTracker.checkFileConflict(file_path, agent_id, within_minutes || 30);
|
|
348
|
-
return { content: [{ type: "text", text: JSON.stringify(result) }] };
|
|
349
|
-
});
|
|
350
|
-
// ── DEPENDENCY MAP TOOLS ──
|
|
351
|
-
server.tool("set_dependency_map", "Load module dependency graph", {
|
|
352
|
-
modules: z.string(), // JSON DependencyMap
|
|
353
|
-
}, async ({ modules }) => {
|
|
354
|
-
const map = JSON.parse(modules);
|
|
355
|
-
depMap.setMap(map);
|
|
356
|
-
return { content: [{ type: "text", text: "ok" }] };
|
|
357
|
-
});
|
|
358
|
-
server.tool("get_blast_radius", "Calculate impact of changes to a module", {
|
|
359
|
-
module_id: z.string(),
|
|
360
|
-
}, async ({ module_id }) => {
|
|
361
|
-
const radius = depMap.getBlastRadius(module_id);
|
|
362
|
-
return { content: [{ type: "text", text: JSON.stringify(radius) }] };
|
|
363
|
-
});
|
|
364
|
-
server.tool("get_module_info", "Get module dependency info", {
|
|
365
|
-
module_id: z.string(),
|
|
366
|
-
}, async ({ module_id }) => {
|
|
367
|
-
const info = depMap.getModuleInfo(module_id);
|
|
368
|
-
return { content: [{ type: "text", text: JSON.stringify(info) }] };
|
|
369
|
-
});
|
|
370
|
-
// ── STATUS TOOL ──
|
|
371
|
-
server.tool("coordinator_status", "Full system status", {}, async () => {
|
|
372
|
-
const online = registry.listOnline();
|
|
373
|
-
const openThreads = consultation.listThreads({ status: "open" });
|
|
374
|
-
const resolvingThreads = consultation.listThreads({ status: "resolving" });
|
|
375
|
-
const hotFiles = fileTracker.getHotFiles(30);
|
|
376
|
-
const status = {
|
|
377
|
-
agents_online: online.length,
|
|
378
|
-
agents: online.map((a) => ({ id: a.id, name: a.name, modules: JSON.parse(a.modules) })),
|
|
379
|
-
open_threads: openThreads.length,
|
|
380
|
-
resolving_threads: resolvingThreads.length,
|
|
381
|
-
hot_files: hotFiles.length,
|
|
382
|
-
mqtt_connected: mqttBridge.isConnected(),
|
|
383
|
-
};
|
|
384
|
-
mcpLog.debug({ tool: "coordinator_status", agents_online: online.length, open_threads: openThreads.length }, "Tool called");
|
|
385
|
-
return { content: [{ type: "text", text: JSON.stringify(status) }] };
|
|
386
|
-
});
|
|
387
|
-
// ── COORDINATION HELPERS ──
|
|
388
|
-
server.tool("wait_for_peers", "Block until at least N other online agents are registered, or timeout. Use before the first announce_work to avoid the race where one agent announces before peers have booted.", {
|
|
389
|
-
agent_id: z.string(),
|
|
390
|
-
min_peers: z.number().optional(),
|
|
391
|
-
timeout_seconds: z.number().optional(),
|
|
392
|
-
}, async ({ agent_id, min_peers, timeout_seconds }) => {
|
|
393
|
-
const targetPeers = min_peers ?? 1;
|
|
394
|
-
const timeoutMs = (timeout_seconds ?? 30) * 1000;
|
|
395
|
-
const pollIntervalMs = 1000;
|
|
396
|
-
const startedAt = Date.now();
|
|
397
|
-
mcpLog.info({ tool: "wait_for_peers", agent_id, min_peers: targetPeers, timeout_seconds: timeoutMs / 1000 }, "Tool called");
|
|
398
|
-
while (Date.now() - startedAt < timeoutMs) {
|
|
399
|
-
const peers = registry.listOnline().filter((a) => a.id !== agent_id);
|
|
400
|
-
if (peers.length >= targetPeers) {
|
|
401
|
-
return {
|
|
402
|
-
content: [{
|
|
403
|
-
type: "text",
|
|
404
|
-
text: JSON.stringify({
|
|
405
|
-
ready: true,
|
|
406
|
-
online_peers: peers.map((p) => ({ id: p.id, name: p.name })),
|
|
407
|
-
waited_ms: Date.now() - startedAt,
|
|
408
|
-
}),
|
|
409
|
-
}],
|
|
410
|
-
};
|
|
411
|
-
}
|
|
412
|
-
await new Promise((r) => setTimeout(r, pollIntervalMs));
|
|
413
|
-
}
|
|
414
|
-
const finalPeers = registry.listOnline().filter((a) => a.id !== agent_id);
|
|
415
|
-
return {
|
|
416
|
-
content: [{
|
|
417
|
-
type: "text",
|
|
418
|
-
text: JSON.stringify({
|
|
419
|
-
ready: false,
|
|
420
|
-
timeout: true,
|
|
421
|
-
online_peers: finalPeers.map((p) => ({ id: p.id, name: p.name })),
|
|
422
|
-
waited_ms: Date.now() - startedAt,
|
|
423
|
-
}),
|
|
424
|
-
}],
|
|
425
|
-
};
|
|
426
|
-
});
|
|
427
|
-
// ── MQTT LISTENER TOOLS (replaces standalone mqtt-mcp-bridge) ──
|
|
428
|
-
server.tool("wait_for_message", "Block until an MQTT consultation message arrives or timeout", {
|
|
429
|
-
agent_id: z.string(),
|
|
430
|
-
timeout_seconds: z.number().optional(),
|
|
431
|
-
}, async ({ agent_id, timeout_seconds }) => {
|
|
432
|
-
const timeoutMs = (timeout_seconds || 15) * 1000;
|
|
433
|
-
const msg = await mqttBridge.waitForMessage(agent_id, timeoutMs);
|
|
434
|
-
if (msg) {
|
|
435
|
-
return { content: [{ type: "text", text: JSON.stringify(msg) }] };
|
|
436
|
-
}
|
|
437
|
-
return { content: [{ type: "text", text: JSON.stringify({ timeout: true }) }] };
|
|
438
|
-
});
|
|
439
|
-
server.tool("get_queued_messages", "Get all queued MQTT messages without blocking", {
|
|
440
|
-
agent_id: z.string(),
|
|
441
|
-
}, async ({ agent_id }) => {
|
|
442
|
-
const messages = mqttBridge.getQueuedMessages(agent_id);
|
|
443
|
-
return { content: [{ type: "text", text: JSON.stringify(messages) }] };
|
|
444
|
-
});
|
|
445
|
-
server.tool("mqtt_publish", "Publish a message to an MQTT topic", {
|
|
446
|
-
topic: z.string(),
|
|
447
|
-
payload: z.string(),
|
|
448
|
-
}, async ({ topic, payload }) => {
|
|
449
|
-
mqttBridge.mqttPublish(topic, payload);
|
|
450
|
-
return { content: [{ type: "text", text: "published" }] };
|
|
451
|
-
});
|
|
94
|
+
// S1: all 23 MCP tools registered via per-domain modules under src/tools/.
|
|
95
|
+
// Each register*Tools function takes (server, services, mcpLog) and wires
|
|
96
|
+
// its tool group; nothing else lives here. See src/tools/*.ts for behavior.
|
|
97
|
+
registerAgentTools(server, services, mcpLog);
|
|
98
|
+
registerConsultationTools(server, services, mcpLog);
|
|
99
|
+
registerFilesTools(server, services, mcpLog);
|
|
100
|
+
registerDependenciesTools(server, services, mcpLog);
|
|
101
|
+
registerStatusTools(server, services, mcpLog);
|
|
102
|
+
registerMqttTools(server, services, mcpLog);
|
|
452
103
|
return server;
|
|
453
104
|
}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
2
|
+
import type { CoordinatorServices } from "../server-setup.js";
|
|
3
|
+
import type { Logger } from "../logger.js";
|
|
4
|
+
/**
|
|
5
|
+
* S1: agent registry MCP tools (4 tools).
|
|
6
|
+
* register_agent, list_agents, heartbeat, agent_activity.
|
|
7
|
+
*/
|
|
8
|
+
export declare function registerAgentTools(server: McpServer, services: CoordinatorServices, mcpLog: Logger): void;
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
/**
|
|
3
|
+
* S1: agent registry MCP tools (4 tools).
|
|
4
|
+
* register_agent, list_agents, heartbeat, agent_activity.
|
|
5
|
+
*/
|
|
6
|
+
export function registerAgentTools(server, services, mcpLog) {
|
|
7
|
+
const { registry, activityTracker, sseEmitter, mqttBridge } = services;
|
|
8
|
+
server.tool("register_agent", "Register agent as online with module list", {
|
|
9
|
+
agent_id: z.string(),
|
|
10
|
+
name: z.string(),
|
|
11
|
+
modules: z.array(z.string()),
|
|
12
|
+
}, async ({ agent_id, name, modules }) => {
|
|
13
|
+
mcpLog.info({ tool: "register_agent", agent_id, name, module_count: modules.length }, "Tool called");
|
|
14
|
+
const agent = registry.register(agent_id, name, modules);
|
|
15
|
+
sseEmitter.emit("agent_online", { agent_id, name, modules });
|
|
16
|
+
mqttBridge.registerAgent(agent_id, name);
|
|
17
|
+
return { content: [{ type: "text", text: JSON.stringify(agent) }] };
|
|
18
|
+
});
|
|
19
|
+
server.tool("list_agents", "List registered agents", {
|
|
20
|
+
online_only: z.boolean().optional(),
|
|
21
|
+
}, async ({ online_only }) => {
|
|
22
|
+
const agents = online_only ? registry.listOnline() : registry.listAll();
|
|
23
|
+
return { content: [{ type: "text", text: JSON.stringify(agents) }] };
|
|
24
|
+
});
|
|
25
|
+
server.tool("heartbeat", "Update agent activity status and last seen timestamp", {
|
|
26
|
+
agent_id: z.string(),
|
|
27
|
+
current_file: z.string().optional(),
|
|
28
|
+
current_thread: z.string().optional(),
|
|
29
|
+
}, async ({ agent_id, current_file, current_thread }) => {
|
|
30
|
+
registry.heartbeat(agent_id);
|
|
31
|
+
activityTracker.heartbeat(agent_id, {
|
|
32
|
+
currentFile: current_file || null,
|
|
33
|
+
currentThread: current_thread || null,
|
|
34
|
+
});
|
|
35
|
+
const activity = activityTracker.getActivity(agent_id);
|
|
36
|
+
sseEmitter.emit("agent_activity", {
|
|
37
|
+
agent_id, activity_status: activity.activity_status,
|
|
38
|
+
current_file: activity.current_file, current_thread: activity.current_thread,
|
|
39
|
+
});
|
|
40
|
+
return { content: [{ type: "text", text: JSON.stringify(activity) }] };
|
|
41
|
+
});
|
|
42
|
+
server.tool("agent_activity", "Get activity status for all online agents", {}, async () => {
|
|
43
|
+
const activities = activityTracker.listAll({ idleAfterMinutes: 5 });
|
|
44
|
+
return { content: [{ type: "text", text: JSON.stringify(activities) }] };
|
|
45
|
+
});
|
|
46
|
+
}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
2
|
+
import type { CoordinatorServices } from "../server-setup.js";
|
|
3
|
+
import type { Logger } from "../logger.js";
|
|
4
|
+
/**
|
|
5
|
+
* S1 fix (partial): consultation domain tools extracted from server-setup.ts.
|
|
6
|
+
*
|
|
7
|
+
* Originally these 11 tools (announce_work, post_to_thread, propose/approve/
|
|
8
|
+
* contest_resolution, close/cancel_thread, get_thread, get_thread_updates,
|
|
9
|
+
* list_threads, log_action_summary) lived inline in createMcpServer's
|
|
10
|
+
* 420-line body. Extraction here:
|
|
11
|
+
* - reduces server-setup.ts by ~200 lines
|
|
12
|
+
* - groups related tools by domain (= one file per concern)
|
|
13
|
+
* - keeps behavior identical (no signature changes, no SSE/MQTT shape
|
|
14
|
+
* changes — verified by all existing tests)
|
|
15
|
+
*
|
|
16
|
+
* Other tool groups (agents, files, dependencies, mqtt, status) remain in
|
|
17
|
+
* server-setup.ts under their existing section comments. Splitting them is
|
|
18
|
+
* straightforward following this pattern but kept out of this PR to minimize
|
|
19
|
+
* the diff for reviewers.
|
|
20
|
+
*/
|
|
21
|
+
export declare function registerConsultationTools(server: McpServer, services: CoordinatorServices, mcpLog: Logger): void;
|
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { getDb } from "../database.js";
|
|
3
|
+
import { runCommonAnnounceFlow } from "../announce-workflow.js";
|
|
4
|
+
/**
|
|
5
|
+
* S1 fix (partial): consultation domain tools extracted from server-setup.ts.
|
|
6
|
+
*
|
|
7
|
+
* Originally these 11 tools (announce_work, post_to_thread, propose/approve/
|
|
8
|
+
* contest_resolution, close/cancel_thread, get_thread, get_thread_updates,
|
|
9
|
+
* list_threads, log_action_summary) lived inline in createMcpServer's
|
|
10
|
+
* 420-line body. Extraction here:
|
|
11
|
+
* - reduces server-setup.ts by ~200 lines
|
|
12
|
+
* - groups related tools by domain (= one file per concern)
|
|
13
|
+
* - keeps behavior identical (no signature changes, no SSE/MQTT shape
|
|
14
|
+
* changes — verified by all existing tests)
|
|
15
|
+
*
|
|
16
|
+
* Other tool groups (agents, files, dependencies, mqtt, status) remain in
|
|
17
|
+
* server-setup.ts under their existing section comments. Splitting them is
|
|
18
|
+
* straightforward following this pattern but kept out of this PR to minimize
|
|
19
|
+
* the diff for reviewers.
|
|
20
|
+
*/
|
|
21
|
+
/**
 * Registers the consultation-thread MCP tools (12 tools) on the given server:
 * announce_work, post_to_thread, propose_resolution, approve_resolution,
 * contest_resolution, close_thread, cancel_thread, get_thread,
 * get_thread_updates, list_threads, log_action_summary.
 *
 * @param server  MCP server instance exposing `tool(name, description, schema, handler)`.
 * @param services Bag of coordinator services; only registry, consultation,
 *                 conflictDetector, contextProvider, sseEmitter and mqttBridge are used here.
 * @param mcpLog  Structured logger (pino-style: `info(obj, msg)` / `debug(obj, msg)`).
 */
export function registerConsultationTools(server, services, mcpLog) {
    const { registry, consultation, conflictDetector, contextProvider, sseEmitter, mqttBridge } = services;
    // announce_work: detect conflicts, open the thread, persist conflicts,
    // run the shared announce flow, then fan out via SSE + MQTT.
    server.tool("announce_work", "Open a consultation thread before starting work", {
        agent_id: z.string(),
        subject: z.string(),
        plan: z.string().optional(),
        target_modules: z.array(z.string()),
        target_files: z.array(z.string()),
        depends_on_files: z.array(z.string()).optional(),
        exports_affected: z.array(z.string()).optional(),
        keep_open: z.boolean().optional().describe("Keep thread open even if no agents are concerned (for manual coordination like games or debates)"),
        assigned_to: z.string().optional().describe("Directed-dispatch: only this agent_id will be allowed to claim the thread. Use for lead→worker handoffs in maitre/chaine/relais presets. Implies keep_open=true."),
    }, async ({ agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open, assigned_to }) => {
        mcpLog.info({ tool: "announce_work", agent_id, subject, target_modules, target_files, assigned_to }, "Tool called");
        // Conflicts are computed against the pre-announce state, before this
        // thread itself is recorded.
        const conflicts = conflictDetector.detect({ agent_id, target_modules, target_files });
        const thread = consultation.announceWork({
            agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open, assigned_to,
        });
        if (conflicts.length > 0) {
            // Persist detected conflicts directly on the thread row (JSON column).
            getDb().prepare("UPDATE threads SET conflicts = ? WHERE id = ?")
                .run(JSON.stringify(conflicts), thread.id);
        }
        // Shared announce pipeline (also used by the REST path) — yields the
        // refreshed thread, impact categorization, expected respondents, and
        // a plan-quality assessment.
        const { updated, categorized, respondents, planQuality } = runCommonAnnounceFlow(services, thread.id, {
            agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open,
        });
        sseEmitter.emit("thread_opened", {
            thread_id: thread.id, initiator: agent_id, subject, target_modules, conflicts,
            expected_respondents: respondents,
            mode: planQuality.mode,
            plan: plan || null,
            plan_quality: planQuality,
        });
        mqttBridge.publishConsultation(thread.id, agent_id, subject, target_modules);
        // Give the initiator each respondent's relevant context up front;
        // drop respondents with no overlapping modules.
        const contextForInitiator = respondents.map((rid) => contextProvider.getRelevantContext(rid, { thread_id: updated.id, subject, target_modules, target_files })).filter((ctx) => ctx.modules.length > 0);
        return {
            content: [{
                    type: "text",
                    text: JSON.stringify({ thread: updated, conflicts, context: contextForInitiator, impact: categorized }),
                }],
        };
    });
    // post_to_thread: append a message, then broadcast it over SSE and MQTT.
    server.tool("post_to_thread", "Post a message to a consultation thread", {
        thread_id: z.string(),
        agent_id: z.string(),
        agent_name: z.string().optional(),
        type: z.enum(["context", "suggestion", "warning"]),
        content: z.string(),
        context_snapshot: z.string().optional(),
        in_reply_to: z.string().optional(),
    }, async ({ thread_id, agent_id, agent_name, type, content, context_snapshot, in_reply_to }) => {
        mcpLog.info({ tool: "post_to_thread", thread_id, agent_id, type }, "Tool called");
        const msg = consultation.postToThread({
            thread_id, agent_id, agent_name, type, content, context_snapshot, in_reply_to,
        });
        // Re-read the thread to report the current round in the SSE event.
        const thread = consultation.getThread(thread_id);
        sseEmitter.emit("message_posted", {
            thread_id, agent_id, agent_name: agent_name || agent_id,
            type, content, round: thread?.round || 1,
            token_estimate: msg.token_estimate || 0,
        });
        mqttBridge.publishMessage(thread_id, agent_id, type, content);
        return { content: [{ type: "text", text: JSON.stringify(msg) }] };
    });
    // propose_resolution: record the proposal and notify listeners.
    // NOTE(review): the schema accepts an optional `plan`, but the handler
    // never reads it — confirm whether it should be forwarded to
    // consultation.proposeResolution or dropped from the schema.
    server.tool("propose_resolution", "Propose a resolution for the consultation", {
        thread_id: z.string(),
        agent_id: z.string(),
        summary: z.string(),
        plan: z.string().optional(),
    }, async ({ thread_id, agent_id, summary }) => {
        mcpLog.info({ tool: "propose_resolution", thread_id, agent_id }, "Tool called");
        consultation.proposeResolution(thread_id, agent_id, summary);
        sseEmitter.emit("resolution_proposed", { thread_id, agent_id, summary });
        mqttBridge.publishResolution(thread_id, "resolving", summary);
        // Return the thread in its post-proposal state.
        const thread = consultation.getThread(thread_id);
        return { content: [{ type: "text", text: JSON.stringify(thread) }] };
    });
    // approve_resolution: approve under the agent's registered display name
    // when one exists (registry lookup may miss; name is then undefined).
    server.tool("approve_resolution", "Approve the proposed resolution", {
        thread_id: z.string(),
        agent_id: z.string(),
    }, async ({ thread_id, agent_id }) => {
        mcpLog.info({ tool: "approve_resolution", thread_id, agent_id }, "Tool called");
        const agentInfo = registry.get(agent_id);
        consultation.approveResolution(thread_id, agent_id, agentInfo?.name);
        const thread = consultation.getThread(thread_id);
        return { content: [{ type: "text", text: JSON.stringify(thread) }] };
    });
    // contest_resolution: reject the proposal with a stated reason.
    server.tool("contest_resolution", "Contest the proposed resolution", {
        thread_id: z.string(),
        agent_id: z.string(),
        reason: z.string(),
    }, async ({ thread_id, agent_id, reason }) => {
        mcpLog.info({ tool: "contest_resolution", thread_id, agent_id }, "Tool called");
        consultation.contestResolution(thread_id, agent_id, reason);
        const thread = consultation.getThread(thread_id);
        return { content: [{ type: "text", text: JSON.stringify(thread) }] };
    });
    // close_thread: finalize with a summary. No SSE event is emitted here
    // (unlike cancel_thread below) — presumably the consultation service
    // handles close notifications itself; confirm before relying on it.
    server.tool("close_thread", "Close a consultation thread", {
        thread_id: z.string(),
        agent_id: z.string(),
        summary: z.string(),
    }, async ({ thread_id, agent_id, summary }) => {
        mcpLog.info({ tool: "close_thread", thread_id, agent_id }, "Tool called");
        consultation.closeThread(thread_id, agent_id, summary);
        return { content: [{ type: "text", text: "closed" }] };
    });
    // cancel_thread: abort a thread (reason optional) and emit an SSE event.
    server.tool("cancel_thread", "Cancel a consultation thread", {
        thread_id: z.string(),
        agent_id: z.string(),
        reason: z.string().optional(),
    }, async ({ thread_id, agent_id, reason }) => {
        mcpLog.info({ tool: "cancel_thread", thread_id, agent_id }, "Tool called");
        consultation.cancelThread(thread_id, agent_id, reason);
        sseEmitter.emit("thread_cancelled", { thread_id, reason });
        return { content: [{ type: "text", text: "cancelled" }] };
    });
    // get_thread: read-only fetch of a thread plus its full message list.
    server.tool("get_thread", "Get a thread with all messages", {
        thread_id: z.string(),
    }, async ({ thread_id }) => {
        const result = consultation.getThreadWithMessages(thread_id);
        mcpLog.debug({ tool: "get_thread", thread_id, message_count: result?.messages.length }, "Tool called");
        return { content: [{ type: "text", text: JSON.stringify(result) }] };
    });
    // get_thread_updates: incremental poll; `since` is an optional timestamp
    // cursor (format defined by consultation.getThreadUpdates).
    server.tool("get_thread_updates", "Get new messages since timestamp", {
        agent_id: z.string(),
        since: z.string().optional(),
    }, async ({ agent_id, since }) => {
        const updates = consultation.getThreadUpdates(agent_id, since);
        return { content: [{ type: "text", text: JSON.stringify(updates) }] };
    });
    // list_threads: filtered listing; all filters optional and combinable.
    server.tool("list_threads", "List consultation threads", {
        status: z.string().optional(),
        agent_id: z.string().optional(),
        module: z.string().optional(),
        assigned_to_me: z.string().optional().describe("Filter to threads claimable by this agent_id: open pool (assigned_to NULL) OR directed to me. Use for worker agents receiving directed dispatches."),
    }, async ({ status, agent_id, module, assigned_to_me }) => {
        const threads = consultation.listThreads({ status, agent_id, module, assigned_to_me });
        mcpLog.debug({ tool: "list_threads", status, agent_id, module, assigned_to_me, result_count: threads.length }, "Tool called");
        return { content: [{ type: "text", text: JSON.stringify(threads) }] };
    });
    // log_action_summary: persist a one-line activity record and mirror it
    // to SSE so dashboards see agent activity in real time.
    server.tool("log_action_summary", "Log a one-liner summary of an action", {
        session_id: z.string(),
        agent_id: z.string(),
        file_path: z.string().optional(),
        summary: z.string(),
    }, async ({ session_id, agent_id, file_path, summary }) => {
        const result = consultation.logActionSummary({ session_id, agent_id, file_path, summary });
        sseEmitter.emit("action_summary", { agent_id, file_path, summary });
        return { content: [{ type: "text", text: JSON.stringify(result) }] };
    });
}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import type { CoordinatorServices } from "../server-setup.js";
import type { Logger } from "../logger.js";
/**
 * S1: dependency map MCP tools (3 tools):
 * `set_dependency_map`, `get_blast_radius`, `get_module_info`.
 *
 * @param server - MCP server the tools are registered on.
 * @param services - Coordinator service bag shared by all tool groups.
 * @param _mcpLog - Logger; underscore-prefixed, so presumably unused by this
 *   group's implementation — kept for signature parity with the other
 *   register*Tools functions.
 */
export declare function registerDependenciesTools(server: McpServer, services: CoordinatorServices, _mcpLog: Logger): void;
|