mcp-coordinator 0.3.0 → 0.4.0
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- package/dist/cli/server/backup.d.ts +7 -0
- package/dist/cli/server/backup.js +162 -0
- package/dist/cli/server/index.js +5 -0
- package/dist/cli/server/restore.d.ts +2 -0
- package/dist/cli/server/restore.js +117 -0
- package/dist/src/consultation.d.ts +8 -0
- package/dist/src/consultation.js +8 -0
- package/dist/src/db-adapter.d.ts +30 -0
- package/dist/src/db-adapter.js +32 -1
- package/dist/src/dependency-map.js +2 -2
- package/dist/src/file-tracker.d.ts +10 -0
- package/dist/src/file-tracker.js +32 -0
- package/dist/src/http/handle-health.d.ts +23 -0
- package/dist/src/http/handle-health.js +86 -0
- package/dist/src/impact-scorer.js +87 -50
- package/dist/src/metrics.d.ts +83 -0
- package/dist/src/metrics.js +162 -0
- package/dist/src/mqtt-bridge.d.ts +19 -0
- package/dist/src/mqtt-bridge.js +53 -5
- package/dist/src/serve-http.js +35 -1
- package/dist/src/server-setup.d.ts +2 -0
- package/dist/src/server-setup.js +10 -2
- package/dist/src/sse-emitter.d.ts +6 -0
- package/dist/src/sse-emitter.js +50 -2
- package/package.json +3 -1
package/dist/src/impact-scorer.js
CHANGED

@@ -1,3 +1,12 @@
+// Layer 0 (announced-intent) recency window. Resolved threads older than this
+// are excluded — yesterday's resolved work shouldn't trigger today's scoring.
+// Aligned with file-tracker's default conflict window per the audit guidance.
+const LAYER_0_WINDOW_MINUTES = 30;
+// Layer 1 / 2 (file-activity) recency window. Preserved at 60 minutes to keep
+// strict behavioral parity with the original scorer (the prior implementation
+// hard-coded 60 in the checkFileConflict calls). Performance optimizations
+// must not change scoring outcomes for existing callers.
+const FILE_ACTIVITY_WINDOW_MINUTES = 60;
 export class ImpactScorer {
     registry;
     fileTracker;
@@ -11,64 +20,92 @@ export class ImpactScorer {
         const onlineAgents = this.registry
             .listOnline()
             .filter((a) => a.id !== params.agent_id);
+        if (onlineAgents.length === 0)
+            return [];
+        // O1: cache parsed agent.modules JSON ONCE per scoring call.
+        // Previously each agent's modules were JSON.parse'd inside the hot path
+        // (Layer 3), which is O(A) parses for A agents. With Layer 0 also reading
+        // thread.target_files / depends_on_files per agent, the original code
+        // re-parsed agent state up to ~4·A times per call.
+        const moduleCache = new Map();
+        for (const a of onlineAgents) {
+            moduleCache.set(a.id, JSON.parse(a.modules));
+        }
+        // O3: pre-compute file → set<agent_id> for every file we'll inspect.
+        // Replaces N `checkFileConflict` calls (each = 1 SQL round-trip) with a
+        // single batched query, and turns the inner per-agent file check into
+        // an O(1) Set.has() lookup.
+        const filesToIndex = [
+            ...params.target_files,
+            ...(params.depends_on_files || []),
+        ];
+        const fileToAgents = filesToIndex.length > 0
+            ? this.fileTracker.getFileToAgentsIndex(filesToIndex, params.agent_id, FILE_ACTIVITY_WINDOW_MINUTES)
+            : new Map();
+        // O2: bound the resolved-thread query to a recency window. Without this,
+        // listThreads({status:'resolved'}) returns ALL historical resolved threads
+        // (unbounded growth). The Layer 0 filter only keeps threads where the
+        // initiator is the currently-evaluated agent, but the SQL still scanned
+        // every row before the JS filter ran. Since-bound at the SQL layer.
+        let activeThreadsByAgent = null;
+        if (this.consultation) {
+            const allActive = [
+                ...this.consultation.listThreads({ status: "open" }),
+                ...this.consultation.listThreads({ status: "resolving" }),
+                ...this.consultation.listThreads({ status: "resolved", since_minutes: LAYER_0_WINDOW_MINUTES }),
+            ];
+            // Group by initiator_id so the per-agent loop is O(threads-for-this-agent)
+            // rather than O(all-active-threads). Avoids an outer-product scan over
+            // (agents × threads) when both sets are large.
+            activeThreadsByAgent = new Map();
+            for (const t of allActive) {
+                const list = activeThreadsByAgent.get(t.initiator_id);
+                if (list) {
+                    list.push(t);
+                }
+                else {
+                    activeThreadsByAgent.set(t.initiator_id, [t]);
+                }
+            }
+        }
         return onlineAgents.map((agent) => {
-            const agentModules =
+            const agentModules = moduleCache.get(agent.id);
             const reasons = [];
             let maxScore = 0;
             // Layer 0: Announced intent overlap (checks active threads from this agent).
-            … (removed lines 19–41 are not rendered in the published diff)
-            ].filter((t) => t.initiator_id === agent.id);
-            for (const thread of activeThreads) {
-                const threadFiles = JSON.parse(thread.target_files || "[]");
-                const threadDeps = JSON.parse(thread.depends_on_files || "[]");
-                // 0a: My target_files ∩ their target_files → score 100
-                const fileOverlap = params.target_files.filter((f) => threadFiles.includes(f));
-                if (fileOverlap.length > 0) {
-                    maxScore = Math.max(maxScore, 100);
-                    reasons.push(`announced same file: ${fileOverlap.join(", ")} (thread ${thread.id.slice(0, 8)})`);
-                }
-                // 0b: My depends_on ∩ their target_files → score 80 (they modify what I depend on)
-                if (params.depends_on_files) {
-                    const depOverlap = params.depends_on_files.filter((f) => threadFiles.includes(f));
-                    if (depOverlap.length > 0) {
+            if (activeThreadsByAgent) {
+                const agentThreads = activeThreadsByAgent.get(agent.id);
+                if (agentThreads) {
+                    for (const thread of agentThreads) {
+                        const threadFiles = JSON.parse(thread.target_files || "[]");
+                        const threadDeps = JSON.parse(thread.depends_on_files || "[]");
+                        // 0a: My target_files ∩ their target_files → score 100
+                        const fileOverlap = params.target_files.filter((f) => threadFiles.includes(f));
+                        if (fileOverlap.length > 0) {
+                            maxScore = Math.max(maxScore, 100);
+                            reasons.push(`announced same file: ${fileOverlap.join(", ")} (thread ${thread.id.slice(0, 8)})`);
+                        }
+                        // 0b: My depends_on ∩ their target_files → score 80 (they modify what I depend on)
+                        if (params.depends_on_files) {
+                            const depOverlap = params.depends_on_files.filter((f) => threadFiles.includes(f));
+                            if (depOverlap.length > 0) {
+                                maxScore = Math.max(maxScore, 80);
+                                reasons.push(`modifies my dependency: ${depOverlap.join(", ")} (thread ${thread.id.slice(0, 8)})`);
+                            }
+                        }
+                        // 0c: My target_files ∩ their depends_on → score 80 (I modify what they depend on)
+                        const reverseDepOverlap = params.target_files.filter((f) => threadDeps.includes(f));
+                        if (reverseDepOverlap.length > 0) {
                             maxScore = Math.max(maxScore, 80);
-                        reasons.push(`
+                            reasons.push(`they depend on my target: ${reverseDepOverlap.join(", ")} (thread ${thread.id.slice(0, 8)})`);
                         }
                     }
-                // 0c: My target_files ∩ their depends_on → score 80 (I modify what they depend on)
-                const reverseDepOverlap = params.target_files.filter((f) => threadDeps.includes(f));
-                if (reverseDepOverlap.length > 0) {
-                    maxScore = Math.max(maxScore, 80);
-                    reasons.push(`they depend on my target: ${reverseDepOverlap.join(", ")} (thread ${thread.id.slice(0, 8)})`);
-                }
                 }
             }
-            // Layer 1: Same file recently modified (score 100)
+            // Layer 1: Same file recently modified (score 100) — uses pre-built index.
             for (const targetFile of params.target_files) {
-                const
-                if (
+                const agentsForFile = fileToAgents.get(targetFile);
+                if (agentsForFile && agentsForFile.has(agent.id)) {
                     maxScore = Math.max(maxScore, 100);
                     reasons.push(`same file: ${targetFile}`);
                 }
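The `since_minutes` option used in the O2 hunk above comes from consultation.js (+8 lines in this release), whose hunk is not rendered in this diff. A minimal sketch of what a SQL-level recency bound could look like, assuming a better-sqlite3 `threads` table with `status` and `resolved_at` columns (table and column names are assumptions, not the shipped code):

```js
// Hypothetical sketch of the since_minutes bound, not the shipped consultation.js.
function listThreads(db, { status, since_minutes } = {}) {
    if (status === "resolved" && since_minutes) {
        // Bound at the SQL layer so historical resolved rows are never scanned.
        return db
            .prepare("SELECT * FROM threads WHERE status = 'resolved' AND resolved_at >= datetime('now', ?)")
            .all(`-${since_minutes} minutes`);
    }
    return db.prepare("SELECT * FROM threads WHERE status = ?").all(status);
}
```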
@@ -76,8 +113,8 @@ export class ImpactScorer {
             // Layer 2: Depends-on file recently modified (score 80)
             if (params.depends_on_files) {
                 for (const depFile of params.depends_on_files) {
-                    const
-                    if (
+                    const agentsForFile = fileToAgents.get(depFile);
+                    if (agentsForFile && agentsForFile.has(agent.id)) {
                         maxScore = Math.max(maxScore, 80);
                         reasons.push(`depends on: ${depFile}`);
                     }
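Layers 1 and 2 both lean on `getFileToAgentsIndex`, which lands in file-tracker.js (+32 lines) but is not rendered in this diff. A sketch of the batched shape the scorer consumes; the table and column names (`file_activity`, `file_path`, `agent_id`, `modified_at`) are assumptions rather than the published schema:

```js
// Hypothetical shape of the batched index the scorer consumes. The return
// shape (Map<file, Set<agent_id>>) follows the scorer's Set.has() usage.
function getFileToAgentsIndex(db, files, excludeAgentId, windowMinutes) {
    if (files.length === 0)
        return new Map();
    const placeholders = files.map(() => "?").join(", ");
    const rows = db
        .prepare(`SELECT file_path, agent_id FROM file_activity
                  WHERE file_path IN (${placeholders})
                    AND agent_id != ?
                    AND modified_at >= datetime('now', ?)`)
        .all(...files, excludeAgentId, `-${windowMinutes} minutes`);
    const index = new Map(); // file → Set<agent_id>
    for (const { file_path, agent_id } of rows) {
        if (!index.has(file_path))
            index.set(file_path, new Set());
        index.get(file_path).add(agent_id);
    }
    return index; // one SQL round-trip total; O(1) membership checks after
}
```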
package/dist/src/metrics.d.ts
ADDED

@@ -0,0 +1,83 @@
+/**
+ * Prometheus /metrics endpoint for mcp-coordinator (v0.4 Operability fix).
+ *
+ * Audit gap: README claims "Production-ready" but only exposed /health stub.
+ * This module wires prom-client counters/gauges keyed off the events that
+ * already flow through the coordinator (announces, resolutions, MQTT
+ * publishes, REST requests, auth rejections) plus a snapshot of the live
+ * system state (agents, threads, MQTT listeners, SSE clients).
+ *
+ * Design notes:
+ * - Uses a per-instance `Registry` (not the global default registry) so that
+ *   multiple Coordinator instances in the same process (essaim's orchestrator
+ *   pattern) get isolated metric counters instead of cross-contaminating.
+ * - Gauges are pulled lazily from a `services` snapshot at scrape time via
+ *   `gaugeSnapshot()` — cheaper than maintaining mirror state and guarantees
+ *   the value matches the DB (no drift from missed events).
+ * - SSE clients and MQTT listeners aren't exposed as public counts on those
+ *   classes today. Callers update those gauges directly via `setSseClients`
+ *   / `setMqttListeners` from the request lifecycle (see integration patch).
+ */
+import type { IncomingMessage, ServerResponse } from "http";
+import { Registry, Counter, Gauge } from "prom-client";
+import type { CoordinatorServices } from "./server-setup.js";
+export type AnnounceResult = "thread_opened" | "auto_resolved";
+export type ResolutionType = "consensus" | "timeout" | "auto_resolved" | "agent_departure" | "max_rounds" | "closed";
+export interface MetricsOptions {
+    /**
+     * If true, also collect Node.js process metrics (CPU, memory, event-loop
+     * lag, etc.) via prom-client's collectDefaultMetrics. Default: true.
+     */
+    collectDefault?: boolean;
+}
+export declare class Metrics {
+    readonly registry: Registry;
+    readonly announces: Counter<"result">;
+    readonly threadsResolved: Counter<"type">;
+    readonly mqttPublishes: Counter<string>;
+    readonly httpRequests: Counter<"route" | "status">;
+    readonly authRejected: Counter<string>;
+    readonly agentsOnline: Gauge<string>;
+    readonly threadsOpen: Gauge<string>;
+    readonly threadsResolving: Gauge<string>;
+    readonly mqttListenersActive: Gauge<string>;
+    readonly sseClientsActive: Gauge<string>;
+    constructor(opts?: MetricsOptions);
+    recordAnnounce(result: AnnounceResult): void;
+    recordThreadResolved(type: ResolutionType): void;
+    recordMqttPublish(): void;
+    recordHttpRequest(route: string, status: number): void;
+    recordAuthRejected(): void;
+    setSseClients(n: number): void;
+    incSseClients(): void;
+    decSseClients(): void;
+    setMqttListeners(n: number): void;
+    /**
+     * Snapshot the gauges that derive from durable state (agents/threads).
+     * Called at scrape time so the values are fresh without us having to mirror
+     * every state transition. Safe to call repeatedly.
+     *
+     * Reads via the DB directly because AgentRegistry/Consultation don't yet
+     * expose count getters — adding them would touch unrelated modules.
+     */
+    gaugeSnapshot(services: CoordinatorServices): void;
+    /**
+     * Render the current registry as Prometheus text exposition format.
+     * Returns the body string + the content-type to set on the response.
+     */
+    render(): Promise<{
+        body: string;
+        contentType: string;
+    }>;
+}
+/**
+ * HTTP handler for GET /metrics. Refreshes the derived gauges from a
+ * services snapshot, then writes the prom-client text exposition.
+ *
+ * Wire from serve-http.ts:
+ *   if (url === "/metrics" && req.method === "GET") {
+ *     await serveMetrics(req, res, services, metrics);
+ *     return;
+ *   }
+ */
+export declare function serveMetrics(_req: IncomingMessage, res: ServerResponse, services: CoordinatorServices, metrics: Metrics): Promise<void>;
package/dist/src/metrics.js
ADDED

@@ -0,0 +1,162 @@
+import { Registry, Counter, Gauge, collectDefaultMetrics, } from "prom-client";
+import { getDb } from "./database.js";
+export class Metrics {
+    registry;
+    // Counters
+    announces;
+    threadsResolved;
+    mqttPublishes;
+    httpRequests;
+    authRejected;
+    // Gauges
+    agentsOnline;
+    threadsOpen;
+    threadsResolving;
+    mqttListenersActive;
+    sseClientsActive;
+    constructor(opts = {}) {
+        this.registry = new Registry();
+        if (opts.collectDefault !== false) {
+            collectDefaultMetrics({ register: this.registry });
+        }
+        this.announces = new Counter({
+            name: "mcp_coordinator_announces_total",
+            help: "Total announce_work calls, partitioned by outcome",
+            labelNames: ["result"],
+            registers: [this.registry],
+        });
+        this.threadsResolved = new Counter({
+            name: "mcp_coordinator_threads_resolved_total",
+            help: "Total threads resolved, partitioned by resolution type",
+            labelNames: ["type"],
+            registers: [this.registry],
+        });
+        this.mqttPublishes = new Counter({
+            name: "mcp_coordinator_mqtt_publishes_total",
+            help: "Total MQTT publishes by the coordinator bridge",
+            registers: [this.registry],
+        });
+        this.httpRequests = new Counter({
+            name: "mcp_coordinator_http_requests_total",
+            help: "Total HTTP requests handled, partitioned by route + status",
+            labelNames: ["route", "status"],
+            registers: [this.registry],
+        });
+        this.authRejected = new Counter({
+            name: "mcp_coordinator_auth_rejected_total",
+            help: "Total authentication rejections",
+            registers: [this.registry],
+        });
+        this.agentsOnline = new Gauge({
+            name: "mcp_coordinator_agents_online",
+            help: "Current number of agents reporting status=online",
+            registers: [this.registry],
+        });
+        this.threadsOpen = new Gauge({
+            name: "mcp_coordinator_threads_open",
+            help: "Current number of threads in status=open",
+            registers: [this.registry],
+        });
+        this.threadsResolving = new Gauge({
+            name: "mcp_coordinator_threads_resolving",
+            help: "Current number of threads in status=resolving",
+            registers: [this.registry],
+        });
+        this.mqttListenersActive = new Gauge({
+            name: "mcp_coordinator_mqtt_listeners_active",
+            help: "Current number of registered MQTT consultation listeners",
+            registers: [this.registry],
+        });
+        this.sseClientsActive = new Gauge({
+            name: "mcp_coordinator_sse_clients_active",
+            help: "Current number of connected SSE clients",
+            registers: [this.registry],
+        });
+    }
+    // ── Counter helpers (named methods make hook points obvious) ──
+    recordAnnounce(result) {
+        this.announces.inc({ result }, 1);
+    }
+    recordThreadResolved(type) {
+        this.threadsResolved.inc({ type }, 1);
+    }
+    recordMqttPublish() {
+        this.mqttPublishes.inc(1);
+    }
+    recordHttpRequest(route, status) {
+        this.httpRequests.inc({ route, status: String(status) }, 1);
+    }
+    recordAuthRejected() {
+        this.authRejected.inc(1);
+    }
+    // ── Gauge setters (called from request lifecycle, see integration patch) ──
+    setSseClients(n) {
+        this.sseClientsActive.set(n);
+    }
+    incSseClients() {
+        this.sseClientsActive.inc(1);
+    }
+    decSseClients() {
+        this.sseClientsActive.dec(1);
+    }
+    setMqttListeners(n) {
+        this.mqttListenersActive.set(n);
+    }
+    /**
+     * Snapshot the gauges that derive from durable state (agents/threads).
+     * Called at scrape time so the values are fresh without us having to mirror
+     * every state transition. Safe to call repeatedly.
+     *
+     * Reads via the DB directly because AgentRegistry/Consultation don't yet
+     * expose count getters — adding them would touch unrelated modules.
+     */
+    gaugeSnapshot(services) {
+        try {
+            this.agentsOnline.set(services.registry.listOnline().length);
+        }
+        catch {
+            // Registry not initialised yet (test bootstrap race) — leave gauge at 0.
+        }
+        try {
+            const db = getDb();
+            const open = db
+                .prepare("SELECT COUNT(*) as c FROM threads WHERE status = 'open'")
+                .get();
+            const resolving = db
+                .prepare("SELECT COUNT(*) as c FROM threads WHERE status = 'resolving'")
+                .get();
+            this.threadsOpen.set(open.c);
+            this.threadsResolving.set(resolving.c);
+        }
+        catch {
+            // DB not initialised — leave gauges at their last-set values.
+        }
+    }
+    /**
+     * Render the current registry as Prometheus text exposition format.
+     * Returns the body string + the content-type to set on the response.
+     */
+    async render() {
+        const body = await this.registry.metrics();
+        return { body, contentType: this.registry.contentType };
+    }
+}
+/**
+ * HTTP handler for GET /metrics. Refreshes the derived gauges from a
+ * services snapshot, then writes the prom-client text exposition.
+ *
+ * Wire from serve-http.ts:
+ *   if (url === "/metrics" && req.method === "GET") {
+ *     await serveMetrics(req, res, services, metrics);
+ *     return;
+ *   }
+ */
+export async function serveMetrics(_req, res, services, metrics) {
+    metrics.gaugeSnapshot(services);
+    const { body, contentType } = await metrics.render();
+    res.writeHead(200, {
+        "Content-Type": contentType,
+        "Cache-Control": "no-cache",
+    });
+    res.end(body);
+}
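The docblock above spells out the /metrics route; a fuller integration sketch, with the per-request counting that the `route`/`status` labels imply, might look like the following. The `buildHttpServer` wrapper is illustrative, not the shipped serve-http.js:

```js
// Illustrative wiring (assumed, not the published serve-http.js): count every
// response by route + status, and expose GET /metrics for a Prometheus scraper.
import { createServer } from "http";
import { createServices } from "./server-setup.js";
import { serveMetrics } from "./metrics.js";

export function buildHttpServer(config) {
    const services = createServices(config); // now includes services.metrics
    const { metrics } = services;
    return createServer(async (req, res) => {
        const route = new URL(req.url ?? "/", "http://localhost").pathname;
        // "finish" fires once the response is flushed, so statusCode is final.
        res.on("finish", () => metrics.recordHttpRequest(route, res.statusCode));
        if (route === "/metrics" && req.method === "GET") {
            await serveMetrics(req, res, services, metrics);
            return;
        }
        // ...the coordinator's existing routes would dispatch here...
        res.statusCode = 404;
        res.end();
    });
}
```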
package/dist/src/mqtt-bridge.d.ts
CHANGED

@@ -10,16 +10,35 @@ export declare class MqttBridge {
     private onOfflineHandler;
     private listeners;
     private log;
+    private agentId;
+    /**
+     * P1: track the last threadId we retained on `coordinator/consultations/new`.
+     * The topic is fixed (not per-thread), so retain holds only the LAST event.
+     * `clearRetainedConsultation(threadId)` only clears when it matches, so a
+     * later consultation isn't accidentally wiped by a stale resolve callback.
+     */
+    private lastRetainedConsultationThreadId;
     constructor(logger?: Logger);
     connect(config: {
         url: string;
         username?: string;
         password?: string;
+        agentId?: string;
     }): Promise<void>;
     isConnected(): boolean;
     onOffline(handler: (agentId: string) => void): void;
     registerAgent(agentId: string, name: string): void;
     publishConsultation(threadId: string, agentId: string, subject: string, targetModules: string[]): void;
+    /**
+     * P1 fix: clear the retained `coordinator/consultations/new` event when the
+     * matching thread resolves. The topic is fixed (not per-thread), so retain
+     * holds only the LAST consultation — clearing here means a coordinator
+     * restart after resolution doesn't re-broadcast a stale "new" event.
+     *
+     * No-op when the supplied threadId doesn't match the currently retained one
+     * (a newer consultation has already overwritten it).
+     */
+    clearRetainedConsultation(threadId: string): void;
     publishMessage(threadId: string, agentId: string, type: string, content: string): void;
     publishResolution(threadId: string, status: string, summary: string): void;
     publishBroadcast(agentId: string, message: string): void;
package/dist/src/mqtt-bridge.js
CHANGED
@@ -6,6 +6,14 @@ export class MqttBridge {
     onOfflineHandler = null;
     listeners = new Map();
     log;
+    agentId = "coordinator-internal";
+    /**
+     * P1: track the last threadId we retained on `coordinator/consultations/new`.
+     * The topic is fixed (not per-thread), so retain holds only the LAST event.
+     * `clearRetainedConsultation(threadId)` only clears when it matches, so a
+     * later consultation isn't accidentally wiped by a stale resolve callback.
+     */
+    lastRetainedConsultationThreadId = null;
     constructor(logger) {
         this.log = logger || silentLogger;
     }

@@ -14,11 +22,24 @@ export class MqttBridge {
         const timeout = setTimeout(() => {
             reject(new Error("MQTT connection timeout"));
         }, 5000);
+        // P1 fix: LWT requires a stable agent identifier. Default to
+        // "coordinator-internal" which matches the auth identity used by
+        // serve-http for the embedded broker bridge.
+        this.agentId = config.agentId || "coordinator-internal";
         this.client = mqtt.connect(config.url, {
-            clientId:
+            clientId: `${this.agentId}-${Date.now()}`,
             clean: true,
             username: config.username,
             password: config.password,
+            // P1 fix: register Last Will & Testament so a crashed/disconnected
+            // bridge automatically broadcasts offline status. Without this the
+            // agent appears online indefinitely after an unexpected disconnect.
+            will: {
+                topic: `coordinator/agents/${this.agentId}/status`,
+                payload: Buffer.from(JSON.stringify({ status: "offline", reason: "lwt_unexpected" })),
+                qos: 1,
+                retain: false,
+            },
         });
         this.client.on("connect", () => {
             clearTimeout(timeout);

@@ -80,17 +101,40 @@ export class MqttBridge {
     publishConsultation(threadId, agentId, subject, targetModules) {
         if (!this.client || !this.connected)
             return;
-        …
+        // P1 fix: QoS 1 (at-least-once) so consultation events survive transient
+        // disconnects. retain=true so a coordinator/subscriber restart can rebuild
+        // the active state without an event-history replay.
+        this.lastRetainedConsultationThreadId = threadId;
+        this.client.publish("coordinator/consultations/new", JSON.stringify({ thread_id: threadId, agent_id: agentId, subject, target_modules: targetModules }), { qos: 1, retain: true });
+    }
+    /**
+     * P1 fix: clear the retained `coordinator/consultations/new` event when the
+     * matching thread resolves. The topic is fixed (not per-thread), so retain
+     * holds only the LAST consultation — clearing here means a coordinator
+     * restart after resolution doesn't re-broadcast a stale "new" event.
+     *
+     * No-op when the supplied threadId doesn't match the currently retained one
+     * (a newer consultation has already overwritten it).
+     */
+    clearRetainedConsultation(threadId) {
+        if (!this.client || !this.connected)
+            return;
+        if (this.lastRetainedConsultationThreadId !== threadId)
+            return;
+        this.client.publish("coordinator/consultations/new", "", { qos: 1, retain: true });
+        this.lastRetainedConsultationThreadId = null;
     }
     publishMessage(threadId, agentId, type, content) {
         if (!this.client || !this.connected)
             return;
+        // QoS 0: high-frequency chat-style traffic, lossy-OK.
         this.client.publish(`coordinator/consultations/${threadId}/messages`, JSON.stringify({ agent_id: agentId, type, content }));
     }
     publishResolution(threadId, status, summary) {
         if (!this.client || !this.connected)
             return;
-        …
+        // P1 fix: QoS 1 (at-least-once) — resolution is a state-change event.
+        this.client.publish(`coordinator/consultations/${threadId}/status`, JSON.stringify({ status, summary }), { qos: 1, retain: true });
     }
     publishBroadcast(agentId, message) {
         if (!this.client || !this.connected)

@@ -105,12 +149,15 @@ export class MqttBridge {
     publishTaskClaimed(threadId, claimedBy) {
         if (!this.client || !this.connected)
             return;
-        …
+        // P1 fix: QoS 1 — claim is a coordination state-change. Loss would mean
+        // multiple agents think a task is unclaimed.
+        this.client.publish(`coordinator/consultations/${threadId}/claimed`, JSON.stringify({ agent_id: claimedBy, claimed_by: claimedBy, claimed_at: new Date().toISOString() }), { qos: 1 });
     }
     publishTaskCompleted(threadId, completedBy, summary) {
         if (!this.client || !this.connected)
             return;
-        …
+        // P1 fix: QoS 1 — completion is a coordination state-change.
+        this.client.publish(`coordinator/consultations/${threadId}/completed`, JSON.stringify({ agent_id: completedBy, completed_by: completedBy, summary }), { qos: 1 });
     }
     /**
      * Fanout a refreshed QuotaInfo to live subscribers (dashboard widget,

@@ -120,6 +167,7 @@ export class MqttBridge {
     publishQuotaUpdate(info) {
         if (!this.client || !this.connected)
             return;
+        // QoS 0: high-frequency telemetry, lossy-OK (the next refresh overwrites).
        this.client.publish("coordinator/quota/update", JSON.stringify(info));
     }
     // ── Agent listener methods (for integrated MCP tools) ──
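From the subscriber side, the retained `coordinator/consultations/new` event and the LWT status topic combine so a reconnecting agent can rebuild state without replaying history. An illustrative consumer; the topic names come from this diff, while the broker URL and handling logic are assumptions:

```js
// Illustrative consumer using the same `mqtt` package the bridge wraps.
import mqtt from "mqtt";

const client = mqtt.connect("mqtt://127.0.0.1:1883"); // URL assumed
client.subscribe("coordinator/consultations/new", { qos: 1 });
client.subscribe("coordinator/agents/+/status", { qos: 1 });

client.on("message", (topic, payload) => {
    if (topic === "coordinator/consultations/new") {
        // Retained: delivered immediately on (re)connect. A zero-length
        // payload is the clear published by clearRetainedConsultation().
        if (payload.length === 0)
            return;
        const { thread_id, subject } = JSON.parse(payload.toString());
        console.log(`active consultation ${thread_id}: ${subject}`);
        return;
    }
    const m = topic.match(/^coordinator\/agents\/(.+)\/status$/);
    if (m) {
        // The LWT registered in connect() arrives here when a bridge dies
        // without a clean DISCONNECT.
        const { status, reason } = JSON.parse(payload.toString());
        if (status === "offline")
            console.log(`agent ${m[1]} offline (${reason ?? "clean"})`);
    }
});
```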
package/dist/src/serve-http.js
CHANGED
@@ -167,6 +167,16 @@ function writeSseEvent(res, event) {
     const data = injectTimestamp(event.payload, event.created_at ?? new Date().toISOString());
     res.write(`id: ${event.id}\nevent: ${event.type}\ndata: ${data}\n\n`);
 }
+// P3: heartbeat interval in ms. Default 30s — well under nginx/Cloudflare's
+// typical 60s idle SSE timeout, but infrequent enough to add negligible
+// bandwidth (one ":keep-alive\n\n" comment is ~16 bytes).
+const SSE_HEARTBEAT_MS = (() => {
+    const raw = process.env.COORDINATOR_SSE_HEARTBEAT_MS;
+    if (!raw)
+        return 30_000;
+    const n = parseInt(raw, 10);
+    return Number.isFinite(n) && n > 0 ? n : 30_000;
+})();
 function handleSse(req, res) {
     res.writeHead(200, {
         "Content-Type": "text/event-stream",

@@ -186,7 +196,28 @@ function handleSse(req, res) {
     const unsubscribe = services.sseEmitter.addListener((event) => {
         writeSseEvent(res, event);
     });
-    …
+    // P3: heartbeat. Browsers ignore the `:` comment line per the SSE spec,
+    // but it counts as activity for intermediate proxies that would otherwise
+    // kill an idle connection after ~60s. Wrapped in try/catch because once
+    // the socket is half-closed res.write throws synchronously.
+    const heartbeat = setInterval(() => {
+        try {
+            res.write(":keep-alive\n\n");
+        }
+        catch {
+            // Connection already torn down — req.on("close") will clean up shortly.
+        }
+    }, SSE_HEARTBEAT_MS);
+    // Don't keep the event loop alive solely for heartbeats; without unref()
+    // a still-open SSE connection at process shutdown delays exit.
+    if (typeof heartbeat.unref === "function")
+        heartbeat.unref();
+    req.on("close", () => {
+        // P3: clear the interval BEFORE unsubscribing so a heartbeat tick that
+        // fires between close and unsubscribe can't write to a dead socket.
+        clearInterval(heartbeat);
+        unsubscribe();
+    });
 }
 export async function startServer(opts) {
     const port = opts?.port ?? PORT;
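On the wire the heartbeat is just a comment line, which conforming SSE clients drop before dispatching events. A browser-side sketch; the endpoint path and event name are assumptions, not taken from this diff:

```js
// Client-side sketch. ":keep-alive" lines never reach these handlers: per the
// SSE spec, lines starting with ":" are comments and are dropped by the parser.
const es = new EventSource("/events"); // path assumed
// The server writes named events (`event: <type>`), so listen by type;
// the type string here is hypothetical.
es.addEventListener("thread.resolved", (e) => {
    console.log(e.lastEventId, JSON.parse(e.data));
});
es.onerror = () => {
    // EventSource reconnects automatically, resuming from the last received
    // `id:` via the Last-Event-ID request header.
};
```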
@@ -380,6 +411,9 @@ export async function startServer(opts) {
         url: `mqtt://127.0.0.1:${mqttTcpPort}`,
         username: AUTH_ENABLED ? "coordinator-internal" : undefined,
         password: internalToken,
+        // P1 fix: stable agent identity for LWT topic
+        // (`coordinator/agents/coordinator-internal/status`).
+        agentId: "coordinator-internal",
     });
     services.mqttBridge.onOffline((agentId) => {
         services.registry.setOffline(agentId);
package/dist/src/server-setup.d.ts
CHANGED

@@ -11,6 +11,7 @@ import { SseEmitter } from "./sse-emitter.js";
 import { MqttBridge } from "./mqtt-bridge.js";
 import { AgentActivityTracker } from "./agent-activity.js";
 import { QuotaCache } from "./quota/quota-cache.js";
+import { Metrics } from "./metrics.js";
 import type { CoordinatorConfig } from "./types.js";
 import { type Logger } from "./logger.js";
 export interface CoordinatorServices {

@@ -27,6 +28,7 @@ export interface CoordinatorServices {
     sseEmitter: SseEmitter;
     mqttBridge: MqttBridge;
     quotaCache: QuotaCache;
+    metrics: Metrics;
 }
 /** Create shared services (once at startup). */
 export declare function createServices(config: CoordinatorConfig): CoordinatorServices;
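The per-instance `Registry` called out in metrics.d.ts is observable directly: two `Metrics` instances in one process keep independent counts. A small illustrative check; the rendered sample line is standard prom-client exposition format:

```js
// Illustrative check of registry isolation (ESM top-level await assumed).
import { Metrics } from "./metrics.js";

const a = new Metrics({ collectDefault: false });
const b = new Metrics({ collectDefault: false });
a.recordAnnounce("thread_opened");

// a's exposition now contains the sample below; b's registry is untouched:
//   mcp_coordinator_announces_total{result="thread_opened"} 1
const text = await a.registry.metrics();
console.log(text.includes('result="thread_opened"'));                      // true
console.log((await b.registry.metrics()).includes('result="thread_opened"')); // false
```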