mcp-coordinator 0.3.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/README.md +14 -0
  2. package/dashboard/public/index.html +23 -0
  3. package/dist/cli/server/backup.d.ts +7 -0
  4. package/dist/cli/server/backup.js +162 -0
  5. package/dist/cli/server/index.js +5 -0
  6. package/dist/cli/server/restore.d.ts +2 -0
  7. package/dist/cli/server/restore.js +117 -0
  8. package/dist/cli/server/start.js +33 -0
  9. package/dist/src/announce-workflow.d.ts +1 -0
  10. package/dist/src/announce-workflow.js +28 -0
  11. package/dist/src/consultation.d.ts +8 -0
  12. package/dist/src/consultation.js +8 -0
  13. package/dist/src/database.js +65 -0
  14. package/dist/src/db-adapter.d.ts +30 -0
  15. package/dist/src/db-adapter.js +32 -1
  16. package/dist/src/dependency-map.js +2 -2
  17. package/dist/src/file-tracker.d.ts +12 -0
  18. package/dist/src/file-tracker.js +35 -2
  19. package/dist/src/git-cochange-builder.d.ts +32 -0
  20. package/dist/src/git-cochange-builder.js +238 -0
  21. package/dist/src/http/handle-health.d.ts +23 -0
  22. package/dist/src/http/handle-health.js +112 -0
  23. package/dist/src/http/handle-rest.js +83 -2
  24. package/dist/src/http/utils.d.ts +0 -4
  25. package/dist/src/http/utils.js +16 -2
  26. package/dist/src/impact-scorer.d.ts +5 -1
  27. package/dist/src/impact-scorer.js +182 -55
  28. package/dist/src/metrics.d.ts +88 -0
  29. package/dist/src/metrics.js +195 -0
  30. package/dist/src/mqtt-bridge.d.ts +19 -0
  31. package/dist/src/mqtt-bridge.js +53 -5
  32. package/dist/src/path-normalize.d.ts +17 -0
  33. package/dist/src/path-normalize.js +38 -0
  34. package/dist/src/serve-http.js +76 -3
  35. package/dist/src/server-setup.d.ts +8 -0
  36. package/dist/src/server-setup.js +31 -3
  37. package/dist/src/sse-emitter.d.ts +6 -0
  38. package/dist/src/sse-emitter.js +50 -2
  39. package/dist/src/tools/consultation-tools.js +4 -2
  40. package/dist/src/tree-sitter-extractor.d.ts +36 -0
  41. package/dist/src/tree-sitter-extractor.js +354 -0
  42. package/dist/src/working-files-tracker.d.ts +42 -0
  43. package/dist/src/working-files-tracker.js +111 -0
  44. package/package.json +20 -1
@@ -1,4 +1,5 @@
1
1
  import { getDb } from "./database.js";
2
+ import { withTransaction } from "./db-adapter.js";
2
3
  export class DependencyMapper {
3
4
  getMap() {
4
5
  const db = getDb();
@@ -20,12 +21,11 @@ export class DependencyMapper {
20
21
  VALUES (?, ?, ?, ?)
21
22
  ON CONFLICT(module_id) DO UPDATE SET
22
23
  depends_on = excluded.depends_on, exports = excluded.exports, owners = excluded.owners`);
23
- const tx = db.transaction(() => {
24
+ withTransaction(db, () => {
24
25
  for (const [id, info] of Object.entries(map)) {
25
26
  stmt.run(id, JSON.stringify(info.depends_on), JSON.stringify(info.exports), JSON.stringify(info.owners));
26
27
  }
27
28
  });
28
- tx();
29
29
  }
30
30
  getModuleInfo(moduleId) {
31
31
  const db = getDb();
@@ -6,6 +6,8 @@ export declare class FileTracker {
6
6
  agent_name?: string;
7
7
  tool_name: string;
8
8
  file_path: string;
9
+ content_hash?: string | null;
10
+ symbols_touched?: string[] | null;
9
11
  }): void;
10
12
  getBySession(sessionId: string): FileActivity[];
11
13
  getHotFiles(sinceMinutes?: number): {
@@ -17,5 +19,15 @@ export declare class FileTracker {
17
19
  conflict: boolean;
18
20
  agents: string[];
19
21
  };
22
+ /**
23
+ * P2 perf: batch lookup of recent file→agents activity. Replaces N
24
+ * `checkFileConflict` calls (one per file) with a single SQL query, then
25
+ * builds an in-memory reverse index. The impact scorer uses this so its
26
+ * per-file inner loop is O(1) Map.get() rather than O(F) SQL round-trips.
27
+ *
28
+ * Excludes the calling agent (so the scorer doesn't flag the announcer
29
+ * against themselves). Returns Map<file_path, Set<agent_id>>.
30
+ */
31
+ getFileToAgentsIndex(filePaths: string[], excludeAgentId: string, withinMinutes?: number): Map<string, Set<string>>;
20
32
  fileToModule(filePath: string): string;
21
33
  }
@@ -3,8 +3,9 @@ export class FileTracker {
3
3
  log(params) {
4
4
  const db = getDb();
5
5
  const module = this.fileToModule(params.file_path);
6
- db.prepare(`INSERT INTO file_activity (session_id, agent_id, agent_name, tool_name, file_path, module)
7
- VALUES (?, ?, ?, ?, ?, ?)`).run(params.session_id, params.agent_id, params.agent_name || null, params.tool_name, params.file_path, module);
6
+ db.prepare(`INSERT INTO file_activity
7
+ (session_id, agent_id, agent_name, tool_name, file_path, module, content_hash, symbols_touched)
8
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?)`).run(params.session_id, params.agent_id, params.agent_name || null, params.tool_name, params.file_path, module, params.content_hash || null, params.symbols_touched ? JSON.stringify(params.symbols_touched) : null);
8
9
  }
9
10
  getBySession(sessionId) {
10
11
  const db = getDb();
@@ -31,6 +32,38 @@ export class FileTracker {
31
32
  AND created_at > datetime('now', '-' || ? || ' minutes')`).all(filePath, agentId, withinMinutes);
32
33
  return { conflict: rows.length > 0, agents: rows.map((r) => r.agent_id) };
33
34
  }
35
+ /**
36
+ * P2 perf: batch lookup of recent file→agents activity. Replaces N
37
+ * `checkFileConflict` calls (one per file) with a single SQL query, then
38
+ * builds an in-memory reverse index. The impact scorer uses this so its
39
+ * per-file inner loop is O(1) Map.get() rather than O(F) SQL round-trips.
40
+ *
41
+ * Excludes the calling agent (so the scorer doesn't flag the announcer
42
+ * against themselves). Returns Map<file_path, Set<agent_id>>.
43
+ */
44
+ getFileToAgentsIndex(filePaths, excludeAgentId, withinMinutes = 30) {
45
+ const index = new Map();
46
+ if (filePaths.length === 0)
47
+ return index;
48
+ const db = getDb();
49
+ // Dynamic IN-list — better-sqlite3 binds each ? positionally. Cheap because
50
+ // the impact scorer only passes target_files + depends_on_files (typically
51
+ // a handful of files per announce_work call).
52
+ const placeholders = filePaths.map(() => "?").join(",");
53
+ const rows = db.prepare(`SELECT DISTINCT file_path, agent_id FROM file_activity
54
+ WHERE file_path IN (${placeholders})
55
+ AND agent_id != ?
56
+ AND created_at > datetime('now', '-' || ? || ' minutes')`).all(...filePaths, excludeAgentId, withinMinutes);
57
+ for (const r of rows) {
58
+ let set = index.get(r.file_path);
59
+ if (!set) {
60
+ set = new Set();
61
+ index.set(r.file_path, set);
62
+ }
63
+ set.add(r.agent_id);
64
+ }
65
+ return index;
66
+ }
34
67
  fileToModule(filePath) {
35
68
  // Strip leading / so "/server/src/x.ts" and "server/src/x.ts" produce the
36
69
  // same module name. Without this, split("/") on an absolute path yields
@@ -0,0 +1,32 @@
1
+ import { type Logger } from "./logger.js";
2
+ import type { Metrics } from "./metrics.js";
3
+ interface BuilderOpts {
4
+ repoRoot: string;
5
+ sinceDays?: number;
6
+ maxCount?: number;
7
+ timeoutMs?: number;
8
+ refreshMs?: number;
9
+ retryMs?: number;
10
+ logger?: Logger;
11
+ metrics?: Metrics;
12
+ }
13
+ export declare class GitCochangeBuilder {
14
+ private repoRoot;
15
+ private sinceDays;
16
+ private maxCount;
17
+ private timeoutMs;
18
+ private refreshMs;
19
+ private retryMs;
20
+ private log;
21
+ private metrics?;
22
+ private timer;
23
+ constructor(opts: BuilderOpts);
24
+ /** Build once. Resolves after persistence. */
25
+ build(): Promise<void>;
26
+ private runGitLog;
27
+ private parseLog;
28
+ /** Schedule a refresh loop. unref() so it doesn't keep the loop alive. */
29
+ startScheduler(): void;
30
+ stopScheduler(): void;
31
+ }
32
+ export {};
@@ -0,0 +1,238 @@
1
+ // src/git-cochange-builder.ts
2
+ import { spawn } from "child_process";
3
+ import { existsSync } from "fs";
4
+ import path from "path";
5
+ import { getDb } from "./database.js";
6
+ import { silentLogger } from "./logger.js";
7
+ const DEFAULT_DENYLIST = [
8
+ /package-lock\.json$/, /pnpm-lock\.yaml$/, /yarn\.lock$/, /\.lock$/,
9
+ /\/dist\//, /\/build\//, /\/\.next\//, /\/__snapshots__\//,
10
+ /\.min\.js$/, /\.map$/, /\/coverage\//, /\/node_modules\//, /\.snap$/,
11
+ ];
12
+ export class GitCochangeBuilder {
13
+ repoRoot;
14
+ sinceDays;
15
+ maxCount;
16
+ timeoutMs;
17
+ refreshMs;
18
+ retryMs;
19
+ log;
20
+ metrics;
21
+ timer = null;
22
+ constructor(opts) {
23
+ this.repoRoot = opts.repoRoot;
24
+ this.sinceDays = opts.sinceDays ?? 7;
25
+ this.maxCount = opts.maxCount ?? 2000;
26
+ this.timeoutMs = opts.timeoutMs ?? 5000;
27
+ this.refreshMs = opts.refreshMs ?? 1800000;
28
+ this.retryMs = opts.retryMs ?? 300000;
29
+ this.log = opts.logger || silentLogger;
30
+ this.metrics = opts.metrics;
31
+ }
32
+ /** Build once. Resolves after persistence. */
33
+ async build() {
34
+ const db = getDb();
35
+ const setMeta = (k, v) => db.prepare("INSERT OR REPLACE INTO git_cochange_meta (k, v) VALUES (?, ?)").run(k, v);
36
+ if (!existsSync(path.join(this.repoRoot, ".git"))) {
37
+ this.log.info({}, "Layer 4 unavailable: no .git");
38
+ setMeta("available", "false");
39
+ this.metrics?.gitCochangeBuilds.inc({ outcome: "failed" });
40
+ return;
41
+ }
42
+ if (existsSync(path.join(this.repoRoot, ".git", "shallow"))) {
43
+ this.log.info({}, "Layer 4 unavailable: shallow clone");
44
+ setMeta("available", "false");
45
+ this.metrics?.gitCochangeBuilds.inc({ outcome: "shallow_skipped" });
46
+ return;
47
+ }
48
+ let stdout = null;
49
+ try {
50
+ stdout = await this.runGitLog();
51
+ }
52
+ catch (err) {
53
+ this.log.warn({ err }, "git log failed");
54
+ setMeta("available", "false");
55
+ setMeta("last_error", String(err.message));
56
+ this.metrics?.gitCochangeBuilds.inc({ outcome: "failed" });
57
+ return;
58
+ }
59
+ if (stdout === "TIMEOUT") {
60
+ setMeta("available", "stale_partial");
61
+ this.log.warn({}, "git log timed out — Layer 4 stale_partial");
62
+ this.metrics?.gitCochangeBuilds.inc({ outcome: "timeout" });
63
+ return;
64
+ }
65
+ const { pairs, totalCommits } = this.parseLog(stdout);
66
+ // Dynamic predictor cap: any file appearing in > 40% of effective commits is
67
+ // excluded as a *predictor* (still allowed as a *target*). Prevents hotspot files
68
+ // like config or barrel index files from saturating co-change with every other file.
69
+ const PREDICTOR_CAP_RATIO = 0.4;
70
+ const fileCommitCount = new Map();
71
+ for (const key of pairs.keys()) {
72
+ const [a, b] = key.split("|");
73
+ fileCommitCount.set(a, (fileCommitCount.get(a) ?? 0) + (pairs.get(key) ?? 0));
74
+ fileCommitCount.set(b, (fileCommitCount.get(b) ?? 0) + (pairs.get(key) ?? 0));
75
+ }
76
+ const promiscuous = new Set();
77
+ for (const [file, count] of fileCommitCount) {
78
+ // count is total times the file appeared in any pair; max possible is roughly
79
+ // (totalCommits) per file. Use raw count / totalCommits as an approximation
80
+ // of the file's commit frequency.
81
+ if (totalCommits > 0 && count / totalCommits > PREDICTOR_CAP_RATIO) {
82
+ promiscuous.add(file);
83
+ }
84
+ }
85
+ if (promiscuous.size > 0) {
86
+ this.log.info({ count: promiscuous.size, files: Array.from(promiscuous) }, "Layer 4 dynamic predictor cap excluded files");
87
+ }
88
+ db.exec("DELETE FROM git_cochange");
89
+ const stmt = db.prepare("INSERT INTO git_cochange (file_a, file_b, count, total_commits, computed_at) VALUES (?, ?, ?, ?, datetime('now'))");
90
+ const insertMany = db.transaction(() => {
91
+ for (const [key, count] of pairs.entries()) {
92
+ const [a, b] = key.split("|");
93
+ // Skip pairs where EITHER file is a promiscuous predictor (file is allowed
94
+ // as target only, but a pair where it's a predictor is dropped). For the
95
+ // index entry to be useful, both files must be non-promiscuous.
96
+ if (promiscuous.has(a) || promiscuous.has(b))
97
+ continue;
98
+ if (a < b)
99
+ stmt.run(a, b, count, totalCommits);
100
+ }
101
+ });
102
+ insertMany();
103
+ setMeta("available", "true");
104
+ setMeta("last_built_at", new Date().toISOString());
105
+ this.metrics?.gitCochangeBuilds.inc({ outcome: "success" });
106
+ this.metrics?.gitCochangePairs.set(pairs.size);
107
+ }
108
+ runGitLog() {
109
+ return new Promise((resolve, reject) => {
110
+ const args = [
111
+ "log",
112
+ `--max-count=${this.maxCount}`,
113
+ "--diff-filter=AMRD",
114
+ `--since=${this.sinceDays} days ago`,
115
+ "--no-renames",
116
+ "--pretty=format:%H",
117
+ "--name-only",
118
+ "-z",
119
+ ];
120
+ const proc = spawn("git", args, { cwd: this.repoRoot });
121
+ let buf = "";
122
+ const timer = setTimeout(() => {
123
+ proc.kill();
124
+ resolve("TIMEOUT");
125
+ }, this.timeoutMs);
126
+ proc.stdout.on("data", (c) => (buf += c.toString("utf-8")));
127
+ proc.on("error", (err) => { clearTimeout(timer); reject(err); });
128
+ proc.on("close", (code) => {
129
+ clearTimeout(timer);
130
+ if (code === 0)
131
+ resolve(buf);
132
+ else
133
+ reject(new Error(`git log exit ${code}`));
134
+ });
135
+ });
136
+ }
137
+ parseLog(stdout) {
138
+ // git log -z --pretty=format:%H --name-only output format:
139
+ // Each commit entry: <SHA>\n<file1>\0<file2>\0...\0
140
+ // Between commits the NUL separator also acts as delimiter.
141
+ // We split on NUL first, then detect SHA boundaries within tokens.
142
+ const tokens = stdout.split("\0").filter(t => t.length > 0);
143
+ const pairs = new Map();
144
+ let totalCommits = 0;
145
+ let currentFiles = [];
146
+ const flush = () => {
147
+ if (currentFiles.length === 0)
148
+ return;
149
+ // Skip massive commits (likely sweeps)
150
+ if (currentFiles.length > 200) {
151
+ currentFiles = [];
152
+ return;
153
+ }
154
+ // Apply denylist
155
+ const eligible = currentFiles.filter(f => !DEFAULT_DENYLIST.some(re => re.test(f)));
156
+ for (let i = 0; i < eligible.length; i++) {
157
+ for (let j = i + 1; j < eligible.length; j++) {
158
+ const [a, b] = eligible[i] < eligible[j] ? [eligible[i], eligible[j]] : [eligible[j], eligible[i]];
159
+ const key = `${a}|${b}`;
160
+ pairs.set(key, (pairs.get(key) ?? 0) + 1);
161
+ }
162
+ }
163
+ totalCommits++;
164
+ currentFiles = [];
165
+ };
166
+ // SHA pattern: 40 hex chars
167
+ const shaRe = /^([0-9a-f]{40})\n(.*)$/s;
168
+ for (const t of tokens) {
169
+ // Each token after splitting on \0 may look like:
170
+ // "\nSHA40\npath" (commit boundary with preceding newline)
171
+ // "SHA40\npath" (commit boundary at start)
172
+ // "path" (file path continuation)
173
+ // "\nSHA40" (SHA only, no file on same token)
174
+ // Strip leading newlines to normalize
175
+ const stripped = t.replace(/^\n+/, "");
176
+ const shaMatch = stripped.match(shaRe);
177
+ if (shaMatch) {
178
+ // We found a SHA — flush the previous commit's files
179
+ flush();
180
+ const trailingPath = shaMatch[2].trim();
181
+ if (trailingPath)
182
+ currentFiles.push(trailingPath);
183
+ }
184
+ else {
185
+ // Check if this token itself IS a SHA (no file attached, happens when
186
+ // --pretty=format:%H emits the SHA on its own NUL-terminated chunk)
187
+ const pureSha = stripped.match(/^[0-9a-f]{40}$/);
188
+ if (pureSha) {
189
+ flush();
190
+ }
191
+ else {
192
+ // It's a file path (or part of one); newlines indicate embedded commit
193
+ // boundaries when a file is on the same NUL chunk as the next SHA.
194
+ // Handle the case where "path\nSHA\npath" might appear.
195
+ const parts = stripped.split("\n");
196
+ for (let i = 0; i < parts.length; i++) {
197
+ const part = parts[i].trim();
198
+ if (!part)
199
+ continue;
200
+ if (/^[0-9a-f]{40}$/.test(part)) {
201
+ flush();
202
+ }
203
+ else {
204
+ currentFiles.push(part);
205
+ }
206
+ }
207
+ }
208
+ }
209
+ }
210
+ flush();
211
+ return { pairs, totalCommits };
212
+ }
213
+ /** Schedule a refresh loop. unref() so it doesn't keep the loop alive. */
214
+ startScheduler() {
215
+ const tick = async () => {
216
+ try {
217
+ await this.build();
218
+ this.timer = setTimeout(tick, this.refreshMs);
219
+ }
220
+ catch (err) {
221
+ this.log.warn({ err }, "build failed, retrying");
222
+ this.timer = setTimeout(tick, this.retryMs);
223
+ }
224
+ if (this.timer && typeof this.timer.unref === "function")
225
+ this.timer.unref();
226
+ };
227
+ // First build after 5s grace
228
+ this.timer = setTimeout(tick, 5000);
229
+ if (this.timer && typeof this.timer.unref === "function")
230
+ this.timer.unref();
231
+ }
232
+ stopScheduler() {
233
+ if (this.timer) {
234
+ clearTimeout(this.timer);
235
+ this.timer = null;
236
+ }
237
+ }
238
+ }
@@ -0,0 +1,23 @@
1
+ import type { IncomingMessage, ServerResponse } from "http";
2
+ import type { CoordinatorServices } from "../server-setup.js";
3
+ /**
4
+ * Liveness probe — process is alive. Always returns 200 with no dep checks
5
+ * so orchestrators don't restart the pod over transient downstream failures.
6
+ */
7
+ export declare function handleLivez(_req: IncomingMessage, res: ServerResponse): void;
8
+ /**
9
+ * Readiness probe — downstream deps must all be green for the LB to route
10
+ * traffic here. 503 when any check fails so the pod is drained until ready.
11
+ *
12
+ * Each check is wrapped in try/catch so a thrown DB/MQTT error becomes a
13
+ * structured `{ok:false,error:"…"}` instead of a 500. The response shape is
14
+ * identical between 200 and 503 so consumers can parse uniformly.
15
+ */
16
+ export declare function handleReadyz(_req: IncomingMessage, res: ServerResponse, services: Pick<CoordinatorServices, "mqttBridge" | "treeSitter" | "gitCochange">): void;
17
+ /**
18
+ * Backwards-compatible alias. The original /health route returned a fixed
19
+ * {status:"ok",version} payload with no dep checks; semantically that is a
20
+ * liveness probe, so we delegate. Anything that polled /health for "is the
21
+ * process up" continues to work without changes.
22
+ */
23
+ export declare function handleHealth(req: IncomingMessage, res: ServerResponse): void;
@@ -0,0 +1,112 @@
1
+ import { getDb } from "../database.js";
2
+ import { json } from "./utils.js";
3
+ import { getVersion } from "../../cli/version.js";
4
+ /**
5
+ * v0.4 Operability: Kubernetes-style health probes.
6
+ *
7
+ * - /livez → is the process alive? Used by an orchestrator (k8s, systemd,
8
+ * docker swarm) to decide whether to restart the pod. MUST NOT
9
+ * check downstream deps; an unreachable DB does not mean the
10
+ * coordinator process should be killed and restarted.
11
+ *
12
+ * - /readyz → are downstream deps ready? Used by a load balancer / service
13
+ * mesh to decide whether to add this pod to rotation. Returns 503
14
+ * when the DB or MQTT broker is not reachable so the LB drains
15
+ * traffic until the coordinator can actually serve it.
16
+ *
17
+ * - /health → backwards-compat alias for /livez. The original stub returned
18
+ * {status:"ok",version} unconditionally; preserving alive-only
19
+ * semantics keeps existing dashboards and uptime probes green
20
+ * without forcing them to migrate.
21
+ */
22
+ const STARTED_AT_MS = Date.now();
23
+ const VERSION = getVersion();
24
+ function uptimeSeconds() {
25
+ return Math.floor((Date.now() - STARTED_AT_MS) / 1000);
26
+ }
27
+ /**
28
+ * Liveness probe — process is alive. Always returns 200 with no dep checks
29
+ * so orchestrators don't restart the pod over transient downstream failures.
30
+ */
31
+ export function handleLivez(_req, res) {
32
+ json(res, {
33
+ status: "alive",
34
+ uptime_seconds: uptimeSeconds(),
35
+ version: VERSION,
36
+ });
37
+ }
38
+ /**
39
+ * Readiness probe — downstream deps must all be green for the LB to route
40
+ * traffic here. 503 when any check fails so the pod is drained until ready.
41
+ *
42
+ * Each check is wrapped in try/catch so a thrown DB/MQTT error becomes a
43
+ * structured `{ok:false,error:"…"}` instead of a 500. The response shape is
44
+ * identical between 200 and 503 so consumers can parse uniformly.
45
+ */
46
+ export function handleReadyz(_req, res, services) {
47
+ const checks = {
48
+ db: { ok: false },
49
+ mqtt: { ok: false },
50
+ tree_sitter: { ok: false, grammars_loaded: 0, total_grammars: 7, optional: true },
51
+ git_cochange: { available: false, status: "unavailable", optional: true },
52
+ };
53
+ try {
54
+ // Cheapest possible round-trip that exercises the connection without
55
+ // touching application tables. Throws if the handle is closed or the
56
+ // file is locked beyond busy_timeout.
57
+ getDb().prepare("SELECT 1").get();
58
+ checks.db.ok = true;
59
+ }
60
+ catch (err) {
61
+ checks.db.error = err.message;
62
+ }
63
+ try {
64
+ if (services.mqttBridge.isConnected()) {
65
+ checks.mqtt.ok = true;
66
+ }
67
+ else {
68
+ checks.mqtt.error = "not connected";
69
+ }
70
+ }
71
+ catch (err) {
72
+ checks.mqtt.error = err.message;
73
+ }
74
+ // Optional: tree-sitter status (does NOT gate readiness — Layer 0.5 degrades gracefully)
75
+ try {
76
+ if (services.treeSitter) {
77
+ checks.tree_sitter = services.treeSitter.status();
78
+ }
79
+ }
80
+ catch {
81
+ // keep default { ok: false, grammars_loaded: 0, total_grammars: 7, optional: true }
82
+ }
83
+ // Optional: git_cochange availability (does NOT gate readiness — Layer 4 degrades gracefully)
84
+ try {
85
+ const row = getDb()
86
+ .prepare("SELECT v FROM git_cochange_meta WHERE k = ?")
87
+ .get("available");
88
+ checks.git_cochange = {
89
+ available: row?.v === "true",
90
+ status: row?.v ?? "unavailable",
91
+ optional: true,
92
+ };
93
+ }
94
+ catch {
95
+ // keep default { available: false, status: "unavailable", optional: true }
96
+ }
97
+ // Gating: only db + mqtt block readiness. tree_sitter and git_cochange are reported but optional.
98
+ const allOk = checks.db.ok && checks.mqtt.ok;
99
+ json(res, {
100
+ status: allOk ? "ready" : "not_ready",
101
+ checks,
102
+ }, allOk ? 200 : 503);
103
+ }
104
+ /**
105
+ * Backwards-compatible alias. The original /health route returned a fixed
106
+ * {status:"ok",version} payload with no dep checks; semantically that is a
107
+ * liveness probe, so we delegate. Anything that polled /health for "is the
108
+ * process up" continues to work without changes.
109
+ */
110
+ export function handleHealth(req, res) {
111
+ return handleLivez(req, res);
112
+ }
@@ -1,3 +1,4 @@
1
+ import { createHash } from "crypto";
1
2
  import { getDb } from "../database.js";
2
3
  import { runCommonAnnounceFlow } from "../announce-workflow.js";
3
4
  import { canResetDb } from "../reset-guard.js";
@@ -5,7 +6,15 @@ import { parseBody, json } from "./utils.js";
5
6
  export async function handleRest(req, res, ctx) {
6
7
  const { services, httpLog, authEnabled, getRunConfig, setRunConfig } = ctx;
7
8
  const url = req.url || "";
8
- const body = await parseBody(req);
9
+ let body;
10
+ try {
11
+ body = await parseBody(req);
12
+ }
13
+ catch (err) {
14
+ const e = err;
15
+ json(res, { error: e.message || "Invalid request" }, e.statusCode || 400);
16
+ return;
17
+ }
9
18
  const agentId = body.agent_id;
10
19
  // Dashboard/work-stealing polls these endpoints every few seconds — demote to debug
11
20
  // to keep the info log focused on coordination events (announce, claim, resolve, etc).
@@ -61,7 +70,7 @@ export async function handleRest(req, res, ctx) {
61
70
  json(res, { ok: true });
62
71
  }
63
72
  else if (url === "/api/announce") {
64
- const { agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open, assigned_to } = body;
73
+ const { agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open, assigned_to, target_symbols } = body;
65
74
  const thread = consultation.announceWork({ agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open, assigned_to });
66
75
  const agentInfo = registry.get(agent_id);
67
76
  // S2 fix: shared workflow (impact scoring, override respondents, auto-resolve,
@@ -69,6 +78,7 @@ export async function handleRest(req, res, ctx) {
69
78
  // function used by the MCP announce_work tool path.
70
79
  const { updated, categorized, respondents, planQuality } = runCommonAnnounceFlow(services, thread.id, {
71
80
  agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open,
81
+ target_symbols,
72
82
  });
73
83
  // REST-specific thread_opened SSE shape (different field set than MCP — kept
74
84
  // divergent because consumers may depend on this exact contract).
@@ -358,6 +368,77 @@ export async function handleRest(req, res, ctx) {
358
368
  json(res, { registered: true, status: agent.status, activity: activity.activity_status });
359
369
  }
360
370
  }
371
+ else if (url === "/api/file-activity" && req.method === "POST") {
372
+ if (typeof body.session_id !== "string" || typeof body.agent_id !== "string"
373
+ || typeof body.tool_name !== "string" || typeof body.file_path !== "string") {
374
+ json(res, { error: "missing required fields" }, 400);
375
+ return;
376
+ }
377
+ if (body.agent_name !== undefined && typeof body.agent_name !== "string") {
378
+ json(res, { error: "agent_name must be string when present" }, 400);
379
+ return;
380
+ }
381
+ const MAX_CONTENT = 262144;
382
+ let symbols = null;
383
+ let contentHash = null;
384
+ if (typeof body.content === "string") {
385
+ if (body.content.length > MAX_CONTENT) {
386
+ json(res, { error: "content exceeds 256 KB" }, 400);
387
+ return;
388
+ }
389
+ contentHash = createHash("sha256").update(body.content).digest("hex");
390
+ symbols = ctx.services.treeSitter.extract(body.file_path, body.content, null);
391
+ }
392
+ ctx.services.fileTracker.log({
393
+ session_id: body.session_id,
394
+ agent_id: body.agent_id,
395
+ agent_name: body.agent_name,
396
+ tool_name: body.tool_name,
397
+ file_path: body.file_path,
398
+ content_hash: contentHash,
399
+ symbols_touched: symbols,
400
+ });
401
+ json(res, { ok: true });
402
+ }
403
+ else if (url === "/api/working-files/start" && req.method === "POST") {
404
+ if (typeof body.agent_id !== "string" || typeof body.file_path !== "string") {
405
+ json(res, { error: "agent_id and file_path required" }, 400);
406
+ return;
407
+ }
408
+ const ttl = parseInt(process.env.COORDINATOR_WORKING_FILES_TTL_MIN || "30", 10);
409
+ services.workingFiles.start(body.agent_id, body.file_path, ttl);
410
+ json(res, { ok: true });
411
+ }
412
+ else if (url === "/api/working-files/stop" && req.method === "POST") {
413
+ if (typeof body.agent_id !== "string" || typeof body.file_path !== "string") {
414
+ json(res, { error: "agent_id and file_path required" }, 400);
415
+ return;
416
+ }
417
+ services.workingFiles.stop(body.agent_id, body.file_path);
418
+ json(res, { ok: true });
419
+ }
420
+ else if (url?.startsWith("/api/scoring-stats") && req.method === "GET") {
421
+ const u = new URL(url, "http://localhost");
422
+ const sinceParam = u.searchParams.get("since") || "24h";
423
+ const sinceMin = sinceParam.endsWith("h") ? parseInt(sinceParam) * 60
424
+ : sinceParam.endsWith("d") ? parseInt(sinceParam) * 60 * 24
425
+ : 60 * 24;
426
+ const db = getDb();
427
+ const layers = db.prepare(`SELECT layer, COUNT(*) AS fire_count, AVG(score) AS avg_score
428
+ FROM layer_firings
429
+ WHERE fired_at > datetime('now', '-' || ? || ' minutes')
430
+ GROUP BY layer
431
+ ORDER BY fire_count DESC`).all(sinceMin);
432
+ json(res, {
433
+ window: { since: sinceParam, now: new Date().toISOString() },
434
+ layers: layers.map(l => ({
435
+ layer: l.layer,
436
+ fire_count: l.fire_count,
437
+ avg_score: l.avg_score,
438
+ outcomes: { auto_resolved: 0, consensus: 0, timeout: 0, cancelled: 0 },
439
+ })),
440
+ });
441
+ }
361
442
  else if (url === "/api/status") {
362
443
  const online = registry.listOnline();
363
444
  const openThreads = consultation.listThreads({ status: "open" });
@@ -1,8 +1,4 @@
1
1
  import type { IncomingMessage, ServerResponse } from "http";
2
- /**
3
- * S1: shared HTTP helpers extracted from serve-http.ts.
4
- * parseBody, json, decodeJwtPayload, safeEqual.
5
- */
6
2
  export declare function parseBody(req: IncomingMessage): Promise<Record<string, unknown>>;
7
3
  export declare function json(res: ServerResponse, data: unknown, status?: number): void;
8
4
  /**
@@ -3,11 +3,25 @@ import { timingSafeEqual } from "crypto";
3
3
  * S1: shared HTTP helpers extracted from serve-http.ts.
4
4
  * parseBody, json, decodeJwtPayload, safeEqual.
5
5
  */
6
+ const MAX_BODY_BYTES = parseInt(process.env.COORDINATOR_MAX_BODY_BYTES || "1048576", 10);
6
7
  export function parseBody(req) {
7
8
  return new Promise((resolve, reject) => {
8
- let body = "";
9
- req.on("data", (chunk) => (body += chunk.toString()));
9
+ let bytes = 0;
10
+ const chunks = [];
11
+ req.on("data", (chunk) => {
12
+ bytes += chunk.length;
13
+ if (bytes > MAX_BODY_BYTES) {
14
+ const err = new Error("Payload too large");
15
+ err.statusCode = 413;
16
+ // destroy() may not exist on every IncomingMessage-like input (test stub).
17
+ req.destroy?.(err);
18
+ reject(err);
19
+ return;
20
+ }
21
+ chunks.push(chunk);
22
+ });
10
23
  req.on("end", () => {
24
+ const body = Buffer.concat(chunks).toString("utf-8");
11
25
  try {
12
26
  resolve(body ? JSON.parse(body) : {});
13
27
  }