mcp-coordinator 0.4.0 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +938 -846
- package/dashboard/Dockerfile +19 -19
- package/dashboard/public/index.html +1201 -1178
- package/dist/cli/server/start.js +33 -0
- package/dist/src/agent-activity.js +6 -6
- package/dist/src/agent-registry.js +6 -6
- package/dist/src/announce-workflow.d.ts +1 -0
- package/dist/src/announce-workflow.js +28 -0
- package/dist/src/consultation.js +20 -20
- package/dist/src/database.js +191 -126
- package/dist/src/dependency-map.js +3 -3
- package/dist/src/file-tracker.d.ts +2 -0
- package/dist/src/file-tracker.js +13 -12
- package/dist/src/git-cochange-builder.d.ts +32 -0
- package/dist/src/git-cochange-builder.js +238 -0
- package/dist/src/http/handle-health.d.ts +1 -1
- package/dist/src/http/handle-health.js +26 -0
- package/dist/src/http/handle-rest.js +98 -2
- package/dist/src/http/utils.d.ts +0 -4
- package/dist/src/http/utils.js +16 -2
- package/dist/src/impact-scorer.d.ts +5 -1
- package/dist/src/impact-scorer.js +98 -8
- package/dist/src/introspection.js +1 -1
- package/dist/src/metrics.d.ts +5 -0
- package/dist/src/metrics.js +33 -0
- package/dist/src/path-normalize.d.ts +17 -0
- package/dist/src/path-normalize.js +38 -0
- package/dist/src/serve-http.js +41 -2
- package/dist/src/server-setup.d.ts +6 -0
- package/dist/src/server-setup.js +23 -3
- package/dist/src/tools/consultation-tools.js +4 -2
- package/dist/src/tree-sitter-extractor.d.ts +36 -0
- package/dist/src/tree-sitter-extractor.js +354 -0
- package/dist/src/working-files-tracker.d.ts +42 -0
- package/dist/src/working-files-tracker.js +111 -0
- package/package.json +100 -83
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
// Type declarations for the git co-change index builder (Layer 4 of impact scoring).
import { type Logger } from "./logger.js";
import type { Metrics } from "./metrics.js";
/** Construction options. Every tuning knob is optional; defaults live in the implementation. */
interface BuilderOpts {
    /** Repository root; must contain a `.git` directory for the build to proceed. */
    repoRoot: string;
    /** Only scan commits newer than this many days. */
    sinceDays?: number;
    /** Upper bound on the number of commits scanned per build. */
    maxCount?: number;
    /** Kill the `git log` subprocess after this many milliseconds. */
    timeoutMs?: number;
    /** Delay between scheduled rebuilds after a successful build. */
    refreshMs?: number;
    /** Delay before retrying after a failed scheduled build. */
    retryMs?: number;
    /** Structured logger; a silent logger is substituted when omitted. */
    logger?: Logger;
    /** Optional metrics sink for build-outcome counters and pair gauges. */
    metrics?: Metrics;
}
/**
 * Scans recent git history, counts how often file pairs change in the same
 * commit, and persists the result. Can run once (`build`) or on a periodic
 * schedule (`startScheduler`/`stopScheduler`).
 */
export declare class GitCochangeBuilder {
    private repoRoot;
    private sinceDays;
    private maxCount;
    private timeoutMs;
    private refreshMs;
    private retryMs;
    private log;
    private metrics?;
    // Handle for the scheduler's pending setTimeout, or null when idle.
    private timer;
    constructor(opts: BuilderOpts);
    /** Build once. Resolves after persistence. */
    build(): Promise<void>;
    private runGitLog;
    private parseLog;
    /** Schedule a refresh loop. unref() so it doesn't keep the loop alive. */
    startScheduler(): void;
    stopScheduler(): void;
}
export {};
|
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
// src/git-cochange-builder.ts
import { spawn } from "child_process";
import { existsSync } from "fs";
import path from "path";
import { getDb } from "./database.js";
import { silentLogger } from "./logger.js";
// Files matching any of these patterns never participate in co-change pairs:
// lockfiles, build output, snapshots, minified/sourcemap artifacts, coverage,
// and vendored dependencies — their churn carries no coordination signal.
const DEFAULT_DENYLIST = [
    /package-lock\.json$/, /pnpm-lock\.yaml$/, /yarn\.lock$/, /\.lock$/,
    /\/dist\//, /\/build\//, /\/\.next\//, /\/__snapshots__\//,
    /\.min\.js$/, /\.map$/, /\/coverage\//, /\/node_modules\//, /\.snap$/,
];
/**
 * Builds the git co-change index ("Layer 4"): runs `git log` over a recent
 * window, counts how often file pairs appear in the same commit, and persists
 * the counts to the `git_cochange` table. Availability/state is recorded in
 * `git_cochange_meta` so readers can degrade gracefully when the index is
 * missing, stale, or unavailable (no .git / shallow clone / timeout).
 */
export class GitCochangeBuilder {
    repoRoot;
    sinceDays;   // history window in days (default 7)
    maxCount;    // max commits scanned per build (default 2000)
    timeoutMs;   // git subprocess timeout (default 5000 ms)
    refreshMs;   // scheduler delay after success (default 30 min)
    retryMs;     // scheduler delay after failure (default 5 min)
    log;
    metrics;
    // Pending scheduler setTimeout handle, or null when idle.
    timer = null;
    constructor(opts) {
        this.repoRoot = opts.repoRoot;
        this.sinceDays = opts.sinceDays ?? 7;
        this.maxCount = opts.maxCount ?? 2000;
        this.timeoutMs = opts.timeoutMs ?? 5000;
        this.refreshMs = opts.refreshMs ?? 1800000;
        this.retryMs = opts.retryMs ?? 300000;
        this.log = opts.logger || silentLogger;
        this.metrics = opts.metrics;
    }
    /** Build once. Resolves after persistence. */
    async build() {
        const db = getDb();
        // Upsert helper for the key/value availability-state table.
        const setMeta = (k, v) => db.prepare("INSERT OR REPLACE INTO git_cochange_meta (k, v) VALUES (?, ?)").run(k, v);
        // Precondition: a real (non-shallow) git repository. Either failure mode
        // records "available" = "false" and returns without touching the index.
        if (!existsSync(path.join(this.repoRoot, ".git"))) {
            this.log.info({}, "Layer 4 unavailable: no .git");
            setMeta("available", "false");
            this.metrics?.gitCochangeBuilds.inc({ outcome: "failed" });
            return;
        }
        if (existsSync(path.join(this.repoRoot, ".git", "shallow"))) {
            this.log.info({}, "Layer 4 unavailable: shallow clone");
            setMeta("available", "false");
            this.metrics?.gitCochangeBuilds.inc({ outcome: "shallow_skipped" });
            return;
        }
        let stdout = null;
        try {
            stdout = await this.runGitLog();
        }
        catch (err) {
            this.log.warn({ err }, "git log failed");
            setMeta("available", "false");
            setMeta("last_error", String(err.message));
            this.metrics?.gitCochangeBuilds.inc({ outcome: "failed" });
            return;
        }
        // runGitLog resolves with the sentinel string "TIMEOUT" (rather than
        // rejecting) when the subprocess is killed for exceeding timeoutMs.
        // Existing index rows are left in place, hence "stale_partial".
        if (stdout === "TIMEOUT") {
            setMeta("available", "stale_partial");
            this.log.warn({}, "git log timed out — Layer 4 stale_partial");
            this.metrics?.gitCochangeBuilds.inc({ outcome: "timeout" });
            return;
        }
        const { pairs, totalCommits } = this.parseLog(stdout);
        // Dynamic predictor cap: any file appearing in > 40% of effective commits is
        // excluded as a *predictor* (still allowed as a *target*). Prevents hotspot files
        // like config or barrel index from saturating co-change with every other file.
        const PREDICTOR_CAP_RATIO = 0.4;
        const fileCommitCount = new Map();
        for (const key of pairs.keys()) {
            const [a, b] = key.split("|");
            fileCommitCount.set(a, (fileCommitCount.get(a) ?? 0) + (pairs.get(key) ?? 0));
            fileCommitCount.set(b, (fileCommitCount.get(b) ?? 0) + (pairs.get(key) ?? 0));
        }
        const promiscuous = new Set();
        for (const [file, count] of fileCommitCount) {
            // count is total times the file appeared in any pair; max possible is roughly
            // (totalCommits) per file. Use raw count / totalCommits as an approximation
            // of the file's commit frequency.
            if (totalCommits > 0 && count / totalCommits > PREDICTOR_CAP_RATIO) {
                promiscuous.add(file);
            }
        }
        if (promiscuous.size > 0) {
            this.log.info({ count: promiscuous.size, files: Array.from(promiscuous) }, "Layer 4 dynamic predictor cap excluded files");
        }
        // Full rebuild: wipe and re-insert inside a single transaction.
        db.exec("DELETE FROM git_cochange");
        const stmt = db.prepare("INSERT INTO git_cochange (file_a, file_b, count, total_commits, computed_at) VALUES (?, ?, ?, ?, datetime('now'))");
        const insertMany = db.transaction(() => {
            for (const [key, count] of pairs.entries()) {
                const [a, b] = key.split("|");
                // Skip pairs where EITHER file is a promiscuous predictor (file is allowed
                // as target only, but a pair where it's a predictor is dropped). For the
                // index entry to be useful, both files must be non-promiscuous predictors.
                if (promiscuous.has(a) || promiscuous.has(b))
                    continue;
                // parseLog already orders keys so a < b; this guard keeps the
                // canonical ordering invariant explicit at the persistence boundary.
                if (a < b)
                    stmt.run(a, b, count, totalCommits);
            }
        });
        insertMany();
        setMeta("available", "true");
        setMeta("last_built_at", new Date().toISOString());
        this.metrics?.gitCochangeBuilds.inc({ outcome: "success" });
        // NOTE(review): this gauge reports the pre-filter pair count, including
        // pairs dropped by the promiscuous-predictor cap — confirm that is intended.
        this.metrics?.gitCochangePairs.set(pairs.size);
    }
    // Spawn `git log` and collect stdout. Resolves with the raw output on exit 0,
    // the sentinel "TIMEOUT" if killed after timeoutMs, and rejects on spawn
    // failure or a non-zero exit (the late rejection after a timeout kill is a
    // no-op because the promise is already settled).
    runGitLog() {
        return new Promise((resolve, reject) => {
            const args = [
                "log",
                `--max-count=${this.maxCount}`,
                "--diff-filter=AMRD",
                `--since=${this.sinceDays} days ago`,
                "--no-renames",
                "--pretty=format:%H",
                "--name-only",
                "-z",
            ];
            const proc = spawn("git", args, { cwd: this.repoRoot });
            let buf = "";
            const timer = setTimeout(() => {
                proc.kill();
                resolve("TIMEOUT");
            }, this.timeoutMs);
            proc.stdout.on("data", (c) => (buf += c.toString("utf-8")));
            proc.on("error", (err) => { clearTimeout(timer); reject(err); });
            proc.on("close", (code) => {
                clearTimeout(timer);
                if (code === 0)
                    resolve(buf);
                else
                    reject(new Error(`git log exit ${code}`));
            });
        });
    }
    // Turn raw `git log -z --pretty=format:%H --name-only` output into
    // { pairs: Map<"a|b", count>, totalCommits }. Pair keys are always ordered
    // lexicographically (a < b) so each unordered pair has one canonical key.
    parseLog(stdout) {
        // git log -z --pretty=format:%H --name-only output format:
        // Each commit entry: <SHA>\n<file1>\0<file2>\0...\0
        // Between commits the NUL separator also acts as delimiter.
        // We split on NUL first, then detect SHA boundaries within tokens.
        const tokens = stdout.split("\0").filter(t => t.length > 0);
        const pairs = new Map();
        let totalCommits = 0;
        let currentFiles = [];
        // Emit all unordered pairs for the commit accumulated in currentFiles,
        // then reset. Called at each commit boundary and once at end-of-input.
        const flush = () => {
            if (currentFiles.length === 0)
                return;
            // Skip massive commits (likely sweeps)
            if (currentFiles.length > 200) {
                currentFiles = [];
                return;
            }
            // Apply denylist
            const eligible = currentFiles.filter(f => !DEFAULT_DENYLIST.some(re => re.test(f)));
            for (let i = 0; i < eligible.length; i++) {
                for (let j = i + 1; j < eligible.length; j++) {
                    const [a, b] = eligible[i] < eligible[j] ? [eligible[i], eligible[j]] : [eligible[j], eligible[i]];
                    const key = `${a}|${b}`;
                    pairs.set(key, (pairs.get(key) ?? 0) + 1);
                }
            }
            // totalCommits counts every flushed commit — including oversized ones
            // skipped above? No: the early return above skips the increment, so
            // sweep commits contribute neither pairs nor to the denominator.
            totalCommits++;
            currentFiles = [];
        };
        // SHA pattern: 40 hex chars
        const shaRe = /^([0-9a-f]{40})\n(.*)$/s;
        for (const t of tokens) {
            // Each token after splitting on \0 may look like:
            //   "\nSHA40\npath"  (commit boundary with preceding newline)
            //   "SHA40\npath"    (commit boundary at start)
            //   "path"           (file path continuation)
            //   "\nSHA40"        (SHA only, no file on same token)
            // Strip leading newlines to normalize
            const stripped = t.replace(/^\n+/, "");
            const shaMatch = stripped.match(shaRe);
            if (shaMatch) {
                // We found a SHA — flush the previous commit's files
                flush();
                const trailingPath = shaMatch[2].trim();
                if (trailingPath)
                    currentFiles.push(trailingPath);
            }
            else {
                // Check if this token itself IS a SHA (no file attached, happens when
                // --pretty=format:%H emits the SHA on its own NUL-terminated chunk)
                const pureSha = stripped.match(/^[0-9a-f]{40}$/);
                if (pureSha) {
                    flush();
                }
                else {
                    // It's a file path (or part of one); newlines indicate embedded commit
                    // boundaries when a file is on the same NUL chunk as the next SHA.
                    // Handle the case where "path\nSHA\npath" might appear.
                    const parts = stripped.split("\n");
                    for (let i = 0; i < parts.length; i++) {
                        const part = parts[i].trim();
                        if (!part)
                            continue;
                        if (/^[0-9a-f]{40}$/.test(part)) {
                            flush();
                        }
                        else {
                            currentFiles.push(part);
                        }
                    }
                }
            }
        }
        // Flush the trailing commit (no SHA follows the last file list).
        flush();
        return { pairs, totalCommits };
    }
    /** Schedule a refresh loop. unref() so it doesn't keep the loop alive. */
    startScheduler() {
        const tick = async () => {
            try {
                await this.build();
                this.timer = setTimeout(tick, this.refreshMs);
            }
            catch (err) {
                // build() catches its own failures internally, so this path only
                // fires on unexpected throws; reschedule sooner with the retry delay.
                this.log.warn({ err }, "build failed, retrying");
                this.timer = setTimeout(tick, this.retryMs);
            }
            // unref guarded for environments (test stubs) whose fake timers
            // lack the method.
            if (this.timer && typeof this.timer.unref === "function")
                this.timer.unref();
        };
        // First build after 5s grace
        this.timer = setTimeout(tick, 5000);
        if (this.timer && typeof this.timer.unref === "function")
            this.timer.unref();
    }
    // Cancel any pending tick; safe to call when the scheduler is not running.
    stopScheduler() {
        if (this.timer) {
            clearTimeout(this.timer);
            this.timer = null;
        }
    }
}
|
|
@@ -13,7 +13,7 @@ export declare function handleLivez(_req: IncomingMessage, res: ServerResponse):
|
|
|
13
13
|
* structured `{ok:false,error:"…"}` instead of a 500. The response shape is
|
|
14
14
|
* identical between 200 and 503 so consumers can parse uniformly.
|
|
15
15
|
*/
|
|
16
|
-
export declare function handleReadyz(_req: IncomingMessage, res: ServerResponse, services: Pick<CoordinatorServices, "mqttBridge">): void;
|
|
16
|
+
export declare function handleReadyz(_req: IncomingMessage, res: ServerResponse, services: Pick<CoordinatorServices, "mqttBridge" | "treeSitter" | "gitCochange">): void;
|
|
17
17
|
/**
|
|
18
18
|
* Backwards-compatible alias. The original /health route returned a fixed
|
|
19
19
|
* {status:"ok",version} payload with no dep checks; semantically that is a
|
|
@@ -47,6 +47,8 @@ export function handleReadyz(_req, res, services) {
|
|
|
47
47
|
const checks = {
|
|
48
48
|
db: { ok: false },
|
|
49
49
|
mqtt: { ok: false },
|
|
50
|
+
tree_sitter: { ok: false, grammars_loaded: 0, total_grammars: 7, optional: true },
|
|
51
|
+
git_cochange: { available: false, status: "unavailable", optional: true },
|
|
50
52
|
};
|
|
51
53
|
try {
|
|
52
54
|
// Cheapest possible round-trip that exercises the connection without
|
|
@@ -69,6 +71,30 @@ export function handleReadyz(_req, res, services) {
|
|
|
69
71
|
catch (err) {
|
|
70
72
|
checks.mqtt.error = err.message;
|
|
71
73
|
}
|
|
74
|
+
// Optional: tree-sitter status (does NOT gate readiness — Layer 0.5 degrades gracefully)
|
|
75
|
+
try {
|
|
76
|
+
if (services.treeSitter) {
|
|
77
|
+
checks.tree_sitter = services.treeSitter.status();
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
catch {
|
|
81
|
+
// keep default { ok: false, grammars_loaded: 0, total_grammars: 7, optional: true }
|
|
82
|
+
}
|
|
83
|
+
// Optional: git_cochange availability (does NOT gate readiness — Layer 4 degrades gracefully)
|
|
84
|
+
try {
|
|
85
|
+
const row = getDb()
|
|
86
|
+
.prepare("SELECT v FROM git_cochange_meta WHERE k = ?")
|
|
87
|
+
.get("available");
|
|
88
|
+
checks.git_cochange = {
|
|
89
|
+
available: row?.v === "true",
|
|
90
|
+
status: row?.v ?? "unavailable",
|
|
91
|
+
optional: true,
|
|
92
|
+
};
|
|
93
|
+
}
|
|
94
|
+
catch {
|
|
95
|
+
// keep default { available: false, status: "unavailable", optional: true }
|
|
96
|
+
}
|
|
97
|
+
// Gating: only db + mqtt block readiness. tree_sitter and git_cochange are reported but optional.
|
|
72
98
|
const allOk = checks.db.ok && checks.mqtt.ok;
|
|
73
99
|
json(res, {
|
|
74
100
|
status: allOk ? "ready" : "not_ready",
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import { createHash } from "crypto";
|
|
1
2
|
import { getDb } from "../database.js";
|
|
2
3
|
import { runCommonAnnounceFlow } from "../announce-workflow.js";
|
|
3
4
|
import { canResetDb } from "../reset-guard.js";
|
|
@@ -5,7 +6,15 @@ import { parseBody, json } from "./utils.js";
|
|
|
5
6
|
export async function handleRest(req, res, ctx) {
|
|
6
7
|
const { services, httpLog, authEnabled, getRunConfig, setRunConfig } = ctx;
|
|
7
8
|
const url = req.url || "";
|
|
8
|
-
|
|
9
|
+
let body;
|
|
10
|
+
try {
|
|
11
|
+
body = await parseBody(req);
|
|
12
|
+
}
|
|
13
|
+
catch (err) {
|
|
14
|
+
const e = err;
|
|
15
|
+
json(res, { error: e.message || "Invalid request" }, e.statusCode || 400);
|
|
16
|
+
return;
|
|
17
|
+
}
|
|
9
18
|
const agentId = body.agent_id;
|
|
10
19
|
// Dashboard/work-stealing polls these endpoints every few seconds — demote to debug
|
|
11
20
|
// to keep the info log focused on coordination events (announce, claim, resolve, etc).
|
|
@@ -61,7 +70,7 @@ export async function handleRest(req, res, ctx) {
|
|
|
61
70
|
json(res, { ok: true });
|
|
62
71
|
}
|
|
63
72
|
else if (url === "/api/announce") {
|
|
64
|
-
const { agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open, assigned_to } = body;
|
|
73
|
+
const { agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open, assigned_to, target_symbols } = body;
|
|
65
74
|
const thread = consultation.announceWork({ agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open, assigned_to });
|
|
66
75
|
const agentInfo = registry.get(agent_id);
|
|
67
76
|
// S2 fix: shared workflow (impact scoring, override respondents, auto-resolve,
|
|
@@ -69,6 +78,7 @@ export async function handleRest(req, res, ctx) {
|
|
|
69
78
|
// function used by the MCP announce_work tool path.
|
|
70
79
|
const { updated, categorized, respondents, planQuality } = runCommonAnnounceFlow(services, thread.id, {
|
|
71
80
|
agent_id, subject, plan, target_modules, target_files, depends_on_files, exports_affected, keep_open,
|
|
81
|
+
target_symbols,
|
|
72
82
|
});
|
|
73
83
|
// REST-specific thread_opened SSE shape (different field set than MCP — kept
|
|
74
84
|
// divergent because consumers may depend on this exact contract).
|
|
@@ -358,6 +368,92 @@ export async function handleRest(req, res, ctx) {
|
|
|
358
368
|
json(res, { registered: true, status: agent.status, activity: activity.activity_status });
|
|
359
369
|
}
|
|
360
370
|
}
|
|
371
|
+
else if (url === "/api/file-activity" && req.method === "POST") {
|
|
372
|
+
if (typeof body.session_id !== "string" || typeof body.agent_id !== "string"
|
|
373
|
+
|| typeof body.tool_name !== "string" || typeof body.file_path !== "string") {
|
|
374
|
+
json(res, { error: "missing required fields" }, 400);
|
|
375
|
+
return;
|
|
376
|
+
}
|
|
377
|
+
if (body.agent_name !== undefined && typeof body.agent_name !== "string") {
|
|
378
|
+
json(res, { error: "agent_name must be string when present" }, 400);
|
|
379
|
+
return;
|
|
380
|
+
}
|
|
381
|
+
const MAX_CONTENT = 262144;
|
|
382
|
+
let symbols = null;
|
|
383
|
+
let contentHash = null;
|
|
384
|
+
if (typeof body.content === "string") {
|
|
385
|
+
if (body.content.length > MAX_CONTENT) {
|
|
386
|
+
json(res, { error: "content exceeds 256 KB" }, 400);
|
|
387
|
+
return;
|
|
388
|
+
}
|
|
389
|
+
contentHash = createHash("sha256").update(body.content).digest("hex");
|
|
390
|
+
symbols = ctx.services.treeSitter.extract(body.file_path, body.content, null);
|
|
391
|
+
}
|
|
392
|
+
ctx.services.fileTracker.log({
|
|
393
|
+
session_id: body.session_id,
|
|
394
|
+
agent_id: body.agent_id,
|
|
395
|
+
agent_name: body.agent_name,
|
|
396
|
+
tool_name: body.tool_name,
|
|
397
|
+
file_path: body.file_path,
|
|
398
|
+
content_hash: contentHash,
|
|
399
|
+
symbols_touched: symbols,
|
|
400
|
+
});
|
|
401
|
+
json(res, { ok: true });
|
|
402
|
+
}
|
|
403
|
+
else if (url === "/api/working-files/start" && req.method === "POST") {
|
|
404
|
+
if (typeof body.agent_id !== "string" || typeof body.file_path !== "string") {
|
|
405
|
+
json(res, { error: "agent_id and file_path required" }, 400);
|
|
406
|
+
return;
|
|
407
|
+
}
|
|
408
|
+
const ttl = parseInt(process.env.COORDINATOR_WORKING_FILES_TTL_MIN || "30", 10);
|
|
409
|
+
services.workingFiles.start(body.agent_id, body.file_path, ttl);
|
|
410
|
+
json(res, { ok: true });
|
|
411
|
+
}
|
|
412
|
+
else if (url === "/api/working-files/stop" && req.method === "POST") {
|
|
413
|
+
if (typeof body.agent_id !== "string" || typeof body.file_path !== "string") {
|
|
414
|
+
json(res, { error: "agent_id and file_path required" }, 400);
|
|
415
|
+
return;
|
|
416
|
+
}
|
|
417
|
+
services.workingFiles.stop(body.agent_id, body.file_path);
|
|
418
|
+
json(res, { ok: true });
|
|
419
|
+
}
|
|
420
|
+
else if (url?.startsWith("/api/scoring-stats") && req.method === "GET") {
|
|
421
|
+
const u = new URL(url, "http://localhost");
|
|
422
|
+
const sinceParam = u.searchParams.get("since") || "24h";
|
|
423
|
+
const sinceMin = sinceParam.endsWith("h") ? parseInt(sinceParam) * 60
|
|
424
|
+
: sinceParam.endsWith("d") ? parseInt(sinceParam) * 60 * 24
|
|
425
|
+
: 60 * 24;
|
|
426
|
+
const db = getDb();
|
|
427
|
+
const rows = db.prepare(`SELECT
|
|
428
|
+
lf.layer,
|
|
429
|
+
COUNT(*) AS fire_count,
|
|
430
|
+
AVG(lf.score) AS avg_score,
|
|
431
|
+
SUM(CASE WHEN json_extract(e.payload, '$.resolution_type') = 'auto_resolved' THEN 1 ELSE 0 END) AS auto_resolved,
|
|
432
|
+
SUM(CASE WHEN json_extract(e.payload, '$.resolution_type') = 'consensus' THEN 1 ELSE 0 END) AS consensus,
|
|
433
|
+
SUM(CASE WHEN json_extract(e.payload, '$.resolution_type') = 'timeout' THEN 1 ELSE 0 END) AS timeout_count,
|
|
434
|
+
SUM(CASE WHEN json_extract(e.payload, '$.resolution_type') IN ('agent_departure','closed') THEN 1 ELSE 0 END) AS cancelled
|
|
435
|
+
FROM layer_firings lf
|
|
436
|
+
LEFT JOIN events e
|
|
437
|
+
ON e.type = 'thread_resolved'
|
|
438
|
+
AND json_extract(e.payload, '$.thread_id') = lf.thread_id
|
|
439
|
+
WHERE lf.fired_at > datetime('now', '-' || ? || ' minutes')
|
|
440
|
+
GROUP BY lf.layer
|
|
441
|
+
ORDER BY fire_count DESC`).all(sinceMin);
|
|
442
|
+
json(res, {
|
|
443
|
+
window: { since: sinceParam, now: new Date().toISOString() },
|
|
444
|
+
layers: rows.map(r => ({
|
|
445
|
+
layer: r.layer,
|
|
446
|
+
fire_count: r.fire_count,
|
|
447
|
+
avg_score: r.avg_score,
|
|
448
|
+
outcomes: {
|
|
449
|
+
auto_resolved: r.auto_resolved,
|
|
450
|
+
consensus: r.consensus,
|
|
451
|
+
timeout: r.timeout_count,
|
|
452
|
+
cancelled: r.cancelled,
|
|
453
|
+
},
|
|
454
|
+
})),
|
|
455
|
+
});
|
|
456
|
+
}
|
|
361
457
|
else if (url === "/api/status") {
|
|
362
458
|
const online = registry.listOnline();
|
|
363
459
|
const openThreads = consultation.listThreads({ status: "open" });
|
package/dist/src/http/utils.d.ts
CHANGED
|
@@ -1,8 +1,4 @@
|
|
|
1
1
|
import type { IncomingMessage, ServerResponse } from "http";
|
|
2
|
-
/**
|
|
3
|
-
* S1: shared HTTP helpers extracted from serve-http.ts.
|
|
4
|
-
* parseBody, json, decodeJwtPayload, safeEqual.
|
|
5
|
-
*/
|
|
6
2
|
export declare function parseBody(req: IncomingMessage): Promise<Record<string, unknown>>;
|
|
7
3
|
export declare function json(res: ServerResponse, data: unknown, status?: number): void;
|
|
8
4
|
/**
|
package/dist/src/http/utils.js
CHANGED
|
@@ -3,11 +3,25 @@ import { timingSafeEqual } from "crypto";
|
|
|
3
3
|
* S1: shared HTTP helpers extracted from serve-http.ts.
|
|
4
4
|
* parseBody, json, decodeJwtPayload, safeEqual.
|
|
5
5
|
*/
|
|
6
|
+
const MAX_BODY_BYTES = parseInt(process.env.COORDINATOR_MAX_BODY_BYTES || "1048576", 10);
|
|
6
7
|
export function parseBody(req) {
|
|
7
8
|
return new Promise((resolve, reject) => {
|
|
8
|
-
let
|
|
9
|
-
|
|
9
|
+
let bytes = 0;
|
|
10
|
+
const chunks = [];
|
|
11
|
+
req.on("data", (chunk) => {
|
|
12
|
+
bytes += chunk.length;
|
|
13
|
+
if (bytes > MAX_BODY_BYTES) {
|
|
14
|
+
const err = new Error("Payload too large");
|
|
15
|
+
err.statusCode = 413;
|
|
16
|
+
// destroy() may not exist on every IncomingMessage-like input (test stub).
|
|
17
|
+
req.destroy?.(err);
|
|
18
|
+
reject(err);
|
|
19
|
+
return;
|
|
20
|
+
}
|
|
21
|
+
chunks.push(chunk);
|
|
22
|
+
});
|
|
10
23
|
req.on("end", () => {
|
|
24
|
+
const body = Buffer.concat(chunks).toString("utf-8");
|
|
11
25
|
try {
|
|
12
26
|
resolve(body ? JSON.parse(body) : {});
|
|
13
27
|
}
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import type { AgentRegistry } from "./agent-registry.js";
|
|
2
2
|
import type { FileTracker } from "./file-tracker.js";
|
|
3
3
|
import type { Consultation } from "./consultation.js";
|
|
4
|
+
import type { WorkingFilesTracker } from "./working-files-tracker.js";
|
|
4
5
|
export interface ImpactScore {
|
|
5
6
|
agent_id: string;
|
|
6
7
|
agent_name: string;
|
|
@@ -19,13 +20,16 @@ interface AnnounceParams {
|
|
|
19
20
|
target_files: string[];
|
|
20
21
|
depends_on_files?: string[];
|
|
21
22
|
exports_affected?: string[];
|
|
23
|
+
target_symbols?: string[];
|
|
22
24
|
}
|
|
23
25
|
export declare class ImpactScorer {
|
|
24
26
|
private registry;
|
|
25
27
|
private fileTracker;
|
|
26
28
|
private consultation?;
|
|
27
|
-
|
|
29
|
+
private workingFiles?;
|
|
30
|
+
constructor(registry: AgentRegistry, fileTracker: FileTracker, consultation?: Consultation | undefined, workingFiles?: WorkingFilesTracker | undefined);
|
|
28
31
|
score(params: AnnounceParams): ImpactScore[];
|
|
29
32
|
categorize(params: AnnounceParams): CategorizedImpact;
|
|
33
|
+
private getRecentSymbolsForFile;
|
|
30
34
|
}
|
|
31
35
|
export {};
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import { getDb } from "./database.js";
|
|
1
2
|
// Layer 0 (announced-intent) recency window. Resolved threads older than this
|
|
2
3
|
// are excluded — yesterday's resolved work shouldn't trigger today's scoring.
|
|
3
4
|
// Aligned with file-tracker's default conflict window per the audit guidance.
|
|
@@ -11,10 +12,12 @@ export class ImpactScorer {
|
|
|
11
12
|
registry;
|
|
12
13
|
fileTracker;
|
|
13
14
|
consultation;
|
|
14
|
-
|
|
15
|
+
workingFiles;
|
|
16
|
+
constructor(registry, fileTracker, consultation, workingFiles) {
|
|
15
17
|
this.registry = registry;
|
|
16
18
|
this.fileTracker = fileTracker;
|
|
17
19
|
this.consultation = consultation;
|
|
20
|
+
this.workingFiles = workingFiles;
|
|
18
21
|
}
|
|
19
22
|
score(params) {
|
|
20
23
|
const onlineAgents = this.registry
|
|
@@ -42,6 +45,34 @@ export class ImpactScorer {
|
|
|
42
45
|
const fileToAgents = filesToIndex.length > 0
|
|
43
46
|
? this.fileTracker.getFileToAgentsIndex(filesToIndex, params.agent_id, FILE_ACTIVITY_WINDOW_MINUTES)
|
|
44
47
|
: new Map();
|
|
48
|
+
const inFlightToAgents = this.workingFiles
|
|
49
|
+
? this.workingFiles.getIndex(filesToIndex, params.agent_id)
|
|
50
|
+
: new Map();
|
|
51
|
+
// Pre-load symbols_touched for the target_files × online_agents matrix once,
|
|
52
|
+
// keyed by (file_path, agent_id). Avoids N*M DB roundtrips inside the score loop.
|
|
53
|
+
let symbolsByFileAgent = null;
|
|
54
|
+
if (params.target_symbols && params.target_symbols.length > 0 && params.target_files.length > 0) {
|
|
55
|
+
const db = getDb();
|
|
56
|
+
const placeholders = params.target_files.map(() => "?").join(",");
|
|
57
|
+
const rows = db.prepare(`SELECT agent_id, file_path, symbols_touched
|
|
58
|
+
FROM file_activity
|
|
59
|
+
WHERE file_path IN (${placeholders})
|
|
60
|
+
AND symbols_touched IS NOT NULL
|
|
61
|
+
AND id IN (
|
|
62
|
+
SELECT MAX(id) FROM file_activity
|
|
63
|
+
WHERE file_path IN (${placeholders})
|
|
64
|
+
AND symbols_touched IS NOT NULL
|
|
65
|
+
GROUP BY agent_id, file_path
|
|
66
|
+
)`).all(...params.target_files, ...params.target_files);
|
|
67
|
+
symbolsByFileAgent = new Map();
|
|
68
|
+
for (const r of rows) {
|
|
69
|
+
try {
|
|
70
|
+
const arr = JSON.parse(r.symbols_touched);
|
|
71
|
+
symbolsByFileAgent.set(`${r.file_path}|${r.agent_id}`, arr);
|
|
72
|
+
}
|
|
73
|
+
catch { /* malformed JSON: ignore */ }
|
|
74
|
+
}
|
|
75
|
+
}
|
|
45
76
|
// O2: bound the resolved-thread query to a recency window. Without this,
|
|
46
77
|
// listThreads({status:'resolved'}) returns ALL historical resolved threads
|
|
47
78
|
// (unbounded growth). The Layer 0 filter only keeps threads where the
|
|
@@ -102,12 +133,32 @@ export class ImpactScorer {
|
|
|
102
133
|
}
|
|
103
134
|
}
|
|
104
135
|
}
|
|
105
|
-
// Layer 1: Same file recently modified (
|
|
136
|
+
// Layer 1: Same file recently modified (file_activity) OR currently in flight (working_files).
|
|
106
137
|
for (const targetFile of params.target_files) {
|
|
107
|
-
const
|
|
108
|
-
|
|
138
|
+
const recentAgents = fileToAgents.get(targetFile);
|
|
139
|
+
const inFlightAgents = inFlightToAgents.get(targetFile);
|
|
140
|
+
if (recentAgents && recentAgents.has(agent.id)) {
|
|
109
141
|
maxScore = Math.max(maxScore, 100);
|
|
110
|
-
|
|
142
|
+
let annotated = false;
|
|
143
|
+
if (params.target_symbols && params.target_symbols.length > 0) {
|
|
144
|
+
const theirSymbols = symbolsByFileAgent?.get(`${targetFile}|${agent.id}`) || null;
|
|
145
|
+
if (theirSymbols && theirSymbols.length > 0) {
|
|
146
|
+
const mine = new Set(params.target_symbols);
|
|
147
|
+
const theirs = new Set(theirSymbols);
|
|
148
|
+
const overlap = [...mine].some(s => theirs.has(s));
|
|
149
|
+
if (!overlap) {
|
|
150
|
+
reasons.push(`same file: ${targetFile}; disjoint symbols: you=[${[...mine].join(",")}], them=[${[...theirs].join(",")}] — verify shared module state`);
|
|
151
|
+
annotated = true;
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
}
|
|
155
|
+
if (!annotated) {
|
|
156
|
+
reasons.push(`same file (recent): ${targetFile}`);
|
|
157
|
+
}
|
|
158
|
+
}
|
|
159
|
+
if (inFlightAgents && inFlightAgents.has(agent.id)) {
|
|
160
|
+
maxScore = Math.max(maxScore, 100);
|
|
161
|
+
reasons.push(`same file (in flight): ${targetFile}`);
|
|
111
162
|
}
|
|
112
163
|
}
|
|
113
164
|
// Layer 2: Depends-on file recently modified (score 80)
|
|
@@ -126,9 +177,34 @@ export class ImpactScorer {
|
|
|
126
177
|
maxScore = Math.max(maxScore, 30);
|
|
127
178
|
reasons.push(`module overlap: ${overlapping.join(", ")}`);
|
|
128
179
|
}
|
|
129
|
-
// Layer 4
|
|
130
|
-
//
|
|
131
|
-
//
|
|
180
|
+
// Layer 4: git co-change. For each target_file F, find rows in git_cochange where
|
|
181
|
+
// (LEAST(F,partner), GREATEST(F,partner)) match. If the OTHER agent recently
|
|
182
|
+
// touched the partner file, apply the co-change score.
|
|
183
|
+
const db = getDb();
|
|
184
|
+
for (const targetFile of params.target_files) {
|
|
185
|
+
const rows = db.prepare(`SELECT file_a, file_b, count, total_commits FROM git_cochange
|
|
186
|
+
WHERE file_a = ? OR file_b = ?`).all(targetFile, targetFile);
|
|
187
|
+
for (const r of rows) {
|
|
188
|
+
const partner = r.file_a === targetFile ? r.file_b : r.file_a;
|
|
189
|
+
const ratio = r.count / Math.max(r.total_commits, 1);
|
|
190
|
+
let layer4Score = 0;
|
|
191
|
+
if (ratio > 0.5)
|
|
192
|
+
layer4Score = 60;
|
|
193
|
+
else if (ratio > 0.2)
|
|
194
|
+
layer4Score = 40;
|
|
195
|
+
if (layer4Score === 0)
|
|
196
|
+
continue;
|
|
197
|
+
// Did the OTHER agent touch the partner file recently?
|
|
198
|
+
const partnerActivity = db.prepare(`SELECT 1 FROM file_activity
|
|
199
|
+
WHERE file_path = ? AND agent_id = ?
|
|
200
|
+
AND created_at > datetime('now', '-60 minutes')
|
|
201
|
+
LIMIT 1`).get(partner, agent.id);
|
|
202
|
+
if (partnerActivity) {
|
|
203
|
+
maxScore = Math.max(maxScore, layer4Score);
|
|
204
|
+
reasons.push(`co-change: ${targetFile} ↔ ${partner} (ratio ${ratio.toFixed(2)})`);
|
|
205
|
+
}
|
|
206
|
+
}
|
|
207
|
+
}
|
|
132
208
|
return {
|
|
133
209
|
agent_id: agent.id,
|
|
134
210
|
agent_name: agent.name,
|
|
@@ -146,4 +222,18 @@ export class ImpactScorer {
|
|
|
146
222
|
pass: scores.filter((s) => s.score < 30),
|
|
147
223
|
};
|
|
148
224
|
}
|
|
225
|
+
getRecentSymbolsForFile(filePath, agentId) {
|
|
226
|
+
const db = getDb();
|
|
227
|
+
const row = db.prepare(`SELECT symbols_touched FROM file_activity
|
|
228
|
+
WHERE agent_id = ? AND file_path = ? AND symbols_touched IS NOT NULL
|
|
229
|
+
ORDER BY id DESC LIMIT 1`).get(agentId, filePath);
|
|
230
|
+
if (!row || !row.symbols_touched)
|
|
231
|
+
return null;
|
|
232
|
+
try {
|
|
233
|
+
return JSON.parse(row.symbols_touched);
|
|
234
|
+
}
|
|
235
|
+
catch {
|
|
236
|
+
return null;
|
|
237
|
+
}
|
|
238
|
+
}
|
|
149
239
|
}
|
|
@@ -4,7 +4,7 @@ export class IntrospectionManager {
|
|
|
4
4
|
create(params) {
|
|
5
5
|
const db = getDb();
|
|
6
6
|
const id = randomUUID();
|
|
7
|
-
db.prepare(`INSERT INTO introspections (id, thread_id, agent_id, score, reasons)
|
|
7
|
+
db.prepare(`INSERT INTO introspections (id, thread_id, agent_id, score, reasons)
|
|
8
8
|
VALUES (?, ?, ?, ?, ?)`).run(id, params.thread_id, params.agent_id, params.score, JSON.stringify(params.reasons));
|
|
9
9
|
return this.get(id);
|
|
10
10
|
}
|