@coderule/mcp 2.0.3 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.cjs +176 -48
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +176 -48
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +176 -48
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +176 -48
- package/dist/index.js.map +1 -1
- package/dist/mcp-cli.cjs +281 -119
- package/dist/mcp-cli.cjs.map +1 -1
- package/dist/mcp-cli.js +281 -119
- package/dist/mcp-cli.js.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -524,7 +524,10 @@ var FilesRepo = class {
       `UPDATE files
        SET content_sha256 = ?, service_file_hash = ?, hash_state = 'clean',
            hash_owner = NULL, hash_lease_expires_at = NULL, hash_started_at = NULL
-       WHERE id =
+       WHERE id = ?
+         AND hash_state = 'hashing'
+         AND hash_owner = ?
+         AND hash_started_at = ?`
     );
     this.requeueExpiredHashingStmt = this.db.prepare(
       `UPDATE files
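
The interesting change here is the new `WHERE` guard: applying a hash result is now a compare-and-swap that only succeeds if the row is still in the `'hashing'` state, still owned by the same worker, and still from the same hashing attempt. A minimal sketch of the pattern, assuming a better-sqlite3-style `prepare`/`run` API (consistent with the `result.changes` usage later in this diff; variable names are illustrative):

```js
// Compare-and-swap apply: the UPDATE is a no-op unless the row is
// still owned by this worker's hashing attempt.
const applyHashes = db.prepare(
  `UPDATE files
     SET content_sha256 = ?, service_file_hash = ?, hash_state = 'clean',
         hash_owner = NULL, hash_lease_expires_at = NULL, hash_started_at = NULL
   WHERE id = ?
     AND hash_state = 'hashing'
     AND hash_owner = ?
     AND hash_started_at = ?`
);
const { changes } = applyHashes.run(sha, serviceHash, fileId, ownerId, startedAt);
if (changes === 0) {
  // Lost the race: the row was re-claimed by another owner or went
  // dirty again, so this (now stale) hash result is dropped.
}
```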
@@ -655,13 +658,30 @@ var FilesRepo = class {
     tx(ids);
   }
   applyHashResults(results) {
-    if (!results.length) return;
+    if (!results.length) return 0;
+    let applied = 0;
     const tx = this.db.transaction((batch) => {
-      for (const {
-
+      for (const {
+        id,
+        contentSha256,
+        serviceFileHash,
+        hashOwner,
+        hashStartedAt
+      } of batch) {
+        const result = this.applyHashesStmt.run(
+          contentSha256,
+          serviceFileHash,
+          id,
+          hashOwner,
+          hashStartedAt
+        );
+        if ((result.changes ?? 0) > 0) {
+          applied += 1;
+        }
       }
     });
     tx(results);
+    return applied;
   }
   getCleanFilesForSnapshot() {
     return this.selectCleanSnapshotStmt.all();
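
`applyHashResults` previously returned nothing; it now counts the rows the guarded statement actually changed and returns that. A hypothetical caller (names illustrative) could use the count to spot results rejected by the `WHERE` guard:

```js
// Hypothetical caller: a shortfall means some rows failed the
// hash_state/hash_owner/hash_started_at guard and were not applied.
const applied = filesRepo.applyHashResults(results);
if (applied < results.length) {
  log.debug(
    { applied, submitted: results.length },
    "dropped stale hash results"
  );
}
```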
@@ -711,7 +731,7 @@ var Outbox = class {
     this.log = logger2.child({ scope: "outbox" });
     this.queue = new Qulite(db, {
       logger: this.log,
-      defaultLeaseMs:
+      defaultLeaseMs: 6e5,
       defaultMaxAttempts: 10
     });
     this.markKindStmt = db.prepare(
@@ -769,7 +789,7 @@ var Outbox = class {
       this.log.debug({ rootId }, "Enqueued heartbeat job");
     }
   }
-  claimFsControlJob(leaseOwner, leaseMs =
+  claimFsControlJob(leaseOwner, leaseMs = 6e5) {
     return this.queue.claimNext({ type: "fs_control", leaseOwner, leaseMs });
   }
   ack(jobId, leaseOwner) {
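
Both Outbox hunks fill in the same lease default: `6e5` ms, i.e. 10 minutes. A job claimed via `claimFsControlJob` that is neither acked nor retried within that window can presumably be re-claimed by another worker (the usual lease-queue contract, though Qulite's exact semantics are not shown in this diff), which is what the ack-failure warning added further down guards against.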
@@ -1085,7 +1105,9 @@ var Hasher = class {
         successes.push({
           id: record.id,
           contentSha256: result.contentSha256,
-          serviceFileHash: result.serviceFileHash
+          serviceFileHash: result.serviceFileHash,
+          hashOwner: record.hash_owner ?? this.ownerId,
+          hashStartedAt: record.hash_started_at ?? 0
         });
       } catch (error) {
         if (error?.code === "ENOENT") {
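
These two extra fields close the loop with the guarded `UPDATE` in the first hunk: the hasher now carries the claiming owner and start timestamp through to `applyHashResults`, so the apply step can prove it still owns the row. The `?? this.ownerId` and `?? 0` fallbacks appear to cover records that predate the owner/started-at bookkeeping.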
@@ -1244,6 +1266,42 @@ async function runInventory(opts) {
 async function sleep(ms) {
   return new Promise((resolve) => setTimeout(resolve, ms));
 }
+var SnapshotStaleError = class extends Error {
+  constructor(message = "snapshot became stale") {
+    super(message);
+    this.name = "SnapshotStaleError";
+  }
+};
+function assertNotStale(opts) {
+  if (opts?.isStale?.()) {
+    throw new SnapshotStaleError();
+  }
+}
+function assertBeforeDeadline(opts) {
+  if (opts?.deadlineMs !== void 0 && Date.now() >= opts.deadlineMs) {
+    throw new Error("snapshot publish deadline exceeded");
+  }
+}
+function assertValid(opts) {
+  assertNotStale(opts);
+  assertBeforeDeadline(opts);
+}
+function withDeadline(promise, opts) {
+  if (opts?.deadlineMs === void 0) return promise;
+  const remaining = opts.deadlineMs - Date.now();
+  if (remaining <= 0) {
+    return Promise.reject(new Error("snapshot publish deadline exceeded"));
+  }
+  return Promise.race([
+    promise,
+    new Promise(
+      (_, reject) => setTimeout(
+        () => reject(new Error("snapshot publish deadline exceeded")),
+        remaining
+      )
+    )
+  ]);
+}
 function computeSnapshot(filesRepo) {
   const files = filesRepo.getCleanFilesForSnapshot();
   const hashes = files.map((file) => file.service_file_hash).filter((hash) => typeof hash === "string");
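
`withDeadline` races the operation against a timer via `Promise.race`. Worth noting: the race rejects the awaited chain but does not cancel the losing operation, and the timeout timer is never cleared, so this bounds how long the publish loop waits, not how long the underlying I/O runs. A small usage sketch (the `opts` shape matches the diff; `slowOp` is made up):

```js
// Usage sketch for the helpers above. slowOp is a hypothetical
// long-running async operation.
const opts = { deadlineMs: Date.now() + 5e3, isStale: () => false };
try {
  assertValid(opts);                        // throws if stale or past deadline
  const value = await withDeadline(slowOp(), opts);
  console.log("completed within budget", value);
} catch (err) {
  if (err instanceof SnapshotStaleError) {
    // soft abort: retry quickly with fresh state
  } else {
    // "snapshot publish deadline exceeded": hard stop for this attempt
  }
}
```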
@@ -1259,12 +1317,17 @@ function computeSnapshot(filesRepo) {
     totalSize
   };
 }
-async function withRetries(op, logger2, context, maxAttempts) {
+async function withRetries(op, logger2, context, maxAttempts, opts) {
   let attempt = 0;
   while (true) {
+    assertValid(opts);
     try {
-      return await op();
+      return await withDeadline(op(), opts);
     } catch (err) {
+      if (err instanceof SnapshotStaleError) throw err;
+      if (typeof err?.message === "string" && err.message.includes("deadline exceeded")) {
+        throw err;
+      }
       attempt += 1;
       if (attempt >= maxAttempts) {
         logger2.error(
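
`withRetries` now refuses to retry two classes of errors: `SnapshotStaleError` (matched by instance) and deadline errors (matched by the `"deadline exceeded"` substring, which both throw sites in the helpers above share). Retrying either would only burn attempts on an operation whose inputs are already invalid. The substring match is fragile by nature, but it is consistent within this file.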
@@ -1282,7 +1345,14 @@ async function withRetries(op, logger2, context, maxAttempts) {
       }
     }
   }
-
+function computeServiceFileHash(relPath, content) {
+  const hash = createHash("sha256");
+  hash.update(relPath);
+  hash.update("\n");
+  hash.update(content);
+  return hash.digest("hex");
+}
+async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts, chunkSize = 1, opts) {
   if (!missing || missing.length === 0) return;
   const total = missing.length;
   const chunks = [];
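
`computeServiceFileHash` binds content to its path: the digest covers `relPath`, a `"\n"` separator, then the bytes, so identical content at two paths hashes differently, and path/content pairs cannot ambiguously concatenate. A standalone sketch using Node's built-in crypto (sample inputs are made up):

```js
import { createHash } from "node:crypto";

function computeServiceFileHash(relPath, content) {
  const hash = createHash("sha256");
  hash.update(relPath); // path first...
  hash.update("\n");    // ...separated, so ("ab","c") !== ("a","bc")...
  hash.update(content); // ...then the file bytes
  return hash.digest("hex");
}

// Same bytes, different paths -> different service hashes:
const a = computeServiceFileHash("src/a.ts", Buffer.from("export {};\n"));
const b = computeServiceFileHash("src/b.ts", Buffer.from("export {};\n"));
console.log(a !== b); // true
```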
@@ -1290,17 +1360,36 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
     chunks.push(missing.slice(i, i + chunkSize));
   }
   for (let idx = 0; idx < chunks.length; idx++) {
+    assertValid(opts);
     const list = chunks[idx];
     const map = /* @__PURE__ */ new Map();
     for (const missingFile of list) {
       const absPath = path2.join(rootPath, missingFile.file_path);
       try {
         const buffer = await fs5.readFile(absPath);
+        const serviceHash = computeServiceFileHash(
+          missingFile.file_path,
+          buffer
+        );
+        if (serviceHash !== missingFile.file_hash) {
+          logger2.warn(
+            {
+              relPath: missingFile.file_path,
+              expected: missingFile.file_hash,
+              got: serviceHash
+            },
+            "File content changed during upload; snapshot is stale"
+          );
+          throw new SnapshotStaleError(
+            `file hash mismatch for ${missingFile.file_path}`
+          );
+        }
         map.set(missingFile.file_hash, {
           path: missingFile.file_path,
           content: buffer
         });
       } catch (error) {
+        if (error instanceof SnapshotStaleError) throw error;
         logger2.warn(
           { err: error, relPath: missingFile.file_path },
           "Failed to read missing file content"
@@ -1308,6 +1397,7 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
       }
     }
     if (map.size === 0) continue;
+    assertValid(opts);
     await withRetries(
       () => syncClient.uploadFileContent(map),
       logger2,
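
The net effect of these two hunks: each file is re-hashed from disk at upload time and compared against the hash recorded in the snapshot. A mismatch aborts the publish with `SnapshotStaleError` before any stale bytes reach the server, and the `assertValid` checkpoints before each chunk and each network call keep a doomed publish from running to completion.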
@@ -1317,65 +1407,76 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
         chunks: chunks.length,
         files: map.size
       },
-      maxAttempts
+      maxAttempts,
+      opts
     );
   }
 }
-async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2,
+async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2, opts) {
   const { snapshotHash, files } = computation;
-  const maxAttempts =
-  const uploadChunkSize =
+  const maxAttempts = opts?.maxAttempts ?? 5;
+  const uploadChunkSize = opts?.uploadChunkSize ?? 1;
+  assertValid(opts);
   let status = await withRetries(
     () => syncClient.checkSnapshotStatus(snapshotHash),
     logger2,
     { op: "checkSnapshotStatus", snapshotHash },
-    maxAttempts
+    maxAttempts,
+    opts
   );
   if (status.status === "READY") {
     logger2.info({ snapshotHash }, "Snapshot already READY");
     return;
   }
-  if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
-    status = await withRetries(
-      () => syncClient.createSnapshot(snapshotHash, files),
-      logger2,
-      { op: "createSnapshot", snapshotHash },
-      maxAttempts
-    );
-  }
-  if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
-    logger2.info(
-      { missing: status.missing_files.length },
-      "Uploading missing file content"
-    );
-    await uploadMissing(
-      rootPath,
-      status.missing_files,
-      syncClient,
-      logger2,
-      maxAttempts,
-      uploadChunkSize
-    );
-    status = await withRetries(
-      () => syncClient.createSnapshot(snapshotHash, files),
-      logger2,
-      { op: "createSnapshot", snapshotHash },
-      maxAttempts
-    );
-  }
   let attempt = 0;
   while (status.status !== "READY") {
+    assertValid(opts);
     if (status.status === "FAILED") {
       throw new Error(`Snapshot failed processing: ${JSON.stringify(status)}`);
     }
+    if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
+      status = await withRetries(
+        () => syncClient.createSnapshot(snapshotHash, files),
+        logger2,
+        { op: "createSnapshot", snapshotHash },
+        maxAttempts,
+        opts
+      );
+    }
+    if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
+      logger2.info(
+        { missing: status.missing_files.length },
+        "Uploading missing file content"
+      );
+      await uploadMissing(
+        rootPath,
+        status.missing_files,
+        syncClient,
+        logger2,
+        maxAttempts,
+        uploadChunkSize,
+        opts
+      );
+      status = await withRetries(
+        () => syncClient.createSnapshot(snapshotHash, files),
+        logger2,
+        { op: "createSnapshot", snapshotHash },
+        maxAttempts,
+        opts
+      );
+      continue;
+    }
+    if (status.status === "READY") break;
     const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
     await sleep(delay);
     attempt += 1;
+    assertValid(opts);
     status = await withRetries(
      () => syncClient.checkSnapshotStatus(snapshotHash),
       logger2,
       { op: "checkSnapshotStatus", snapshotHash },
-      maxAttempts
+      maxAttempts,
+      opts
     );
   }
   logger2.info({ snapshotHash }, "Snapshot READY");
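
Structurally, the create-snapshot and upload-missing steps moved from straight-line code ahead of the loop into the `while (status.status !== "READY")` polling loop itself. A snapshot that reports `MISSING_CONTENT` more than once (for example, because a `createSnapshot` retry surfaced additional missing files) now goes through upload-and-create again via the `continue`, instead of falling through to a bare status poll that could never make progress.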
@@ -1542,12 +1643,14 @@ async function sendHeartbeat(rootId, snapshotsRepo, syncClient, logger2) {
 // src/service/State.ts
 var ServiceState = class {
   constructor() {
+    this.changeSeq = 0;
     this.lastChangeAt = Date.now();
     this.lastSnapshotReadyAt = Date.now();
     this.lastHeartbeatEnqueuedAt = 0;
   }
   updateChange(timestamp = Date.now()) {
     this.lastChangeAt = timestamp;
+    this.changeSeq += 1;
   }
   updateSnapshotReady(timestamp = Date.now()) {
     this.lastSnapshotReadyAt = timestamp;
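
`changeSeq` is a monotonic counter bumped on every recorded change. Capturing it before a long publish and comparing afterwards (or inside an `isStale` callback) is a seqlock-style check: two timestamps can collide within a millisecond, a counter cannot. A minimal standalone sketch:

```js
// Seqlock-style staleness detection with a monotonic change counter.
class ServiceState {
  constructor() {
    this.changeSeq = 0;
  }
  updateChange() {
    this.changeSeq += 1;
  }
}

const state = new ServiceState();
const publishSeq = state.changeSeq;           // capture before publishing
const isStale = () => state.changeSeq !== publishSeq;

state.updateChange();                         // a file changes mid-publish
console.log(isStale());                       // true -> abort, retry soon
```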
@@ -1573,6 +1676,7 @@ function computeBackoff(attempts) {
   const delay = 1e3 * 2 ** base;
   return Math.min(delay, 6e4);
 }
+var PUBLISH_MAX_MS = 5 * 6e4;
 async function readSymlinkTarget2(absPath) {
   try {
     return await fs5.readlink(absPath);
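
`PUBLISH_MAX_MS = 5 * 6e4` is five minutes: the hard wall-clock budget handed to `publishSnapshot` as `deadlineMs` in the hunk below.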
@@ -1851,9 +1955,10 @@ var ServiceRunner = class {
     if (this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0) {
       const delay = computeBackoff(job.attempts);
       this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, delay);
-      await sleep2(200);
       return;
     }
+    const publishSeq = this.state.changeSeq;
+    const isStale = () => this.state.changeSeq !== publishSeq || this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0;
     try {
       const result = await publishSnapshot(
         this.runtime.config.rootPath,
@@ -1863,17 +1968,40 @@ var ServiceRunner = class {
         log,
         {
           maxAttempts: this.runtime.config.maxSnapshotAttempts,
-          uploadChunkSize: this.runtime.config.uploadChunkSize
+          uploadChunkSize: this.runtime.config.uploadChunkSize,
+          deadlineMs: Date.now() + PUBLISH_MAX_MS,
+          isStale
         }
       );
-      this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
+      const acked = this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
+      if (!acked) {
+        log.warn(
+          { jobId: job.id },
+          "Snapshot job ack failed (lease may have expired)"
+        );
+      }
       this.state.updateSnapshotReady(result.createdAt);
       log.info({ snapshotHash: result.snapshotHash }, "Snapshot job completed");
+      if (this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0) {
+        this.runtime.outbox.enqueueSnapshot(this.runtime.config.rootId, 0);
+        return;
+      }
+      const nowHash = computeSnapshot(this.runtime.filesRepo).snapshotHash;
+      if (nowHash !== result.snapshotHash) {
+        this.runtime.outbox.enqueueSnapshot(this.runtime.config.rootId, 0);
+      }
     } catch (error) {
+      if (error instanceof SnapshotStaleError) {
+        log.debug(
+          { err: error },
+          "Snapshot publish aborted (stale); quick retry"
+        );
+        this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, 250);
+        return;
+      }
       log.warn({ err: error }, "Snapshot job failed");
       const delay = computeBackoff(job.attempts);
       this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, delay);
-      await sleep2(delay);
     }
   }
   async handleHeartbeatJob(job, log) {
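
Taken together, the snapshot job handler now distinguishes three outcomes: a stale publish is a quick, quiet retry (250 ms, logged at debug); a real failure logs a warning and backs off via `computeBackoff`; and a success double-checks itself, re-enqueueing a follow-up snapshot if files went dirty during the publish or if the freshly recomputed snapshot hash no longer matches what was published. The two removed `await sleep2(...)` calls also stop blocking the control loop while a retried job waits out its delay in the queue.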
|