@coderule/mcp 1.2.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.cjs +136 -35
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.d.cts +1 -2
- package/dist/cli.d.ts +1 -2
- package/dist/cli.js +137 -37
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +135 -35
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +136 -36
- package/dist/index.js.map +1 -1
- package/dist/mcp-cli.cjs +136 -35
- package/dist/mcp-cli.cjs.map +1 -1
- package/dist/mcp-cli.d.cts +1 -2
- package/dist/mcp-cli.d.ts +1 -2
- package/dist/mcp-cli.js +137 -37
- package/dist/mcp-cli.js.map +1 -1
- package/package.json +1 -1
package/dist/mcp-cli.cjs
CHANGED
@@ -1,3 +1,4 @@
+#!/usr/bin/env node
 'use strict';

 var fs4 = require('fs/promises');
@@ -46,7 +47,7 @@ var DEFAULT_HEARTBEAT_CHECK_INTERVAL_MS = 5e3;
 var DEFAULT_QUEUE_POLL_INTERVAL_MS = 500;
 var DEFAULT_HASH_BATCH_SIZE = 32;
 var DEFAULT_MAX_SNAPSHOT_ATTEMPTS = 5;
-var DEFAULT_HTTP_TIMEOUT_MS =
+var DEFAULT_HTTP_TIMEOUT_MS = 12e4;

 // src/config/Configurator.ts
 var DEFAULT_RETRIEVAL_FORMATTER = "standard";
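Note: 12e4 is JavaScript exponential notation for 120000, so the default HTTP timeout is now two minutes (the removed line's value is cut off in this view).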
@@ -551,6 +552,16 @@ var Outbox = class {
     if (purged > 0) {
       this.log.warn({ purged }, "Purged legacy fs_control jobs without kind");
     }
+    try {
+      const counts = {
+        pending: this.queue.countByStatus(qulite.JobStatus.Pending),
+        processing: this.queue.countByStatus(qulite.JobStatus.Processing),
+        done: this.queue.countByStatus(qulite.JobStatus.Done),
+        failed: this.queue.countByStatus(qulite.JobStatus.Failed)
+      };
+      this.log.debug({ counts }, "Outbox initialized");
+    } catch {
+    }
   }
   getQueue() {
     return this.queue;
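Note: the new counts block is deliberately best-effort. Because it sits in a try with an empty catch, a throwing countByStatus call can only suppress the "Outbox initialized" debug line; it can never abort Outbox startup.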
@@ -1062,44 +1073,109 @@ function computeSnapshot(filesRepo) {
     totalSize
   };
 }
-async function
-
-
-  for (const missingFile of missing) {
-    const absPath = path__default.default.join(rootPath, missingFile.file_path);
+async function withRetries(op, logger2, context, maxAttempts) {
+  let attempt = 0;
+  while (true) {
     try {
-
-
-
-
-
-
+      return await op();
+    } catch (err) {
+      attempt += 1;
+      if (attempt >= maxAttempts) {
+        logger2.error(
+          { err, ...context, attempt },
+          "Operation failed after retries"
+        );
+        throw err;
+      }
+      const delay = Math.min(15e3, 1e3 * 2 ** (attempt - 1));
       logger2.warn(
-        { err
-        "
+        { err, ...context, attempt, delay },
+        "Operation failed; retrying"
       );
+      await sleep(delay);
     }
   }
-  if (map.size === 0) return;
-  await syncClient.uploadFileContent(map);
 }
-async function
+async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts, chunkSize = 64) {
+  if (!missing || missing.length === 0) return;
+  const total = missing.length;
+  const chunks = [];
+  for (let i = 0; i < total; i += chunkSize) {
+    chunks.push(missing.slice(i, i + chunkSize));
+  }
+  for (let idx = 0; idx < chunks.length; idx++) {
+    const list = chunks[idx];
+    const map = /* @__PURE__ */ new Map();
+    for (const missingFile of list) {
+      const absPath = path__default.default.join(rootPath, missingFile.file_path);
+      try {
+        const buffer = await fs4__default.default.readFile(absPath);
+        map.set(missingFile.file_hash, {
+          path: missingFile.file_path,
+          content: buffer
+        });
+      } catch (error) {
+        logger2.warn(
+          { err: error, relPath: missingFile.file_path },
+          "Failed to read missing file content"
+        );
+      }
+    }
+    if (map.size === 0) continue;
+    await withRetries(
+      () => syncClient.uploadFileContent(map),
+      logger2,
+      {
+        op: "uploadFileContent",
+        chunkIndex: idx + 1,
+        chunks: chunks.length,
+        files: map.size
+      },
+      maxAttempts
+    );
+  }
+}
+async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2, options) {
   const { snapshotHash, files } = computation;
-
+  const maxAttempts = options?.maxAttempts ?? 5;
+  const uploadChunkSize = options?.uploadChunkSize ?? 64;
+  let status = await withRetries(
+    () => syncClient.checkSnapshotStatus(snapshotHash),
+    logger2,
+    { op: "checkSnapshotStatus", snapshotHash },
+    maxAttempts
+  );
   if (status.status === "READY") {
     logger2.info({ snapshotHash }, "Snapshot already READY");
     return;
   }
   if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
-    status = await
+    status = await withRetries(
+      () => syncClient.createSnapshot(snapshotHash, files),
+      logger2,
+      { op: "createSnapshot", snapshotHash },
+      maxAttempts
+    );
   }
   if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
     logger2.info(
       { missing: status.missing_files.length },
       "Uploading missing file content"
     );
-    await uploadMissing(
-
+    await uploadMissing(
+      rootPath,
+      status.missing_files,
+      syncClient,
+      logger2,
+      maxAttempts,
+      uploadChunkSize
+    );
+    status = await withRetries(
+      () => syncClient.createSnapshot(snapshotHash, files),
+      logger2,
+      { op: "createSnapshot", snapshotHash },
+      maxAttempts
+    );
  }
   let attempt = 0;
   while (status.status !== "READY") {
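The core of this release is the new withRetries helper: it retries an arbitrary async operation with exponential backoff, doubling the delay each attempt from 1 s up to a 15 s cap, and rethrows the last error once maxAttempts is exhausted. A minimal standalone sketch of the schedule it produces (the loop below is illustrative, not the bundled code):

  // delay = min(15000, 1000 * 2 ** (attempt - 1)), in milliseconds
  for (let attempt = 1; attempt <= 6; attempt++) {
    const delay = Math.min(15e3, 1e3 * 2 ** (attempt - 1));
    console.log(`attempt ${attempt}: wait ${delay} ms before retrying`);
  }
  // attempts 1..4 wait 1000, 2000, 4000, 8000 ms; attempt 5 onward hits the 15000 ms cap

uploadMissing is also reworked: instead of one bulk uploadFileContent call, the missing-file list is split into chunks (64 files by default, tunable via chunkSize) and each chunk is uploaded under its own withRetries, so a transient failure re-sends at most one chunk. Unreadable files are logged and skipped rather than failing the whole batch.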
@@ -1109,13 +1185,24 @@ async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2)
     const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
     await sleep(delay);
     attempt += 1;
-    status = await
+    status = await withRetries(
+      () => syncClient.checkSnapshotStatus(snapshotHash),
+      logger2,
+      { op: "checkSnapshotStatus", snapshotHash },
+      maxAttempts
+    );
   }
   logger2.info({ snapshotHash }, "Snapshot READY");
 }
-async function publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger2) {
+async function publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger2, options) {
   const computation = computeSnapshot(filesRepo);
-  await ensureSnapshotCreated(
-
+  await ensureSnapshotCreated(
+    rootPath,
+    computation,
+    syncClient,
+    logger2,
+    options
+  );
   const createdAt = Date.now();
   snapshotsRepo.insert(
     computation.snapshotHash,
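Note: two different backoffs now coexist in ensureSnapshotCreated. The polling loop's own wait for the snapshot to become READY stays capped at 5 s, while each checkSnapshotStatus call inside it is additionally wrapped in withRetries with its separate 15 s-capped schedule for transport-level failures. publishSnapshot simply threads the new options object through so callers can tune maxAttempts.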
@@ -1155,7 +1242,8 @@ async function runInitialSyncPipeline(runtime) {
     runtime.filesRepo,
     runtime.snapshotsRepo,
     runtime.clients.sync,
-    syncLogger
+    syncLogger,
+    { maxAttempts: runtime.config.maxSnapshotAttempts }
   );
   return result;
 }
@@ -1558,7 +1646,8 @@ var ServiceRunner = class {
       this.runtime.filesRepo,
       this.runtime.snapshotsRepo,
       this.runtime.clients.sync,
-      log
+      log,
+      { maxAttempts: this.runtime.config.maxSnapshotAttempts }
     );
     this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
     this.state.updateSnapshotReady(result.createdAt);
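Note: both call sites (the initial sync pipeline and the ServiceRunner snapshot job) now pass { maxAttempts } from runtime config rather than relying on the hard-coded fallback of 5 inside ensureSnapshotCreated; presumably config.maxSnapshotAttempts is seeded from the DEFAULT_MAX_SNAPSHOT_ATTEMPTS = 5 constant in the first hunk, though this diff does not show that wiring.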
@@ -1886,15 +1975,27 @@ async function main() {
   const runner = new ServiceRunner(runtime);
   try {
     await runner.prepareWatcher(true);
-
-
-
-
-
-
-
-
-
+    let initialCreatedAt;
+    try {
+      const initial = await runInitialSyncPipeline(runtime);
+      runtime.logger.info(
+        {
+          snapshotHash: initial.snapshotHash,
+          filesCount: initial.filesCount
+        },
+        "Initial sync completed; starting MCP server"
+      );
+      initialCreatedAt = initial.createdAt;
+    } catch (error) {
+      runtime.logger.warn(
+        { err: error },
+        "Initial sync failed; enqueuing snapshot job and continuing"
+      );
+      runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0);
+    }
+    if (initialCreatedAt) {
+      runner.recordInitialSnapshot(initialCreatedAt);
+    }
     await runner.startLoops();
     await runner.enableWatcherProcessing();
     const server = createMcpServer({ runtime, runner });
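The rewritten main() makes startup fail-soft: if the initial sync throws, the error is logged at warn level, a snapshot job is enqueued for the background loop, and the MCP server starts anyway. A minimal sketch of the same pattern, with illustrative names rather than the package's actual API:

  // Fail-soft startup: try the expensive step once; on failure, defer it
  // to a background job instead of crashing the process.
  async function syncThenServe(
    runInitialSync: () => Promise<{ createdAt: number }>,
    enqueueSnapshotJob: () => void,
    warn: (err: unknown, msg: string) => void
  ): Promise<number | undefined> {
    try {
      const { createdAt } = await runInitialSync();
      return createdAt; // caller records this as the initial snapshot time
    } catch (err) {
      warn(err, "initial sync failed; deferring snapshot to a background job");
      enqueueSnapshotJob(); // the background loop retries later
      return undefined; // server still starts, just without a fresh snapshot
    }
  }

The upshot is that a flaky network at boot no longer prevents the server from coming up; the snapshot is retried by the same queue machinery that handles ongoing work.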