@coderule/mcp 1.3.0 → 1.4.0
This diff shows the content changes between publicly released versions of the package as they appear in the supported public registries; it is provided for informational purposes only.
- package/dist/cli.cjs +135 -35
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +136 -36
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +135 -35
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +136 -36
- package/dist/index.js.map +1 -1
- package/dist/mcp-cli.cjs +135 -35
- package/dist/mcp-cli.cjs.map +1 -1
- package/dist/mcp-cli.js +136 -36
- package/dist/mcp-cli.js.map +1 -1
- package/package.json +1 -1
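The same change lands in all six bundles (the .cjs and .js builds of cli, index, and mcp-cli are evidently built from the same sources; each .js build gains one extra line); only dist/mcp-cli.cjs is expanded below.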
package/dist/mcp-cli.cjs
CHANGED
@@ -47,7 +47,7 @@ var DEFAULT_HEARTBEAT_CHECK_INTERVAL_MS = 5e3;
 var DEFAULT_QUEUE_POLL_INTERVAL_MS = 500;
 var DEFAULT_HASH_BATCH_SIZE = 32;
 var DEFAULT_MAX_SNAPSHOT_ATTEMPTS = 5;
-var DEFAULT_HTTP_TIMEOUT_MS =
+var DEFAULT_HTTP_TIMEOUT_MS = 12e4;

 // src/config/Configurator.ts
 var DEFAULT_RETRIEVAL_FORMATTER = "standard";
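Note: `12e4` is 120 000, so the new default HTTP timeout is 120 seconds; the prior value on the removed line is truncated in this view.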
@@ -552,6 +552,16 @@ var Outbox = class {
     if (purged > 0) {
       this.log.warn({ purged }, "Purged legacy fs_control jobs without kind");
     }
+    try {
+      const counts = {
+        pending: this.queue.countByStatus(qulite.JobStatus.Pending),
+        processing: this.queue.countByStatus(qulite.JobStatus.Processing),
+        done: this.queue.countByStatus(qulite.JobStatus.Done),
+        failed: this.queue.countByStatus(qulite.JobStatus.Failed)
+      };
+      this.log.debug({ counts }, "Outbox initialized");
+    } catch {
+    }
   }
   getQueue() {
     return this.queue;
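The block added to Outbox is pure startup diagnostics: it counts jobs per status, logs them at debug level, and the empty catch means a failing count query can never block initialization. A minimal TypeScript sketch of the pattern; the `Queue` and `Logger` interfaces here are stand-in assumptions, while `JobStatus` and `countByStatus` mirror the qulite calls in the bundle:

```ts
// Best-effort queue diagnostics, mirroring the Outbox init block above.
// Queue and Logger are stand-in interfaces; JobStatus mirrors qulite.JobStatus.
enum JobStatus { Pending, Processing, Done, Failed }

interface Queue { countByStatus(status: JobStatus): number }
interface Logger { debug(fields: object, msg: string): void }

function logQueueCounts(queue: Queue, log: Logger): void {
  try {
    const counts = {
      pending: queue.countByStatus(JobStatus.Pending),
      processing: queue.countByStatus(JobStatus.Processing),
      done: queue.countByStatus(JobStatus.Done),
      failed: queue.countByStatus(JobStatus.Failed)
    };
    log.debug({ counts }, "Outbox initialized");
  } catch {
    // Deliberately swallowed: diagnostics must never block startup.
  }
}
```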
@@ -1063,44 +1073,109 @@ function computeSnapshot(filesRepo) {
     totalSize
   };
 }
-async function
-
-
-  for (const missingFile of missing) {
-    const absPath = path__default.default.join(rootPath, missingFile.file_path);
+async function withRetries(op, logger2, context, maxAttempts) {
+  let attempt = 0;
+  while (true) {
     try {
-
-
-
-
-
-
+      return await op();
+    } catch (err) {
+      attempt += 1;
+      if (attempt >= maxAttempts) {
+        logger2.error(
+          { err, ...context, attempt },
+          "Operation failed after retries"
+        );
+        throw err;
+      }
+      const delay = Math.min(15e3, 1e3 * 2 ** (attempt - 1));
       logger2.warn(
-        { err
-        "
+        { err, ...context, attempt, delay },
+        "Operation failed; retrying"
       );
+      await sleep(delay);
     }
   }
-  if (map.size === 0) return;
-  await syncClient.uploadFileContent(map);
 }
-async function
+async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts, chunkSize = 64) {
+  if (!missing || missing.length === 0) return;
+  const total = missing.length;
+  const chunks = [];
+  for (let i = 0; i < total; i += chunkSize) {
+    chunks.push(missing.slice(i, i + chunkSize));
+  }
+  for (let idx = 0; idx < chunks.length; idx++) {
+    const list = chunks[idx];
+    const map = /* @__PURE__ */ new Map();
+    for (const missingFile of list) {
+      const absPath = path__default.default.join(rootPath, missingFile.file_path);
+      try {
+        const buffer = await fs4__default.default.readFile(absPath);
+        map.set(missingFile.file_hash, {
+          path: missingFile.file_path,
+          content: buffer
+        });
+      } catch (error) {
+        logger2.warn(
+          { err: error, relPath: missingFile.file_path },
+          "Failed to read missing file content"
+        );
+      }
+    }
+    if (map.size === 0) continue;
+    await withRetries(
+      () => syncClient.uploadFileContent(map),
+      logger2,
+      {
+        op: "uploadFileContent",
+        chunkIndex: idx + 1,
+        chunks: chunks.length,
+        files: map.size
+      },
+      maxAttempts
+    );
+  }
+}
+async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2, options) {
   const { snapshotHash, files } = computation;
-
+  const maxAttempts = options?.maxAttempts ?? 5;
+  const uploadChunkSize = options?.uploadChunkSize ?? 64;
+  let status = await withRetries(
+    () => syncClient.checkSnapshotStatus(snapshotHash),
+    logger2,
+    { op: "checkSnapshotStatus", snapshotHash },
+    maxAttempts
+  );
   if (status.status === "READY") {
     logger2.info({ snapshotHash }, "Snapshot already READY");
     return;
   }
   if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
-    status = await
+    status = await withRetries(
+      () => syncClient.createSnapshot(snapshotHash, files),
+      logger2,
+      { op: "createSnapshot", snapshotHash },
+      maxAttempts
+    );
  }
   if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
     logger2.info(
       { missing: status.missing_files.length },
       "Uploading missing file content"
     );
-    await uploadMissing(
-
+    await uploadMissing(
+      rootPath,
+      status.missing_files,
+      syncClient,
+      logger2,
+      maxAttempts,
+      uploadChunkSize
+    );
+    status = await withRetries(
+      () => syncClient.createSnapshot(snapshotHash, files),
+      logger2,
+      { op: "createSnapshot", snapshotHash },
+      maxAttempts
+    );
   }
   let attempt = 0;
   while (status.status !== "READY") {
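The core of this hunk is `withRetries`, a generic exponential-backoff wrapper, plus the reworked `uploadMissing`, which now splits missing files into chunks (64 per `uploadFileContent` call by default) and retries each chunk's upload independently; files that cannot be read locally are logged and skipped rather than failing the whole batch. The delay doubles from 1 s and is capped at 15 s, so with `maxAttempts` of 5 a persistently failing call sleeps 1 s, 2 s, 4 s, and 8 s before the fifth failure rethrows. A quick standalone check of that schedule:

```ts
// Delays from Math.min(15e3, 1e3 * 2 ** (attempt - 1)), attempt = 1..6,
// as computed by withRetries above.
const delays = Array.from({ length: 6 }, (_, i) => Math.min(15e3, 1e3 * 2 ** i));
console.log(delays); // [ 1000, 2000, 4000, 8000, 15000, 15000 ]
```

`ensureSnapshotCreated` composes the two helpers: check status, create the snapshot if it is NOT_FOUND or MISSING_CONTENT, upload any missing content in chunks, then create again, with every network call wrapped in `withRetries`.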
@@ -1110,13 +1185,24 @@ async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2)
     const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
     await sleep(delay);
     attempt += 1;
-    status = await
+    status = await withRetries(
+      () => syncClient.checkSnapshotStatus(snapshotHash),
+      logger2,
+      { op: "checkSnapshotStatus", snapshotHash },
+      maxAttempts
+    );
   }
   logger2.info({ snapshotHash }, "Snapshot READY");
 }
-async function publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger2) {
+async function publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger2, options) {
   const computation = computeSnapshot(filesRepo);
-  await ensureSnapshotCreated(
+  await ensureSnapshotCreated(
+    rootPath,
+    computation,
+    syncClient,
+    logger2,
+    options
+  );
   const createdAt = Date.now();
   snapshotsRepo.insert(
     computation.snapshotHash,
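The READY polling loop keeps its original cadence: `Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt))` with `attempt` starting at 0 gives poll delays of 1 s, 2 s, 4 s, then 5 s from the fourth poll on. What changed is that each `checkSnapshotStatus` call inside the loop is now itself wrapped in `withRetries`, so transient errors while polling are retried with backoff instead of propagating immediately. `publishSnapshot` gains an `options` parameter that it forwards to `ensureSnapshotCreated`.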
@@ -1156,7 +1242,8 @@ async function runInitialSyncPipeline(runtime) {
     runtime.filesRepo,
     runtime.snapshotsRepo,
     runtime.clients.sync,
-    syncLogger
+    syncLogger,
+    { maxAttempts: runtime.config.maxSnapshotAttempts }
   );
   return result;
 }
@@ -1559,7 +1646,8 @@ var ServiceRunner = class {
       this.runtime.filesRepo,
       this.runtime.snapshotsRepo,
       this.runtime.clients.sync,
-      log
+      log,
+      { maxAttempts: this.runtime.config.maxSnapshotAttempts }
     );
     this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
     this.state.updateSnapshotReady(result.createdAt);
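Both call sites of `publishSnapshot` (the initial-sync pipeline above and the Outbox job path in `ServiceRunner`) now pass `{ maxAttempts: ...config.maxSnapshotAttempts }`, making the retry budget configurable. When the option is absent, `ensureSnapshotCreated` falls back to 5 attempts, which matches `DEFAULT_MAX_SNAPSHOT_ATTEMPTS` from the first hunk.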
@@ -1887,15 +1975,27 @@ async function main() {
   const runner = new ServiceRunner(runtime);
   try {
     await runner.prepareWatcher(true);
-
-
-
-
-
-
-
-
-
+    let initialCreatedAt;
+    try {
+      const initial = await runInitialSyncPipeline(runtime);
+      runtime.logger.info(
+        {
+          snapshotHash: initial.snapshotHash,
+          filesCount: initial.filesCount
+        },
+        "Initial sync completed; starting MCP server"
+      );
+      initialCreatedAt = initial.createdAt;
+    } catch (error) {
+      runtime.logger.warn(
+        { err: error },
+        "Initial sync failed; enqueuing snapshot job and continuing"
+      );
+      runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0);
+    }
+    if (initialCreatedAt) {
+      runner.recordInitialSnapshot(initialCreatedAt);
+    }
     await runner.startLoops();
     await runner.enableWatcherProcessing();
     const server = createMcpServer({ runtime, runner });
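The `main()` change makes startup resilient: if `runInitialSyncPipeline` throws, the new code logs a warning, enqueues a snapshot job via `runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0)` so the background loops can retry it, and still starts the MCP server; `runner.recordInitialSnapshot` runs only when the initial sync actually produced a snapshot. The removed 1.3.0 lines are truncated in this view, so the previous failure behavior is not visible here.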