@coderule/mcp 1.3.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.cjs +135 -35
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +136 -36
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +135 -35
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +136 -36
- package/dist/index.js.map +1 -1
- package/dist/mcp-cli.cjs +135 -35
- package/dist/mcp-cli.cjs.map +1 -1
- package/dist/mcp-cli.js +136 -36
- package/dist/mcp-cli.js.map +1 -1
- package/package.json +1 -1
package/dist/mcp-cli.js
CHANGED
@@ -6,7 +6,7 @@ import { createHash } from 'crypto';
 import envPaths from 'env-paths';
 import pino from 'pino';
 import Database from 'better-sqlite3';
-import { Qulite,
+import { Qulite, JobStatus, enqueueFsEvent } from '@coderule/qulite';
 import { CoderuleClients, ASTHttpClient, SyncHttpClient } from '@coderule/clients';
 import fs2 from 'fs';
 import { Worker } from 'worker_threads';
@@ -32,7 +32,7 @@ var DEFAULT_HEARTBEAT_CHECK_INTERVAL_MS = 5e3;
 var DEFAULT_QUEUE_POLL_INTERVAL_MS = 500;
 var DEFAULT_HASH_BATCH_SIZE = 32;
 var DEFAULT_MAX_SNAPSHOT_ATTEMPTS = 5;
-var DEFAULT_HTTP_TIMEOUT_MS =
+var DEFAULT_HTTP_TIMEOUT_MS = 12e4;
 
 // src/config/Configurator.ts
 var DEFAULT_RETRIEVAL_FORMATTER = "standard";
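A note on the constant: 12e4 is JavaScript exponent notation for 120 000, so the new default HTTP timeout is two minutes (the previous value is truncated in this diff view). A quick check:

    // 12e4 means 12 * 10^4 milliseconds
    console.log(12e4 === 120000);  // true
    console.log(12e4 / 1000 / 60); // 2 (minutes)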
@@ -537,6 +537,16 @@ var Outbox = class {
     if (purged > 0) {
       this.log.warn({ purged }, "Purged legacy fs_control jobs without kind");
     }
+    try {
+      const counts = {
+        pending: this.queue.countByStatus(JobStatus.Pending),
+        processing: this.queue.countByStatus(JobStatus.Processing),
+        done: this.queue.countByStatus(JobStatus.Done),
+        failed: this.queue.countByStatus(JobStatus.Failed)
+      };
+      this.log.debug({ counts }, "Outbox initialized");
+    } catch {
+    }
   }
   getQueue() {
     return this.queue;
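The added startup diagnostics assume @coderule/qulite exposes per-status job counts; the diff shows only the call sites (the JobStatus enum plus queue.countByStatus), and the whole block is wrapped in try/catch so a diagnostics failure can never break initialization. As a hypothetical sketch of what such a counter could look like over the better-sqlite3 handle the queue wraps (the jobs table and status column are assumptions, not confirmed by this diff):

    // Hypothetical sketch only: Qulite's real schema is not shown in this diff.
    import Database from 'better-sqlite3';

    function countByStatus(db, status) {
      // Assumes a `jobs` table with a `status` column (an assumption).
      const row = db.prepare('SELECT COUNT(*) AS n FROM jobs WHERE status = ?').get(status);
      return row.n;
    }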
@@ -1048,44 +1058,109 @@ function computeSnapshot(filesRepo) {
     totalSize
   };
 }
-async function
-
-
-  for (const missingFile of missing) {
-    const absPath = path.join(rootPath, missingFile.file_path);
+async function withRetries(op, logger2, context, maxAttempts) {
+  let attempt = 0;
+  while (true) {
     try {
-
-
-
-
-
-
+      return await op();
+    } catch (err) {
+      attempt += 1;
+      if (attempt >= maxAttempts) {
+        logger2.error(
+          { err, ...context, attempt },
+          "Operation failed after retries"
+        );
+        throw err;
+      }
+      const delay = Math.min(15e3, 1e3 * 2 ** (attempt - 1));
       logger2.warn(
-        { err
-        "
+        { err, ...context, attempt, delay },
+        "Operation failed; retrying"
       );
+      await sleep(delay);
     }
   }
-  if (map.size === 0) return;
-  await syncClient.uploadFileContent(map);
 }
-async function
+async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts, chunkSize = 64) {
+  if (!missing || missing.length === 0) return;
+  const total = missing.length;
+  const chunks = [];
+  for (let i = 0; i < total; i += chunkSize) {
+    chunks.push(missing.slice(i, i + chunkSize));
+  }
+  for (let idx = 0; idx < chunks.length; idx++) {
+    const list = chunks[idx];
+    const map = /* @__PURE__ */ new Map();
+    for (const missingFile of list) {
+      const absPath = path.join(rootPath, missingFile.file_path);
+      try {
+        const buffer = await fs4.readFile(absPath);
+        map.set(missingFile.file_hash, {
+          path: missingFile.file_path,
+          content: buffer
+        });
+      } catch (error) {
+        logger2.warn(
+          { err: error, relPath: missingFile.file_path },
+          "Failed to read missing file content"
+        );
+      }
+    }
+    if (map.size === 0) continue;
+    await withRetries(
+      () => syncClient.uploadFileContent(map),
+      logger2,
+      {
+        op: "uploadFileContent",
+        chunkIndex: idx + 1,
+        chunks: chunks.length,
+        files: map.size
+      },
+      maxAttempts
+    );
+  }
+}
+async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2, options) {
   const { snapshotHash, files } = computation;
-
+  const maxAttempts = options?.maxAttempts ?? 5;
+  const uploadChunkSize = options?.uploadChunkSize ?? 64;
+  let status = await withRetries(
+    () => syncClient.checkSnapshotStatus(snapshotHash),
+    logger2,
+    { op: "checkSnapshotStatus", snapshotHash },
+    maxAttempts
+  );
   if (status.status === "READY") {
     logger2.info({ snapshotHash }, "Snapshot already READY");
     return;
   }
   if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
-    status = await
+    status = await withRetries(
+      () => syncClient.createSnapshot(snapshotHash, files),
+      logger2,
+      { op: "createSnapshot", snapshotHash },
+      maxAttempts
+    );
   }
   if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
     logger2.info(
       { missing: status.missing_files.length },
       "Uploading missing file content"
     );
-    await uploadMissing(
-
+    await uploadMissing(
+      rootPath,
+      status.missing_files,
+      syncClient,
+      logger2,
+      maxAttempts,
+      uploadChunkSize
+    );
+    status = await withRetries(
+      () => syncClient.createSnapshot(snapshotHash, files),
+      logger2,
+      { op: "createSnapshot", snapshotHash },
+      maxAttempts
+    );
   }
   let attempt = 0;
   while (status.status !== "READY") {
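The new withRetries helper retries with exponential backoff capped at 15 s: 1 s, 2 s, 4 s, 8 s, then 15 s for every later attempt. A self-contained sketch of the same schedule, reproducing the formula above for illustration only:

    // Delay before retry N (attempt is 1-based), as computed in withRetries above.
    const retryDelay = (attempt) => Math.min(15e3, 1e3 * 2 ** (attempt - 1));

    for (let attempt = 1; attempt <= 6; attempt++) {
      console.log(attempt, retryDelay(attempt));
      // 1 1000, 2 2000, 3 4000, 4 8000, 5 15000, 6 15000
    }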
@@ -1095,13 +1170,24 @@ async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2)
     const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
     await sleep(delay);
     attempt += 1;
-    status = await
+    status = await withRetries(
+      () => syncClient.checkSnapshotStatus(snapshotHash),
+      logger2,
+      { op: "checkSnapshotStatus", snapshotHash },
+      maxAttempts
+    );
   }
   logger2.info({ snapshotHash }, "Snapshot READY");
 }
-async function publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger2) {
+async function publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger2, options) {
   const computation = computeSnapshot(filesRepo);
-  await ensureSnapshotCreated(
-
+  await ensureSnapshotCreated(
+    rootPath,
+    computation,
+    syncClient,
+    logger2,
+    options
+  );
   const createdAt = Date.now();
   snapshotsRepo.insert(
     computation.snapshotHash,
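Note that the READY-polling loop uses a lower cap than withRetries: its delay tops out at 5 s rather than 15 s. The formula from the hunk above, isolated:

    // Poll delay while waiting for the snapshot to become READY (attempt is 0-based).
    const pollDelay = (attempt) => Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
    console.log([0, 1, 2, 3, 4].map(pollDelay)); // [1000, 2000, 4000, 5000, 5000]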
@@ -1141,7 +1227,8 @@ async function runInitialSyncPipeline(runtime) {
     runtime.filesRepo,
     runtime.snapshotsRepo,
     runtime.clients.sync,
-    syncLogger
+    syncLogger,
+    { maxAttempts: runtime.config.maxSnapshotAttempts }
   );
   return result;
 }
@@ -1544,7 +1631,8 @@ var ServiceRunner = class {
       this.runtime.filesRepo,
       this.runtime.snapshotsRepo,
       this.runtime.clients.sync,
-      log
+      log,
+      { maxAttempts: this.runtime.config.maxSnapshotAttempts }
     );
     this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
     this.state.updateSnapshotReady(result.createdAt);
@@ -1872,15 +1960,27 @@ async function main() {
   const runner = new ServiceRunner(runtime);
   try {
     await runner.prepareWatcher(true);
-
-
-
-
-
-
-
-
-
+    let initialCreatedAt;
+    try {
+      const initial = await runInitialSyncPipeline(runtime);
+      runtime.logger.info(
+        {
+          snapshotHash: initial.snapshotHash,
+          filesCount: initial.filesCount
+        },
+        "Initial sync completed; starting MCP server"
+      );
+      initialCreatedAt = initial.createdAt;
+    } catch (error) {
+      runtime.logger.warn(
+        { err: error },
+        "Initial sync failed; enqueuing snapshot job and continuing"
+      );
+      runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0);
+    }
+    if (initialCreatedAt) {
+      runner.recordInitialSnapshot(initialCreatedAt);
+    }
     await runner.startLoops();
     await runner.enableWatcherProcessing();
     const server = createMcpServer({ runtime, runner });