mono-pilot 0.2.10 → 0.2.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +260 -2
- package/dist/src/agents-paths.js +36 -0
- package/dist/src/brief/blocks.js +83 -0
- package/dist/src/brief/defaults.js +60 -0
- package/dist/src/brief/frontmatter.js +53 -0
- package/dist/src/brief/paths.js +10 -0
- package/dist/src/brief/reflection.js +27 -0
- package/dist/src/cli.js +62 -5
- package/dist/src/cluster/bus.js +102 -0
- package/dist/src/cluster/follower.js +137 -0
- package/dist/src/cluster/init.js +182 -0
- package/dist/src/cluster/leader.js +97 -0
- package/dist/src/cluster/log.js +49 -0
- package/dist/src/cluster/protocol.js +34 -0
- package/dist/src/cluster/services/bus.js +243 -0
- package/dist/src/cluster/services/embedding.js +12 -0
- package/dist/src/cluster/socket.js +86 -0
- package/dist/src/cluster/test-bus.js +175 -0
- package/dist/src/cluster_v2/connection-lifecycle.js +31 -0
- package/dist/src/cluster_v2/connection-lifecycle.test.js +24 -0
- package/dist/src/cluster_v2/connection.js +159 -0
- package/dist/src/cluster_v2/connection.test.js +55 -0
- package/dist/src/cluster_v2/events.js +102 -0
- package/dist/src/cluster_v2/index.js +2 -0
- package/dist/src/cluster_v2/observability.js +99 -0
- package/dist/src/cluster_v2/observability.test.js +46 -0
- package/dist/src/cluster_v2/rpc.js +389 -0
- package/dist/src/cluster_v2/rpc.test.js +110 -0
- package/dist/src/cluster_v2/runtime.failover.integration.test.js +156 -0
- package/dist/src/cluster_v2/runtime.js +531 -0
- package/dist/src/cluster_v2/runtime.lease-compromise.integration.test.js +91 -0
- package/dist/src/cluster_v2/runtime.lifecycle.integration.test.js +225 -0
- package/dist/src/cluster_v2/services/bus.integration.test.js +140 -0
- package/dist/src/cluster_v2/services/bus.js +450 -0
- package/dist/src/cluster_v2/services/discord/auth-store.js +82 -0
- package/dist/src/cluster_v2/services/discord/collector.js +569 -0
- package/dist/src/cluster_v2/services/discord/index.js +1 -0
- package/dist/src/cluster_v2/services/discord/oauth.js +87 -0
- package/dist/src/cluster_v2/services/discord/rpc-client.js +325 -0
- package/dist/src/cluster_v2/services/embedding.js +66 -0
- package/dist/src/cluster_v2/services/registry-cache.js +107 -0
- package/dist/src/cluster_v2/services/registry-cache.test.js +66 -0
- package/dist/src/cluster_v2/services/registry.js +36 -0
- package/dist/src/cluster_v2/services/twitter/collector.js +1055 -0
- package/dist/src/cluster_v2/services/twitter/index.js +1 -0
- package/dist/src/config/digest.js +78 -0
- package/dist/src/config/discord.js +143 -0
- package/dist/src/config/image-gen.js +48 -0
- package/dist/src/config/mono-pilot.js +31 -0
- package/dist/src/config/twitter.js +100 -0
- package/dist/src/extensions/cluster.js +311 -0
- package/dist/src/extensions/commands/build-memory.js +76 -0
- package/dist/src/extensions/commands/digest/backfill.js +779 -0
- package/dist/src/extensions/commands/digest/index.js +1133 -0
- package/dist/src/extensions/commands/image-model.js +214 -0
- package/dist/src/extensions/game/bus-injection.js +47 -0
- package/dist/src/extensions/game/identity.js +83 -0
- package/dist/src/extensions/game/mailbox.js +61 -0
- package/dist/src/extensions/game/system-prompt.js +134 -0
- package/dist/src/extensions/game/tools.js +28 -0
- package/dist/src/extensions/lifecycle.js +337 -0
- package/dist/src/extensions/mode-runtime.js +26 -2
- package/dist/src/extensions/mono-game.js +66 -0
- package/dist/src/extensions/mono-pilot.js +100 -18
- package/dist/src/extensions/nvim.js +47 -0
- package/dist/src/extensions/session-hints.js +1 -2
- package/dist/src/extensions/sftp.js +897 -0
- package/dist/src/extensions/status.js +676 -0
- package/dist/src/extensions/system-events.js +478 -0
- package/dist/src/extensions/system-prompt.js +24 -14
- package/dist/src/extensions/user-message.js +70 -1
- package/dist/src/lsp/client.js +235 -0
- package/dist/src/lsp/index.js +165 -0
- package/dist/src/lsp/runtime.js +67 -0
- package/dist/src/lsp/server.js +242 -0
- package/dist/src/memory/build-memory.js +103 -0
- package/dist/src/memory/config/defaults.js +55 -0
- package/dist/src/memory/config/loader.js +29 -0
- package/dist/src/memory/config/paths.js +9 -0
- package/dist/src/memory/config/resolve.js +90 -0
- package/dist/src/memory/config/types.js +1 -0
- package/dist/src/memory/embeddings/batch-runner.js +39 -0
- package/dist/src/memory/embeddings/cache.js +47 -0
- package/dist/src/memory/embeddings/chunk-limits.js +26 -0
- package/dist/src/memory/embeddings/input-limits.js +48 -0
- package/dist/src/memory/embeddings/local.js +108 -0
- package/dist/src/memory/embeddings/types.js +1 -0
- package/dist/src/memory/index-manager.js +552 -0
- package/dist/src/memory/indexing/embeddings.js +67 -0
- package/dist/src/memory/indexing/files.js +180 -0
- package/dist/src/memory/indexing/index-file.js +105 -0
- package/dist/src/memory/log.js +38 -0
- package/dist/src/memory/paths.js +15 -0
- package/dist/src/memory/runtime/index.js +299 -0
- package/dist/src/memory/runtime/thread.js +116 -0
- package/dist/src/memory/search/fts.js +57 -0
- package/dist/src/memory/search/hybrid.js +50 -0
- package/dist/src/memory/search/text.js +30 -0
- package/dist/src/memory/search/vector.js +43 -0
- package/dist/src/memory/session/content-hash.js +7 -0
- package/dist/src/memory/session/entry.js +33 -0
- package/dist/src/memory/session/flush-policy.js +34 -0
- package/dist/src/memory/session/hook.js +191 -0
- package/dist/src/memory/session/paths.js +15 -0
- package/dist/src/memory/session/session-reader.js +88 -0
- package/dist/src/memory/session/transcript/content-hash.js +7 -0
- package/dist/src/memory/session/transcript/entry.js +28 -0
- package/dist/src/memory/session/transcript/flush.js +56 -0
- package/dist/src/memory/session/transcript/paths.js +28 -0
- package/dist/src/memory/session/transcript/reader.js +112 -0
- package/dist/src/memory/session/transcript/state.js +31 -0
- package/dist/src/memory/store/schema.js +89 -0
- package/dist/src/memory/store/sqlite.js +89 -0
- package/dist/src/memory/types.js +1 -0
- package/dist/src/memory/warm.js +25 -0
- package/dist/{tools → src/tools}/README.md +28 -2
- package/dist/{tools → src/tools}/apply-patch-description.md +8 -2
- package/dist/{tools → src/tools}/apply-patch.js +174 -104
- package/dist/{tools → src/tools}/apply-patch.test.js +52 -1
- package/dist/{tools/ask-question.js → src/tools/ask-user-question.js} +3 -3
- package/dist/src/tools/ast-grep.js +357 -0
- package/dist/src/tools/brief-write.js +122 -0
- package/dist/src/tools/bus-send.js +100 -0
- package/dist/{tools → src/tools}/call-mcp-tool.js +20 -24
- package/dist/src/tools/codex-apply-patch-description.md +52 -0
- package/dist/src/tools/codex-apply-patch.js +540 -0
- package/dist/{tools → src/tools}/delete.js +24 -0
- package/dist/src/tools/exit-plan-mode.js +83 -0
- package/dist/{tools → src/tools}/fetch-mcp-resource.js +31 -3
- package/dist/src/tools/generate-image.js +567 -0
- package/dist/{tools → src/tools}/glob.js +55 -1
- package/dist/{tools → src/tools}/list-mcp-resources.js +32 -3
- package/dist/{tools → src/tools}/list-mcp-tools.js +38 -3
- package/dist/src/tools/ls.js +48 -0
- package/dist/src/tools/lsp-diagnostics.js +67 -0
- package/dist/src/tools/lsp-symbols.js +54 -0
- package/dist/src/tools/mailbox.js +85 -0
- package/dist/src/tools/memory-get.js +90 -0
- package/dist/src/tools/memory-search.js +180 -0
- package/dist/{tools → src/tools}/plan-mode-reminder.md +3 -4
- package/dist/{tools → src/tools}/read-file.js +8 -19
- package/dist/{tools → src/tools}/rg.js +10 -20
- package/dist/{tools → src/tools}/shell.js +19 -42
- package/dist/{tools → src/tools}/subagent.js +255 -6
- package/dist/{tools → src/tools}/switch-mode.js +37 -6
- package/dist/{tools → src/tools}/web-fetch.js +105 -7
- package/dist/{tools → src/tools}/web-search.js +29 -1
- package/package.json +21 -9
- package/dist/src/utils/mcp-client.js +0 -282
- /package/dist/{tools → src/tools}/ask-mode-reminder.md +0 -0
- /package/dist/{tools → src/tools}/rg.test.js +0 -0
- /package/dist/{tools → src/tools}/semantic-search-description.md +0 -0
- /package/dist/{tools → src/tools}/semantic-search.js +0 -0
- /package/dist/{tools → src/tools}/shell-description.md +0 -0
- /package/dist/{tools → src/tools}/subagent-description.md +0 -0
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* High-level message bus API over the cluster socket.
|
|
3
|
+
*
|
|
4
|
+
* Wraps ClusterClient RPC + push into a simple event-driven interface.
|
|
5
|
+
* Socket lifecycle is owned by FollowerHandle — bus only manages messaging.
|
|
6
|
+
*/
|
|
7
|
+
import { clusterLog } from "./log.js";
|
|
8
|
+
/**
 * Connect to the cluster message bus.
 *
 * Calls `register` on the leader, wires push dispatch, returns a BusHandle.
 * The underlying socket is owned by the FollowerHandle — call bus.close()
 * to detach messaging without killing the embedding connection.
 */
export async function connectBus(client, agentId, displayName, channels) {
    let messageHandlers = [];
    let presenceHandlers = [];
    let detached = false;
    // Pushes received before the caller attaches handlers are queued here;
    // a null queue means the buffering window is over and events flow through.
    let pendingEvents = [];
    const deliver = (method, payload) => {
        if (method === "message") {
            for (const handler of messageHandlers)
                handler(payload);
        }
        else if (method === "presence") {
            for (const handler of presenceHandlers)
                handler(payload);
        }
        else {
            clusterLog.debug("unknown push method", { method });
        }
    };
    // Wire the push handler BEFORE register so presence events for existing
    // agents (emitted during registration) are captured.
    client.onPush((method, payload) => {
        if (detached)
            return;
        if (pendingEvents) {
            pendingEvents.push([method, payload]);
        }
        else {
            deliver(method, payload);
        }
    });
    const result = await client.call("register", { agentId, displayName, channels });
    clusterLog.info("bus connected", { agentId: result.agentId, channels: result.channels });
    // Flush buffered events on the next tick — gives the caller time to
    // register handlers synchronously after connectBus() resolves.
    process.nextTick(() => {
        const queued = pendingEvents ?? [];
        pendingEvents = null;
        for (const [method, payload] of queued)
            deliver(method, payload);
    });
    const roster = () => client.call("roster", {});
    // Resolve an agentId or unique displayName to a concrete agent;
    // throws with guidance when the name is unknown or ambiguous.
    const resolveTarget = async (target) => {
        const { agents } = await roster();
        const exact = agents.find((agent) => agent.agentId === target);
        if (exact)
            return { agentId: exact.agentId, displayName: exact.displayName };
        const byName = agents.filter((agent) => agent.displayName?.trim() && agent.displayName.trim() === target);
        if (byName.length === 1)
            return { agentId: byName[0].agentId, displayName: byName[0].displayName };
        if (byName.length === 0)
            throw new Error(`No agent found for "${target}". Use /cluster who to list agents.`);
        const ids = byName.map((agent) => agent.agentId).join(", ");
        throw new Error(`DisplayName "${target}" is not unique. Candidates: ${ids}. Use agentId instead.`);
    };
    return {
        send: (to, payload, channel) => client.call("send", { to, channel, payload }),
        broadcast: (payload, channel) => client.call("broadcast", { channel, payload }),
        subscribe: (chs) => client.call("subscribe", { channels: chs }),
        roster,
        resolveTarget,
        onMessage: (handler) => { messageHandlers.push(handler); },
        onPresence: (handler) => { presenceHandlers.push(handler); },
        close: () => {
            // Detach messaging only — the socket stays up for embeddings.
            detached = true;
            messageHandlers = [];
            presenceHandlers = [];
            client.onPush(() => { });
            clusterLog.debug("bus detached", { agentId });
        },
    };
}
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
import { tryConnect } from "./socket.js";
|
|
2
|
+
import { CLUSTER_PROTOCOL_VERSION, encodeMessage, MessageDecoder, isPush, } from "./protocol.js";
|
|
3
|
+
import { clusterLog } from "./log.js";
|
|
4
|
+
/**
 * Try to connect to an existing cluster leader.
 * Returns a FollowerHandle on success, or null if no leader is running.
 */
export async function tryFollowLeader(modelId, identity) {
    const socket = await tryConnect();
    if (!socket) {
        clusterLog.debug("no leader found, cannot follow");
        return null;
    }
    const client = new ClusterClient(socket, identity);
    // Health-check the leader with a short ping before committing to it.
    let alive = false;
    try {
        const pong = await client.call("ping", null, 3000);
        if (pong === "pong") {
            alive = true;
        }
        else {
            clusterLog.warn("leader ping returned unexpected response", { pong });
        }
    }
    catch {
        clusterLog.warn("leader ping failed, disconnecting");
    }
    if (!alive) {
        client.close();
        return null;
    }
    clusterLog.info("connected as follower");
    // Embedding provider proxied over the leader's "embed" RPC.
    const provider = {
        id: "local",
        model: modelId,
        embedQuery: async (text) => {
            const { vectors } = await client.call("embed", { texts: [text] });
            return vectors[0];
        },
        embedBatch: async (texts) => {
            const { vectors } = await client.call("embed", { texts });
            return vectors;
        },
        dispose: async () => {
            client.close();
        },
    };
    const handle = { provider, close: () => client.close(), client };
    // Forward unexpected disconnects to whoever owns the handle.
    client.onDisconnect(() => {
        handle.onDisconnect?.();
    });
    return handle;
}
|
|
52
|
+
/**
 * Low-level RPC client over a connected Unix socket.
 *
 * Responsibilities:
 *  - frame outgoing requests (length-prefixed JSON, see protocol.js)
 *  - correlate responses to pending calls by request id, with per-call timeout
 *  - forward unsolicited push messages to the registered push handler
 *  - reject every in-flight call when the socket errors or closes
 */
export class ClusterClient {
    socket;
    nextId = 1;
    pending = new Map(); // id → { resolve, reject } for in-flight calls
    decoder = new MessageDecoder();
    closed = false; // set by close(); marks an intentional shutdown
    from; // identity attached to every outgoing request
    pushHandler;
    disconnectCallback;
    constructor(socket, identity) {
        this.socket = socket;
        this.from = { pid: process.pid, ...identity };
        socket.on("data", (chunk) => {
            const messages = this.decoder.feed(chunk);
            for (const msg of messages) {
                this.handleIncoming(msg);
            }
        });
        socket.on("error", () => this.abortAll("socket error"));
        socket.on("close", () => {
            clusterLog.info("connection to leader closed", { pending: this.pending.size });
            this.abortAll("socket closed");
            // Only signal UNEXPECTED disconnects. An intentional close() also
            // destroys the socket, and the async "close" event would otherwise
            // masquerade as a lost leader — triggering a spurious re-election
            // in consumers that treat the callback as "leader died".
            if (!this.closed)
                this.disconnectCallback?.();
        });
    }
    /** Register a callback invoked when the connection drops unexpectedly. */
    onDisconnect(cb) {
        this.disconnectCallback = cb;
    }
    /** Register the handler for leader-initiated push messages. */
    onPush(handler) {
        this.pushHandler = handler;
    }
    handleIncoming(msg) {
        if (isPush(msg)) {
            this.pushHandler?.(msg.method, msg.payload);
            return;
        }
        const res = msg;
        const handler = this.pending.get(res.id);
        // Responses for unknown ids (e.g. a timeout already removed the entry)
        // are silently dropped.
        if (handler) {
            this.pending.delete(res.id);
            if (res.error) {
                handler.reject(new Error(res.error));
            }
            else {
                handler.resolve(res.result);
            }
        }
    }
    /**
     * Send an RPC request and await its response.
     * Rejects with "cluster RPC timeout: <method>" after timeoutMs, or with
     * the abort reason if the connection drops first.
     */
    call(method, params, timeoutMs = 30_000) {
        if (this.closed)
            return Promise.reject(new Error("client closed"));
        const id = this.nextId++;
        return new Promise((resolve, reject) => {
            const timer = setTimeout(() => {
                this.pending.delete(id);
                reject(new Error(`cluster RPC timeout: ${method}`));
            }, timeoutMs);
            this.pending.set(id, {
                resolve: (v) => {
                    clearTimeout(timer);
                    resolve(v);
                },
                reject: (e) => {
                    clearTimeout(timer);
                    reject(e);
                },
            });
            this.socket.write(encodeMessage({ id, version: CLUSTER_PROTOCOL_VERSION, method, params, from: this.from }));
        });
    }
    /** Intentional shutdown: destroys the socket and rejects in-flight calls. */
    close() {
        clusterLog.debug("follower client closing");
        this.closed = true;
        this.socket.destroy();
        this.abortAll("client closed");
    }
    /** Reject and clear every pending call with the given reason. */
    abortAll(reason) {
        for (const [, handler] of this.pending) {
            handler.reject(new Error(reason));
        }
        this.pending.clear();
    }
}
|
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Unified cluster entry point.
|
|
3
|
+
*
|
|
4
|
+
* Initializes the cluster (leader or follower), returning a single ClusterService
|
|
5
|
+
* that provides both embedding and message bus capabilities.
|
|
6
|
+
*/
|
|
7
|
+
import { tryBecomeLeader } from "./leader.js";
|
|
8
|
+
import { tryFollowLeader } from "./follower.js";
|
|
9
|
+
import { createEmbeddingHandler } from "./services/embedding.js";
|
|
10
|
+
import { createBusHandler } from "./services/bus.js";
|
|
11
|
+
import { createLeaderBus } from "./services/bus.js";
|
|
12
|
+
import { connectBus } from "./bus.js";
|
|
13
|
+
import { clusterLog, setLogContext } from "./log.js";
|
|
14
|
+
// Module-level singleton state: at most one ClusterService per process.
let activeService = null;
// Last params passed to initCluster — reused by disconnect-driven re-election.
let cachedParams = null;
// In-flight proactive re-election promise; null when none is running.
let reElecting = null;
/** Return the currently active ClusterService, or null before init / after close. */
export function getActiveClusterService() {
    return activeService;
}
|
|
20
|
+
/**
 * Initialize the cluster. Tries follower first, falls back to leader.
 * Returns a ClusterService with both embedding and bus.
 *
 * Resolution order:
 *   1. follow an existing leader (avoids loading the embedding model),
 *   2. stand for election and become leader,
 *   3. retry following (another process may have won the race),
 *   4. standalone: load the model locally, no bus.
 *
 * NOTE(review): concurrent calls are not serialized — two overlapping
 * initCluster() calls can both pass the activeService check and race the
 * election; confirm callers only initialize once (or await reElecting).
 */
export async function initCluster(params) {
    if (activeService)
        return activeService;
    // Remember params so later re-elections can re-run init with them.
    cachedParams = params;
    const { agentId, getSessionId } = params;
    // Attach agent/session identity to every cluster log line.
    setLogContext(() => ({
        ...(agentId ? { agentId } : {}),
        ...(getSessionId ? { sessionId: getSessionId() } : {}),
    }));
    const modelId = params.modelPath ?? "local";
    const identity = {
        ...(agentId ? { agentId } : {}),
        ...(getSessionId ? { sessionId: getSessionId() } : {}),
    };
    // Try follower first — avoids loading model if leader exists
    clusterLog.info("resolving cluster role");
    const follower = await tryFollowLeader(modelId, identity);
    if (follower) {
        clusterLog.info("resolved as follower");
        activeService = await makeFollowerService(follower, agentId, params);
        return activeService;
    }
    // No leader — become one
    activeService = await makeLeaderService(params);
    if (activeService)
        return activeService;
    // Race condition: another process just became leader between our two attempts
    const retryFollower = await tryFollowLeader(modelId, identity);
    if (retryFollower) {
        clusterLog.info("resolved as follower (retry)");
        activeService = await makeFollowerService(retryFollower, agentId, params);
        return activeService;
    }
    // Fallback: standalone (no cluster)
    clusterLog.warn("cluster unavailable, loading model directly (standalone)");
    const { createLocalEmbeddingProvider } = await import("../memory/embeddings/local.js");
    const provider = await createLocalEmbeddingProvider(params);
    // Standalone still reports role "leader" (it owns its own model) but has
    // no message bus.
    activeService = {
        role: "leader",
        embedding: provider,
        bus: null,
        async close() {
            if (provider.dispose)
                await provider.dispose();
            activeService = null;
        },
    };
    return activeService;
}
|
|
73
|
+
/** Shut down the active ClusterService (leader, follower, or standalone), if any. */
export async function closeCluster() {
    const service = activeService;
    if (!service)
        return;
    clusterLog.info("closing cluster", { role: service.role });
    await service.close();
    activeService = null;
}
|
|
80
|
+
// --- Leader setup ---
/**
 * Load the embedding model, stand for election, and assemble the leader-side
 * ClusterService. Returns null when another process already holds the socket.
 */
async function makeLeaderService(params) {
    const { createLocalEmbeddingProvider } = await import("../memory/embeddings/local.js");
    clusterLog.info("trying to become leader, loading embedding model");
    const provider = await createLocalEmbeddingProvider(params);
    const disposeProvider = async () => {
        if (provider.dispose)
            await provider.dispose();
    };
    const handlers = [createEmbeddingHandler(provider), createBusHandler()];
    const leader = await tryBecomeLeader(handlers, disposeProvider);
    if (!leader) {
        // Lost the election — release the model we just loaded.
        await disposeProvider();
        return null;
    }
    clusterLog.info("resolved as leader");
    const bus = createLeaderBus(params.agentId, params.displayName);
    return {
        role: "leader",
        embedding: provider,
        bus,
        async close() {
            bus.close();
            await leader.close();
            activeService = null;
        },
    };
}
|
|
108
|
+
// --- Follower setup ---
/**
 * Wrap a FollowerHandle into a ClusterService:
 *  - embedding calls proxy over the socket with transparent re-election,
 *  - the message bus rides the same socket,
 *  - a leader disconnect triggers a proactive re-election via initCluster().
 */
async function makeFollowerService(handle, agentId, params) {
    const inner = handle.provider;
    // Proactive re-election on leader disconnect
    handle.onDisconnect = () => {
        // Skip when a re-election is already in flight, or this service is no
        // longer the active follower (closed or replaced).
        if (reElecting || !activeService || activeService.role !== "follower")
            return;
        clusterLog.info("leader disconnected, proactively re-electing");
        activeService = null;
        reElecting = initCluster(cachedParams)
            .then(() => { reElecting = null; })
            .catch((err) => {
            reElecting = null;
            clusterLog.error("proactive re-election failed", { error: String(err) });
        });
    };
    // Run one embedding op; on connection loss, wait for (or trigger) a
    // re-election and retry exactly once against the new active service.
    async function withReconnect(op) {
        try {
            return await op(inner);
        }
        catch (err) {
            // Non-connection failures propagate unchanged.
            if (!isConnectionError(err))
                throw err;
            if (reElecting) {
                clusterLog.debug("waiting for proactive re-election");
                await reElecting;
            }
            else {
                clusterLog.warn("leader lost, re-electing on demand", { error: String(err) });
                handle.close();
                activeService = null;
                await initCluster(cachedParams);
            }
            // Re-election failed entirely — surface the original error.
            if (!activeService)
                throw err;
            return op(activeService.embedding);
        }
    }
    const embedding = {
        id: inner.id,
        model: inner.model,
        embedQuery: (text) => withReconnect((p) => p.embedQuery(text)),
        embedBatch: (texts) => withReconnect((p) => p.embedBatch(texts)),
        dispose: async () => handle.close(),
    };
    // Connect bus over the same socket
    // NOTE(review): connectBus is called without its `channels` argument, so
    // register is sent with channels undefined — confirm the leader treats
    // that as "no subscriptions".
    let bus = null;
    try {
        bus = await connectBus(handle.client, agentId, params.displayName);
    }
    catch (err) {
        // Bus is optional: embeddings still work without messaging.
        clusterLog.warn("bus connect failed", { error: String(err) });
    }
    return {
        role: "follower",
        embedding,
        bus,
        async close() {
            if (bus)
                bus.close();
            handle.close();
            activeService = null;
        },
    };
}
|
|
173
|
+
/**
 * Heuristic: does this error indicate a dropped/closed cluster connection
 * (as opposed to a genuine RPC failure that should propagate to the caller)?
 */
function isConnectionError(err) {
    if (!(err instanceof Error))
        return false;
    const markers = [
        "socket closed",
        "socket error",
        "client closed",
        "EPIPE",
        "ECONNRESET",
    ];
    return markers.some((marker) => err.message.includes(marker));
}
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
import { tryListen, cleanupSocket } from "./socket.js";
|
|
2
|
+
import { CLUSTER_PROTOCOL_VERSION, encodeMessage, MessageDecoder, } from "./protocol.js";
|
|
3
|
+
import { clusterLog } from "./log.js";
|
|
4
|
+
/**
 * Try to become the cluster leader.
 *
 * Registers the given service handlers and dispatches incoming requests.
 * Returns a LeaderHandle on success, or null if another leader is already running.
 */
export async function tryBecomeLeader(services, onClose) {
    const server = await tryListen();
    if (!server) {
        clusterLog.debug("leader election lost (socket in use)");
        return null;
    }
    // Build method → handler dispatch table
    const dispatch = new Map();
    for (const svc of services) {
        for (const m of svc.methods)
            dispatch.set(m, svc);
    }
    clusterLog.info("became leader", { methods: [...dispatch.keys()] });
    server.on("connection", (socket) => {
        clusterLog.info("follower connected", { remote: socket.remoteAddress ?? "unknown" });
        handleConnection(socket, dispatch, services);
    });
    // Best-effort cleanup so a stale socket file never blocks the next election.
    const onExit = () => { server.close(); cleanupSocket(); };
    const detach = () => {
        process.off("exit", onExit);
        process.off("SIGINT", onSignal);
        process.off("SIGTERM", onSignal);
    };
    const onSignal = (signal) => {
        onExit();
        detach();
        // Re-raise with default disposition so the process still terminates.
        // A bare listener would otherwise swallow SIGINT/SIGTERM and leave
        // the process unkillable by Ctrl-C.
        process.kill(process.pid, signal);
    };
    process.on("exit", onExit);
    process.on("SIGINT", onSignal);
    process.on("SIGTERM", onSignal);
    const close = async () => {
        clusterLog.info("leader shutting down");
        // Detach process hooks: after leadership is given up, a later exit of
        // this process must not unlink a socket now owned by a newer leader.
        detach();
        server.close();
        cleanupSocket();
        if (onClose)
            await onClose();
    };
    return { close };
}
|
|
40
|
+
/**
 * Wire one follower socket: decode incoming frames, route each request, and
 * notify every service when a registered agent's connection goes away.
 */
function handleConnection(socket, dispatch, services) {
    const frames = new MessageDecoder();
    let registeredAgent;
    const ctx = {
        socket,
        respond: () => { }, // placeholder — routeRequest binds a real one per request
        getRegisteredId: () => registeredAgent,
        setRegisteredId: (id) => { registeredAgent = id; },
    };
    const notifyDisconnect = () => {
        if (!registeredAgent)
            return;
        for (const svc of services)
            svc.onDisconnect?.(registeredAgent);
        registeredAgent = undefined;
    };
    socket.on("data", (chunk) => {
        // Fire-and-forget: each request reports its own errors via respond().
        for (const msg of frames.feed(chunk))
            void routeRequest(msg, ctx, dispatch);
    });
    socket.on("error", (err) => {
        clusterLog.debug("follower socket error", { error: String(err) });
        notifyDisconnect();
    });
    socket.on("close", notifyDisconnect);
}
|
|
67
|
+
/**
 * Validate and dispatch one request, replying on the originating socket.
 * Handler failures become error responses instead of crashing the leader.
 */
async function routeRequest(req, baseCtx, dispatch) {
    // Per-request respond bound to this req.id
    const respond = (res) => {
        if (baseCtx.socket.destroyed)
            return;
        baseCtx.socket.write(encodeMessage({ id: req.id, ...res }));
    };
    const ctx = { ...baseCtx, respond };
    if (req.version !== CLUSTER_PROTOCOL_VERSION) {
        clusterLog.warn("protocol version mismatch", { expected: CLUSTER_PROTOCOL_VERSION, got: req.version });
        respond({ error: `unsupported protocol version: ${req.version}` });
        return;
    }
    // ping is handled inline — no service registration required.
    if (req.method === "ping") {
        respond({ result: "pong" });
        return;
    }
    const handler = dispatch.get(req.method);
    if (!handler) {
        clusterLog.warn("unknown method", { method: req.method, reqId: req.id });
        respond({ error: `unknown method: ${req.method}` });
        return;
    }
    try {
        await handler.handle(req, ctx);
    }
    catch (err) {
        const message = err instanceof Error ? err.message : String(err);
        clusterLog.error("handler error", { method: req.method, reqId: req.id, error: message });
        respond({ error: message });
    }
}
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
import { appendFile, mkdir } from "node:fs/promises";
import { homedir } from "node:os";
import { join } from "node:path";

// Context fields (e.g. agentId, sessionId) merged into every log line.
let resolveContext = () => ({});

/** Called internally by cluster init — not exported to consumers. */
export function setLogContext(resolve) {
    resolveContext = resolve;
}

const LOGS_DIR = join(homedir(), ".mono-pilot", "logs");

/** Daily-rotated log file path, e.g. <LOGS_DIR>/cluster.2024-01-31.log */
function getLogPath(date = new Date()) {
    return join(LOGS_DIR, `cluster.${date.toISOString().slice(0, 10)}.log`);
}

/** Render one line: ISO timestamp, level, message, then merged JSON context. */
function formatLine(level, message, data) {
    const merged = { pid: process.pid, ...resolveContext(), ...data };
    const suffix = Object.keys(merged).length > 0 ? ` ${safeJson(merged)}` : "";
    return `${new Date().toISOString()} [${level}] ${message}${suffix}`;
}

/** JSON.stringify that never throws (cycles, BigInt, …). */
function safeJson(data) {
    try {
        return JSON.stringify(data);
    }
    catch {
        return "[unserializable]";
    }
}

// Writes are serialized through a promise chain so lines never interleave;
// failures are deliberately swallowed — logging is best-effort.
let writeQueue = Promise.resolve();
function enqueue(level, message, data) {
    const line = formatLine(level, message, data);
    writeQueue = writeQueue
        .then(async () => {
        await mkdir(LOGS_DIR, { recursive: true });
        await appendFile(getLogPath(), `${line}\n`, { encoding: "utf-8" });
    })
        .catch(() => { });
}

/** File-based logger shared by all cluster modules. */
export const clusterLog = {
    debug: (message, data) => enqueue("debug", message, data),
    info: (message, data) => enqueue("info", message, data),
    warn: (message, data) => enqueue("warn", message, data),
    error: (message, data) => enqueue("error", message, data),
};
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/** Wire protocol for cluster IPC over Unix domain socket. */
export const CLUSTER_PROTOCOL_VERSION = 2;

/** A push is a leader-initiated message (it carries no request id to correlate). */
export function isPush(msg) {
    return "type" in msg && msg.type === "push";
}

// --- Serialize / Deserialize (length-prefixed JSON over stream) ---

/**
 * Encode a message as a length-prefixed buffer: [4-byte LE length][JSON payload].
 */
export function encodeMessage(msg) {
    const body = Buffer.from(JSON.stringify(msg), "utf8");
    const frame = Buffer.alloc(4 + body.length);
    frame.writeUInt32LE(body.length, 0);
    body.copy(frame, 4);
    return frame;
}

/**
 * Streaming decoder: feed chunks, get back parsed messages.
 */
export class MessageDecoder {
    buf = Buffer.alloc(0);

    /** Append a chunk and return every complete message now available. */
    feed(chunk) {
        this.buf = Buffer.concat([this.buf, chunk]);
        const out = [];
        for (;;) {
            if (this.buf.length < 4)
                break;
            const size = this.buf.readUInt32LE(0);
            if (this.buf.length < 4 + size)
                break;
            const json = this.buf.subarray(4, 4 + size).toString("utf8");
            this.buf = this.buf.subarray(4 + size);
            out.push(JSON.parse(json));
        }
        return out;
    }
}
|