@sesamespace/hivemind 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/PLANNING.md +383 -0
- package/TASKS.md +60 -0
- package/install.sh +187 -0
- package/npm-package.json +28 -0
- package/package.json +13 -20
- package/packages/cli/package.json +23 -0
- package/{dist/chunk-DVR2KBL7.js → packages/cli/src/commands/fleet.ts} +50 -30
- package/packages/cli/src/commands/init.ts +230 -0
- package/{dist/chunk-MBS5A6BZ.js → packages/cli/src/commands/service.ts} +51 -42
- package/{dist/chunk-RNK5Q5GR.js → packages/cli/src/commands/start.ts} +12 -14
- package/{dist/main.js → packages/cli/src/main.ts} +12 -18
- package/packages/cli/tsconfig.json +8 -0
- package/packages/memory/Cargo.lock +6480 -0
- package/packages/memory/Cargo.toml +21 -0
- package/packages/memory/src/context.rs +179 -0
- package/packages/memory/src/embeddings.rs +51 -0
- package/packages/memory/src/main.rs +626 -0
- package/packages/memory/src/promotion.rs +637 -0
- package/packages/memory/src/scoring.rs +131 -0
- package/packages/memory/src/store.rs +460 -0
- package/packages/memory/src/tasks.rs +321 -0
- package/packages/runtime/package.json +24 -0
- package/packages/runtime/src/__tests__/fleet-integration.test.ts +235 -0
- package/packages/runtime/src/__tests__/fleet.test.ts +207 -0
- package/packages/runtime/src/__tests__/integration.test.ts +434 -0
- package/packages/runtime/src/agent.ts +255 -0
- package/packages/runtime/src/config.ts +130 -0
- package/packages/runtime/src/context.ts +192 -0
- package/packages/runtime/src/fleet/fleet-manager.ts +399 -0
- package/packages/runtime/src/fleet/memory-sync.ts +362 -0
- package/packages/runtime/src/fleet/primary-client.ts +285 -0
- package/packages/runtime/src/fleet/worker-protocol.ts +158 -0
- package/packages/runtime/src/fleet/worker-server.ts +246 -0
- package/packages/runtime/src/index.ts +57 -0
- package/packages/runtime/src/llm-client.ts +65 -0
- package/packages/runtime/src/memory-client.ts +309 -0
- package/packages/runtime/src/pipeline.ts +151 -0
- package/packages/runtime/src/prompt.ts +173 -0
- package/packages/runtime/src/sesame.ts +174 -0
- package/{dist/start.js → packages/runtime/src/start.ts} +7 -9
- package/packages/runtime/src/task-engine.ts +113 -0
- package/packages/runtime/src/worker.ts +339 -0
- package/packages/runtime/tsconfig.json +8 -0
- package/pnpm-workspace.yaml +2 -0
- package/run-aidan.sh +23 -0
- package/scripts/bootstrap.sh +196 -0
- package/scripts/build-npm.sh +94 -0
- package/scripts/com.hivemind.agent.plist +44 -0
- package/scripts/com.hivemind.memory.plist +31 -0
- package/tsconfig.json +22 -0
- package/tsup.config.ts +28 -0
- package/dist/chunk-2I2O6X5D.js +0 -1408
- package/dist/chunk-2I2O6X5D.js.map +0 -1
- package/dist/chunk-DVR2KBL7.js.map +0 -1
- package/dist/chunk-MBS5A6BZ.js.map +0 -1
- package/dist/chunk-NVJ424TB.js +0 -731
- package/dist/chunk-NVJ424TB.js.map +0 -1
- package/dist/chunk-RNK5Q5GR.js.map +0 -1
- package/dist/chunk-XNOWVLXD.js +0 -160
- package/dist/chunk-XNOWVLXD.js.map +0 -1
- package/dist/commands/fleet.js +0 -9
- package/dist/commands/fleet.js.map +0 -1
- package/dist/commands/init.js +0 -7
- package/dist/commands/init.js.map +0 -1
- package/dist/commands/service.js +0 -7
- package/dist/commands/service.js.map +0 -1
- package/dist/commands/start.js +0 -9
- package/dist/commands/start.js.map +0 -1
- package/dist/index.js +0 -41
- package/dist/index.js.map +0 -1
- package/dist/main.js.map +0 -1
- package/dist/start.js.map +0 -1
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
import { SesameClient as SesameSDK } from "@sesamespace/sdk";
|
|
2
|
+
import type { SesameConfig } from "./config.js";
|
|
3
|
+
|
|
4
|
+
export interface SesameMessage {
|
|
5
|
+
id: string;
|
|
6
|
+
channelId: string;
|
|
7
|
+
channelKind: "dm" | "group" | "topic";
|
|
8
|
+
content: string;
|
|
9
|
+
author: {
|
|
10
|
+
id: string;
|
|
11
|
+
handle: string;
|
|
12
|
+
};
|
|
13
|
+
timestamp: string;
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
type MessageHandler = (message: SesameMessage) => void | Promise<void>;
|
|
17
|
+
|
|
18
|
+
interface ChannelInfo {
|
|
19
|
+
kind: "dm" | "group" | "topic";
|
|
20
|
+
name?: string;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
export class SesameClient {
|
|
24
|
+
private config: SesameConfig;
|
|
25
|
+
private sdk: SesameSDK;
|
|
26
|
+
private messageHandler: MessageHandler | null = null;
|
|
27
|
+
private agentId: string | null = null;
|
|
28
|
+
private channels: Map<string, ChannelInfo> = new Map();
|
|
29
|
+
private typingIntervals: Map<string, ReturnType<typeof setInterval>> = new Map();
|
|
30
|
+
|
|
31
|
+
constructor(config: SesameConfig) {
|
|
32
|
+
this.config = config;
|
|
33
|
+
this.sdk = new SesameSDK({
|
|
34
|
+
apiUrl: config.api_url.replace(/\/api\/v1$/, ""),
|
|
35
|
+
wsUrl: config.ws_url,
|
|
36
|
+
apiKey: config.api_key,
|
|
37
|
+
});
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
onMessage(handler: MessageHandler): void {
|
|
41
|
+
this.messageHandler = handler;
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
async connect(): Promise<void> {
|
|
45
|
+
const manifest = await this.sdk.getManifest();
|
|
46
|
+
this.agentId = manifest.agent.id;
|
|
47
|
+
console.log(`[sesame] Authenticated as ${manifest.agent.handle} (${this.agentId})`);
|
|
48
|
+
|
|
49
|
+
// Cache channel info
|
|
50
|
+
for (const ch of manifest.channels) {
|
|
51
|
+
this.channels.set(ch.id, { kind: ch.kind as ChannelInfo["kind"], name: ch.name ?? undefined });
|
|
52
|
+
const label = ch.name || ch.id.slice(0, 8);
|
|
53
|
+
console.log(`[sesame] Channel: ${label} (${ch.kind})`);
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
// Set presence to online
|
|
57
|
+
this.updatePresence("online", { emoji: "🟢" });
|
|
58
|
+
|
|
59
|
+
// Listen for message events
|
|
60
|
+
this.sdk.on("message", (event: any) => {
|
|
61
|
+
const msg = event.data || event.message || event;
|
|
62
|
+
const senderId = msg.senderId || msg.sender?.id;
|
|
63
|
+
|
|
64
|
+
// Ignore our own messages
|
|
65
|
+
if (senderId === this.agentId) return;
|
|
66
|
+
|
|
67
|
+
if (!this.messageHandler || !msg.content) return;
|
|
68
|
+
|
|
69
|
+
const channelInfo = this.channels.get(msg.channelId);
|
|
70
|
+
|
|
71
|
+
this.messageHandler({
|
|
72
|
+
id: msg.id || "unknown",
|
|
73
|
+
channelId: msg.channelId || "unknown",
|
|
74
|
+
channelKind: channelInfo?.kind || "dm",
|
|
75
|
+
content: msg.content,
|
|
76
|
+
author: {
|
|
77
|
+
id: senderId || "unknown",
|
|
78
|
+
handle: msg.senderHandle || msg.metadata?.senderHandle || "unknown",
|
|
79
|
+
},
|
|
80
|
+
timestamp: msg.createdAt || new Date().toISOString(),
|
|
81
|
+
});
|
|
82
|
+
});
|
|
83
|
+
|
|
84
|
+
await this.sdk.connect();
|
|
85
|
+
console.log("[sesame] WebSocket connected");
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
// ── Typing Indicators ──
|
|
89
|
+
|
|
90
|
+
/**
|
|
91
|
+
* Start sending typing indicators for a channel.
|
|
92
|
+
* Sends immediately, then every 2.5s until stopTyping() is called.
|
|
93
|
+
*/
|
|
94
|
+
startTyping(channelId: string): void {
|
|
95
|
+
// Don't double-start
|
|
96
|
+
if (this.typingIntervals.has(channelId)) return;
|
|
97
|
+
|
|
98
|
+
this.sdk.sendTyping(channelId);
|
|
99
|
+
const interval = setInterval(() => {
|
|
100
|
+
this.sdk.sendTyping(channelId);
|
|
101
|
+
}, 2500);
|
|
102
|
+
this.typingIntervals.set(channelId, interval);
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
/**
|
|
106
|
+
* Stop sending typing indicators for a channel.
|
|
107
|
+
*/
|
|
108
|
+
stopTyping(channelId: string): void {
|
|
109
|
+
const interval = this.typingIntervals.get(channelId);
|
|
110
|
+
if (interval) {
|
|
111
|
+
clearInterval(interval);
|
|
112
|
+
this.typingIntervals.delete(channelId);
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
/**
|
|
117
|
+
* Stop all active typing indicators.
|
|
118
|
+
*/
|
|
119
|
+
stopAllTyping(): void {
|
|
120
|
+
for (const [channelId, interval] of this.typingIntervals) {
|
|
121
|
+
clearInterval(interval);
|
|
122
|
+
}
|
|
123
|
+
this.typingIntervals.clear();
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
// ── Presence ──
|
|
127
|
+
|
|
128
|
+
/**
|
|
129
|
+
* Update agent presence/status.
|
|
130
|
+
* @param status - "online" | "thinking" | "working" | "idle" | "offline"
|
|
131
|
+
* @param options - Optional detail text, progress (0-100), emoji
|
|
132
|
+
*/
|
|
133
|
+
updatePresence(status: string, options?: { detail?: string; progress?: number; emoji?: string }): void {
|
|
134
|
+
// Use updatePresence if available (SDK >= 0.3), otherwise fall back to raw WS
|
|
135
|
+
if (typeof (this.sdk as any).updatePresence === "function") {
|
|
136
|
+
(this.sdk as any).updatePresence(status, options);
|
|
137
|
+
} else {
|
|
138
|
+
// Direct WS send for older SDK versions
|
|
139
|
+
const ws = (this.sdk as any).ws;
|
|
140
|
+
if (ws?.readyState === 1) {
|
|
141
|
+
ws.send(JSON.stringify({ type: "status", status, ...options }));
|
|
142
|
+
}
|
|
143
|
+
}
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
// ── Read Receipts ──
|
|
147
|
+
|
|
148
|
+
/**
|
|
149
|
+
* Mark a channel as read up to a given sequence number.
|
|
150
|
+
*/
|
|
151
|
+
async markRead(channelId: string, seq: number): Promise<void> {
|
|
152
|
+
await this.sdk.markRead(channelId, seq);
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
// ── Messages ──
|
|
156
|
+
|
|
157
|
+
async sendMessage(channelId: string, content: string): Promise<void> {
|
|
158
|
+
await this.sdk.sendMessage(channelId, { content });
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
getAgentId(): string | null {
|
|
162
|
+
return this.agentId;
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
getChannelInfo(channelId: string): ChannelInfo | undefined {
|
|
166
|
+
return this.channels.get(channelId);
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
disconnect(): void {
|
|
170
|
+
this.stopAllTyping();
|
|
171
|
+
this.updatePresence("offline");
|
|
172
|
+
this.sdk.disconnect();
|
|
173
|
+
}
|
|
174
|
+
}
|
|
@@ -1,13 +1,12 @@
|
|
|
1
1
|
#!/usr/bin/env node
|
|
2
|
-
import {
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
startWorker
|
|
6
|
-
} from "./chunk-2I2O6X5D.js";
|
|
2
|
+
import { loadConfig } from "./config.js";
|
|
3
|
+
import { startPipeline } from "./pipeline.js";
|
|
4
|
+
import { startWorker } from "./worker.js";
|
|
7
5
|
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
6
|
+
const configPath = process.argv[2] || "config/default.toml";
|
|
7
|
+
const config = loadConfig(configPath);
|
|
8
|
+
|
|
9
|
+
// If worker mode is enabled, start as a fleet worker
|
|
11
10
|
if (config.worker?.enabled) {
|
|
12
11
|
startWorker(config).catch((err) => {
|
|
13
12
|
console.error("[hivemind] Worker fatal:", err);
|
|
@@ -19,4 +18,3 @@ if (config.worker?.enabled) {
|
|
|
19
18
|
process.exit(1);
|
|
20
19
|
});
|
|
21
20
|
}
|
|
22
|
-
//# sourceMappingURL=start.js.map
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
import type { MemoryClient, TaskRecord, TaskInput } from "./memory-client.js";
|
|
2
|
+
|
|
3
|
+
export type TaskStatus = "planned" | "active" | "complete" | "archived";
|
|
4
|
+
|
|
5
|
+
export interface TaskEngineOptions {
|
|
6
|
+
contextName: string;
|
|
7
|
+
memory: MemoryClient;
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
export class TaskEngine {
|
|
11
|
+
private contextName: string;
|
|
12
|
+
private memory: MemoryClient;
|
|
13
|
+
|
|
14
|
+
constructor(options: TaskEngineOptions) {
|
|
15
|
+
this.contextName = options.contextName;
|
|
16
|
+
this.memory = options.memory;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
async addTask(title: string, description: string, blockedBy: string[] = []): Promise<TaskRecord> {
|
|
20
|
+
return this.memory.createTask({
|
|
21
|
+
context_name: this.contextName,
|
|
22
|
+
title,
|
|
23
|
+
description,
|
|
24
|
+
status: "planned",
|
|
25
|
+
blocked_by: blockedBy,
|
|
26
|
+
});
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
async listTasks(status?: TaskStatus): Promise<TaskRecord[]> {
|
|
30
|
+
return this.memory.listTasks(this.contextName, status);
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
async startTask(taskId: string): Promise<TaskRecord | null> {
|
|
34
|
+
return this.memory.updateTask(taskId, { status: "active" });
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
async completeTask(taskId: string): Promise<TaskRecord | null> {
|
|
38
|
+
return this.memory.updateTask(taskId, { status: "complete" });
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
async archiveTask(taskId: string): Promise<TaskRecord | null> {
|
|
42
|
+
return this.memory.updateTask(taskId, { status: "archived" });
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
async getNextTask(): Promise<TaskRecord | null> {
|
|
46
|
+
return this.memory.getNextTask(this.contextName);
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
async pickAndStartNextTask(): Promise<TaskRecord | null> {
|
|
50
|
+
const next = await this.getNextTask();
|
|
51
|
+
if (next) {
|
|
52
|
+
return this.startTask(next.id);
|
|
53
|
+
}
|
|
54
|
+
return null;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
setContext(contextName: string): void {
|
|
58
|
+
this.contextName = contextName;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
/**
|
|
62
|
+
* Parse a task command from a chat message.
|
|
63
|
+
* Returns the action and parsed parameters, or null if not a task command.
|
|
64
|
+
*/
|
|
65
|
+
static parseTaskCommand(message: string): TaskCommand | null {
|
|
66
|
+
const lower = message.trim().toLowerCase();
|
|
67
|
+
|
|
68
|
+
// "add task: <title>"
|
|
69
|
+
const addMatch = message.match(/^(?:add|create)\s+task[:\s]+(.+)/i);
|
|
70
|
+
if (addMatch) {
|
|
71
|
+
return { action: "add", title: addMatch[1].trim() };
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
// "list tasks"
|
|
75
|
+
if (/^list\s+tasks/i.test(lower)) {
|
|
76
|
+
const statusMatch = lower.match(/list\s+tasks\s+(\w+)/);
|
|
77
|
+
return { action: "list", statusFilter: statusMatch?.[1] as TaskStatus | undefined };
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
// "complete task <id>"
|
|
81
|
+
const completeMatch = message.match(/^complete\s+task\s+(\S+)/i);
|
|
82
|
+
if (completeMatch) {
|
|
83
|
+
return { action: "complete", taskId: completeMatch[1] };
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
// "start task <id>"
|
|
87
|
+
const startMatch = message.match(/^start\s+task\s+(\S+)/i);
|
|
88
|
+
if (startMatch) {
|
|
89
|
+
return { action: "start", taskId: startMatch[1] };
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
// "next task"
|
|
93
|
+
if (/^next\s+task/i.test(lower)) {
|
|
94
|
+
return { action: "next" };
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
// "archive task <id>"
|
|
98
|
+
const archiveMatch = message.match(/^archive\s+task\s+(\S+)/i);
|
|
99
|
+
if (archiveMatch) {
|
|
100
|
+
return { action: "archive", taskId: archiveMatch[1] };
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
return null;
|
|
104
|
+
}
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
export interface TaskCommand {
|
|
108
|
+
action: "add" | "list" | "complete" | "start" | "next" | "archive";
|
|
109
|
+
title?: string;
|
|
110
|
+
description?: string;
|
|
111
|
+
taskId?: string;
|
|
112
|
+
statusFilter?: TaskStatus;
|
|
113
|
+
}
|
|
@@ -0,0 +1,339 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Worker mode entry point — distributed context execution.
|
|
3
|
+
*
|
|
4
|
+
* A worker runs autonomously on a separate machine:
|
|
5
|
+
* 1. Starts its HTTP server (for Primary to manage it)
|
|
6
|
+
* 2. Registers with the Primary node
|
|
7
|
+
* 3. Connects to the local memory daemon
|
|
8
|
+
* 4. Picks up tasks from assigned context queues
|
|
9
|
+
* 5. Executes tasks using the agent loop (query memory, call LLM, store episodes)
|
|
10
|
+
* 6. Reports status back to Primary via HTTP
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import type { HivemindConfig, WorkerModeConfig } from "./config.js";
|
|
14
|
+
import type { WorkerRegistrationRequest, WorkerRegistrationResponse, WorkerStatusReport } from "./fleet/worker-protocol.js";
|
|
15
|
+
import { PRIMARY_ROUTES } from "./fleet/worker-protocol.js";
|
|
16
|
+
import { WorkerServer } from "./fleet/worker-server.js";
|
|
17
|
+
import { Agent } from "./agent.js";
|
|
18
|
+
import { MemoryClient } from "./memory-client.js";
|
|
19
|
+
import { TaskEngine } from "./task-engine.js";
|
|
20
|
+
|
|
21
|
+
export interface WorkerRuntimeOptions {
|
|
22
|
+
config: HivemindConfig;
|
|
23
|
+
workerConfig: WorkerModeConfig;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
export class WorkerRuntime {
|
|
27
|
+
private config: HivemindConfig;
|
|
28
|
+
private workerConfig: WorkerModeConfig;
|
|
29
|
+
private server: WorkerServer;
|
|
30
|
+
private memory: MemoryClient;
|
|
31
|
+
private agent: Agent;
|
|
32
|
+
private taskPollTimer: ReturnType<typeof setInterval> | null = null;
|
|
33
|
+
private statusReportTimer: ReturnType<typeof setInterval> | null = null;
|
|
34
|
+
private registeredWorkerId: string | null = null;
|
|
35
|
+
private running = false;
|
|
36
|
+
private executing = false;
|
|
37
|
+
|
|
38
|
+
constructor(opts: WorkerRuntimeOptions) {
|
|
39
|
+
this.config = opts.config;
|
|
40
|
+
this.workerConfig = opts.workerConfig;
|
|
41
|
+
|
|
42
|
+
this.server = new WorkerServer({
|
|
43
|
+
workerId: this.workerConfig.worker_id,
|
|
44
|
+
port: this.workerConfig.worker_port,
|
|
45
|
+
maxContexts: this.workerConfig.max_contexts,
|
|
46
|
+
memoryDaemonUrl: this.config.memory.daemon_url,
|
|
47
|
+
ollamaUrl: this.config.ollama.base_url,
|
|
48
|
+
});
|
|
49
|
+
|
|
50
|
+
this.memory = new MemoryClient(this.config.memory);
|
|
51
|
+
this.agent = new Agent(this.config);
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
/**
|
|
55
|
+
* Start the worker: HTTP server, register with Primary, begin task loop.
|
|
56
|
+
*/
|
|
57
|
+
async start(): Promise<void> {
|
|
58
|
+
this.running = true;
|
|
59
|
+
|
|
60
|
+
// 1. Start the worker HTTP server
|
|
61
|
+
this.server.onContextAssigned((contextName, description) => {
|
|
62
|
+
console.log(`[worker] Context assigned: "${contextName}" — ${description || "(no description)"}`);
|
|
63
|
+
// Create the context in the local memory daemon
|
|
64
|
+
this.memory.createContext(contextName, description).catch((err) => {
|
|
65
|
+
console.warn(`[worker] Failed to create context "${contextName}" in daemon:`, (err as Error).message);
|
|
66
|
+
});
|
|
67
|
+
});
|
|
68
|
+
await this.server.start();
|
|
69
|
+
console.log(`[worker] HTTP server listening on port ${this.workerConfig.worker_port}`);
|
|
70
|
+
|
|
71
|
+
// 2. Verify local memory daemon is reachable
|
|
72
|
+
const memoryOk = await this.memory.healthCheck();
|
|
73
|
+
if (memoryOk) {
|
|
74
|
+
console.log("[worker] Local memory daemon connected");
|
|
75
|
+
} else {
|
|
76
|
+
console.warn("[worker] Memory daemon unreachable at", this.config.memory.daemon_url);
|
|
77
|
+
console.warn("[worker] Continuing — episodes will not be stored until daemon is available");
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
// 3. Register with Primary
|
|
81
|
+
await this.registerWithPrimary();
|
|
82
|
+
|
|
83
|
+
// 4. Start the task execution loop
|
|
84
|
+
this.startTaskLoop();
|
|
85
|
+
|
|
86
|
+
// 5. Start periodic status reporting
|
|
87
|
+
this.startStatusReporting();
|
|
88
|
+
|
|
89
|
+
console.log("[worker] Ready — waiting for context assignments");
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
/**
|
|
93
|
+
* Stop the worker gracefully.
|
|
94
|
+
*/
|
|
95
|
+
async stop(): Promise<void> {
|
|
96
|
+
this.running = false;
|
|
97
|
+
|
|
98
|
+
if (this.taskPollTimer) {
|
|
99
|
+
clearInterval(this.taskPollTimer);
|
|
100
|
+
this.taskPollTimer = null;
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
if (this.statusReportTimer) {
|
|
104
|
+
clearInterval(this.statusReportTimer);
|
|
105
|
+
this.statusReportTimer = null;
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
await this.server.stop();
|
|
109
|
+
console.log("[worker] Stopped");
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
/**
|
|
113
|
+
* Register this worker with the Primary node.
|
|
114
|
+
*/
|
|
115
|
+
async registerWithPrimary(): Promise<WorkerRegistrationResponse | null> {
|
|
116
|
+
const workerUrl = `http://localhost:${this.workerConfig.worker_port}`;
|
|
117
|
+
|
|
118
|
+
const registration: WorkerRegistrationRequest = {
|
|
119
|
+
url: workerUrl,
|
|
120
|
+
capabilities: {
|
|
121
|
+
max_contexts: this.workerConfig.max_contexts,
|
|
122
|
+
has_ollama: true,
|
|
123
|
+
has_memory_daemon: true,
|
|
124
|
+
available_models: [this.config.memory.embedding_model],
|
|
125
|
+
},
|
|
126
|
+
};
|
|
127
|
+
|
|
128
|
+
try {
|
|
129
|
+
const resp = await fetch(
|
|
130
|
+
`${this.workerConfig.primary_url}${PRIMARY_ROUTES.register}`,
|
|
131
|
+
{
|
|
132
|
+
method: "POST",
|
|
133
|
+
headers: { "Content-Type": "application/json" },
|
|
134
|
+
body: JSON.stringify(registration),
|
|
135
|
+
},
|
|
136
|
+
);
|
|
137
|
+
|
|
138
|
+
if (!resp.ok) {
|
|
139
|
+
console.error(`[worker] Registration failed: ${resp.status} ${await resp.text()}`);
|
|
140
|
+
return null;
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
const result = (await resp.json()) as WorkerRegistrationResponse;
|
|
144
|
+
this.registeredWorkerId = result.worker_id;
|
|
145
|
+
console.log(`[worker] Registered with Primary as ${result.worker_id}`);
|
|
146
|
+
return result;
|
|
147
|
+
} catch (err) {
|
|
148
|
+
console.error("[worker] Could not reach Primary at", this.workerConfig.primary_url);
|
|
149
|
+
console.error("[worker] Will continue in standalone mode — retry registration manually");
|
|
150
|
+
return null;
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
/**
|
|
155
|
+
* Start the task execution polling loop.
|
|
156
|
+
* Iterates assigned contexts and picks up tasks from each queue.
|
|
157
|
+
*/
|
|
158
|
+
startTaskLoop(): void {
|
|
159
|
+
this.taskPollTimer = setInterval(() => {
|
|
160
|
+
if (!this.running || this.executing) return;
|
|
161
|
+
this.executeNextTask().catch((err) => {
|
|
162
|
+
console.error("[worker] Task execution error:", (err as Error).message);
|
|
163
|
+
});
|
|
164
|
+
}, this.workerConfig.task_poll_interval_ms);
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
/**
|
|
168
|
+
* Find and execute the next available task across all assigned contexts.
|
|
169
|
+
*/
|
|
170
|
+
async executeNextTask(): Promise<void> {
|
|
171
|
+
const contexts = this.server.getAssignedContexts();
|
|
172
|
+
if (contexts.length === 0) return;
|
|
173
|
+
|
|
174
|
+
for (const contextName of contexts) {
|
|
175
|
+
const engine = new TaskEngine({ contextName, memory: this.memory });
|
|
176
|
+
const task = await engine.getNextTask();
|
|
177
|
+
|
|
178
|
+
if (task) {
|
|
179
|
+
this.executing = true;
|
|
180
|
+
|
|
181
|
+
try {
|
|
182
|
+
// Update worker status
|
|
183
|
+
this.server.setActiveContext(contextName);
|
|
184
|
+
this.server.setCurrentTask(task.id);
|
|
185
|
+
|
|
186
|
+
console.log(`[worker] Executing task [${task.id.slice(0, 8)}] "${task.title}" in context "${contextName}"`);
|
|
187
|
+
|
|
188
|
+
// Mark task as active
|
|
189
|
+
await engine.startTask(task.id);
|
|
190
|
+
|
|
191
|
+
// Switch agent to the correct context
|
|
192
|
+
this.agent.setContext(contextName);
|
|
193
|
+
|
|
194
|
+
// Ensure context exists in memory daemon
|
|
195
|
+
try {
|
|
196
|
+
await this.memory.createContext(contextName);
|
|
197
|
+
} catch {
|
|
198
|
+
// Context may already exist
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
// Execute the task via the agent loop:
|
|
202
|
+
// - Agent queries memory for relevant episodes
|
|
203
|
+
// - Builds prompt with context + memories
|
|
204
|
+
// - Calls LLM for a response
|
|
205
|
+
// - Stores episodes (write-through to L2)
|
|
206
|
+
// - Triggers promotion if needed
|
|
207
|
+
const taskPrompt = buildTaskPrompt(task.title, task.description);
|
|
208
|
+
const response = await this.agent.processMessage(taskPrompt);
|
|
209
|
+
|
|
210
|
+
console.log(`[worker] Task [${task.id.slice(0, 8)}] completed. Response: ${response.content.slice(0, 100)}...`);
|
|
211
|
+
|
|
212
|
+
// Mark task as complete
|
|
213
|
+
await engine.completeTask(task.id);
|
|
214
|
+
|
|
215
|
+
// Report status to Primary
|
|
216
|
+
await this.reportStatus("working", contextName, task.id);
|
|
217
|
+
} catch (err) {
|
|
218
|
+
console.error(`[worker] Task [${task.id.slice(0, 8)}] failed:`, (err as Error).message);
|
|
219
|
+
await this.reportStatus("error", contextName, task.id, (err as Error).message);
|
|
220
|
+
} finally {
|
|
221
|
+
this.executing = false;
|
|
222
|
+
this.server.setCurrentTask(null);
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
// Execute one task per poll cycle to stay responsive
|
|
226
|
+
return;
|
|
227
|
+
}
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
// No tasks found in any context — go idle
|
|
231
|
+
if (this.server.getAssignedContexts().length > 0) {
|
|
232
|
+
this.server.setActiveContext(null);
|
|
233
|
+
this.server.setCurrentTask(null);
|
|
234
|
+
}
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
/**
|
|
238
|
+
* Periodically report status back to Primary.
|
|
239
|
+
*/
|
|
240
|
+
startStatusReporting(): void {
|
|
241
|
+
this.statusReportTimer = setInterval(() => {
|
|
242
|
+
if (!this.running || !this.registeredWorkerId) return;
|
|
243
|
+
|
|
244
|
+
const contexts = this.server.getAssignedContexts();
|
|
245
|
+
if (contexts.length === 0) return;
|
|
246
|
+
|
|
247
|
+
this.reportStatus(
|
|
248
|
+
this.executing ? "working" : "idle",
|
|
249
|
+
this.executing ? contexts[0] : null,
|
|
250
|
+
null,
|
|
251
|
+
).catch(() => {});
|
|
252
|
+
}, this.workerConfig.status_report_interval_ms);
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
/**
|
|
256
|
+
* Send a status report to the Primary.
|
|
257
|
+
*/
|
|
258
|
+
async reportStatus(
|
|
259
|
+
activity: "idle" | "working" | "error",
|
|
260
|
+
currentContext: string | null,
|
|
261
|
+
currentTask: string | null,
|
|
262
|
+
error?: string,
|
|
263
|
+
): Promise<void> {
|
|
264
|
+
if (!this.registeredWorkerId) return;
|
|
265
|
+
|
|
266
|
+
const report: WorkerStatusReport = {
|
|
267
|
+
activity,
|
|
268
|
+
current_context: currentContext,
|
|
269
|
+
current_task: currentTask,
|
|
270
|
+
error,
|
|
271
|
+
};
|
|
272
|
+
|
|
273
|
+
try {
|
|
274
|
+
const statusUrl = `${this.workerConfig.primary_url}${PRIMARY_ROUTES.status(this.registeredWorkerId)}`;
|
|
275
|
+
await fetch(statusUrl, {
|
|
276
|
+
method: "POST",
|
|
277
|
+
headers: { "Content-Type": "application/json" },
|
|
278
|
+
body: JSON.stringify(report),
|
|
279
|
+
});
|
|
280
|
+
} catch {
|
|
281
|
+
// Primary unreachable — non-fatal, will retry next interval
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
// --- Accessors for testing ---
|
|
286
|
+
|
|
287
|
+
getServer(): WorkerServer {
|
|
288
|
+
return this.server;
|
|
289
|
+
}
|
|
290
|
+
|
|
291
|
+
getAgent(): Agent {
|
|
292
|
+
return this.agent;
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
getMemoryClient(): MemoryClient {
|
|
296
|
+
return this.memory;
|
|
297
|
+
}
|
|
298
|
+
|
|
299
|
+
isRunning(): boolean {
|
|
300
|
+
return this.running;
|
|
301
|
+
}
|
|
302
|
+
|
|
303
|
+
isExecuting(): boolean {
|
|
304
|
+
return this.executing;
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
getRegisteredWorkerId(): string | null {
|
|
308
|
+
return this.registeredWorkerId;
|
|
309
|
+
}
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
/**
|
|
313
|
+
* Build a prompt for executing a task via the agent.
|
|
314
|
+
*/
|
|
315
|
+
function buildTaskPrompt(title: string, description: string): string {
|
|
316
|
+
let prompt = `[TASK] ${title}`;
|
|
317
|
+
if (description) {
|
|
318
|
+
prompt += `\n\n${description}`;
|
|
319
|
+
}
|
|
320
|
+
prompt += "\n\nPlease work on this task. Use your memory and knowledge to provide a thorough response.";
|
|
321
|
+
return prompt;
|
|
322
|
+
}
|
|
323
|
+
|
|
324
|
+
/**
|
|
325
|
+
* Start a worker from a config file path (convenience entry point).
|
|
326
|
+
*/
|
|
327
|
+
export async function startWorker(config: HivemindConfig): Promise<WorkerRuntime> {
|
|
328
|
+
if (!config.worker || !config.worker.enabled) {
|
|
329
|
+
throw new Error("Worker mode is not enabled in config. Set [worker] enabled = true.");
|
|
330
|
+
}
|
|
331
|
+
|
|
332
|
+
const runtime = new WorkerRuntime({
|
|
333
|
+
config,
|
|
334
|
+
workerConfig: config.worker,
|
|
335
|
+
});
|
|
336
|
+
|
|
337
|
+
await runtime.start();
|
|
338
|
+
return runtime;
|
|
339
|
+
}
|
package/run-aidan.sh
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# Run the Aidan agent with auto-restart; starts the memory daemon if needed.
set -euo pipefail
cd /tmp/hivemind

# SECURITY FIX: the previous version hard-coded live SESAME_API_KEY and
# LLM_API_KEY values in this script, which ships inside the published npm
# package. Those keys are compromised and must be rotated. Secrets are now
# loaded from an untracked env file (or the caller's environment).
ENV_FILE="${HIVEMIND_ENV_FILE:-$HOME/.hivemind/secrets.env}"
if [[ -f "$ENV_FILE" ]]; then
  # shellcheck disable=SC1090
  source "$ENV_FILE"
fi
: "${SESAME_API_KEY:?SESAME_API_KEY is not set (export it or put it in $ENV_FILE)}"
: "${LLM_API_KEY:?LLM_API_KEY is not set (export it or put it in $ENV_FILE)}"
export SESAME_API_KEY LLM_API_KEY

# Ensure memory daemon is running
if ! curl -s http://localhost:3434/health &>/dev/null; then
  echo "[hivemind] Starting memory daemon..."
  nohup ./packages/memory/target/release/hivemind-memory > /tmp/hivemind-memory.log 2>&1 &
  sleep 2
fi

# Auto-restart loop.
# FIX: under `set -e -o pipefail`, a crashing node process used to abort the
# whole script before `EXIT_CODE=$?` ever ran — the "auto-restart" loop never
# restarted anything. Guarding the pipeline with `|| EXIT_CODE=$?` captures
# the failure status without killing the script.
while true; do
  echo "[hivemind] Starting Aidan at $(date)"
  EXIT_CODE=0
  node packages/cli/dist/main.js start --config config/default.toml 2>&1 \
    | tee -a /tmp/hivemind-aidan.log || EXIT_CODE=$?
  echo "[hivemind] Aidan exited with code $EXIT_CODE at $(date) — restarting in 5s..."
  sleep 5
done
|