@sesamespace/hivemind 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/PLANNING.md +383 -0
  2. package/TASKS.md +60 -0
  3. package/install.sh +187 -0
  4. package/npm-package.json +28 -0
  5. package/package.json +13 -20
  6. package/packages/cli/package.json +23 -0
  7. package/{dist/chunk-DVR2KBL7.js → packages/cli/src/commands/fleet.ts} +50 -30
  8. package/packages/cli/src/commands/init.ts +230 -0
  9. package/{dist/chunk-MBS5A6BZ.js → packages/cli/src/commands/service.ts} +51 -42
  10. package/{dist/chunk-RNK5Q5GR.js → packages/cli/src/commands/start.ts} +12 -14
  11. package/{dist/main.js → packages/cli/src/main.ts} +12 -18
  12. package/packages/cli/tsconfig.json +8 -0
  13. package/packages/memory/Cargo.lock +6480 -0
  14. package/packages/memory/Cargo.toml +21 -0
  15. package/packages/memory/src/context.rs +179 -0
  16. package/packages/memory/src/embeddings.rs +51 -0
  17. package/packages/memory/src/main.rs +626 -0
  18. package/packages/memory/src/promotion.rs +637 -0
  19. package/packages/memory/src/scoring.rs +131 -0
  20. package/packages/memory/src/store.rs +460 -0
  21. package/packages/memory/src/tasks.rs +321 -0
  22. package/packages/runtime/package.json +24 -0
  23. package/packages/runtime/src/__tests__/fleet-integration.test.ts +235 -0
  24. package/packages/runtime/src/__tests__/fleet.test.ts +207 -0
  25. package/packages/runtime/src/__tests__/integration.test.ts +434 -0
  26. package/packages/runtime/src/agent.ts +255 -0
  27. package/packages/runtime/src/config.ts +130 -0
  28. package/packages/runtime/src/context.ts +192 -0
  29. package/packages/runtime/src/fleet/fleet-manager.ts +399 -0
  30. package/packages/runtime/src/fleet/memory-sync.ts +362 -0
  31. package/packages/runtime/src/fleet/primary-client.ts +285 -0
  32. package/packages/runtime/src/fleet/worker-protocol.ts +158 -0
  33. package/packages/runtime/src/fleet/worker-server.ts +246 -0
  34. package/packages/runtime/src/index.ts +57 -0
  35. package/packages/runtime/src/llm-client.ts +65 -0
  36. package/packages/runtime/src/memory-client.ts +309 -0
  37. package/packages/runtime/src/pipeline.ts +151 -0
  38. package/packages/runtime/src/prompt.ts +173 -0
  39. package/packages/runtime/src/sesame.ts +174 -0
  40. package/{dist/start.js → packages/runtime/src/start.ts} +7 -9
  41. package/packages/runtime/src/task-engine.ts +113 -0
  42. package/packages/runtime/src/worker.ts +339 -0
  43. package/packages/runtime/tsconfig.json +8 -0
  44. package/pnpm-workspace.yaml +2 -0
  45. package/run-aidan.sh +23 -0
  46. package/scripts/bootstrap.sh +196 -0
  47. package/scripts/build-npm.sh +94 -0
  48. package/scripts/com.hivemind.agent.plist +44 -0
  49. package/scripts/com.hivemind.memory.plist +31 -0
  50. package/tsconfig.json +22 -0
  51. package/tsup.config.ts +28 -0
  52. package/dist/chunk-2I2O6X5D.js +0 -1408
  53. package/dist/chunk-2I2O6X5D.js.map +0 -1
  54. package/dist/chunk-DVR2KBL7.js.map +0 -1
  55. package/dist/chunk-MBS5A6BZ.js.map +0 -1
  56. package/dist/chunk-NVJ424TB.js +0 -731
  57. package/dist/chunk-NVJ424TB.js.map +0 -1
  58. package/dist/chunk-RNK5Q5GR.js.map +0 -1
  59. package/dist/chunk-XNOWVLXD.js +0 -160
  60. package/dist/chunk-XNOWVLXD.js.map +0 -1
  61. package/dist/commands/fleet.js +0 -9
  62. package/dist/commands/fleet.js.map +0 -1
  63. package/dist/commands/init.js +0 -7
  64. package/dist/commands/init.js.map +0 -1
  65. package/dist/commands/service.js +0 -7
  66. package/dist/commands/service.js.map +0 -1
  67. package/dist/commands/start.js +0 -9
  68. package/dist/commands/start.js.map +0 -1
  69. package/dist/index.js +0 -41
  70. package/dist/index.js.map +0 -1
  71. package/dist/main.js.map +0 -1
  72. package/dist/start.js.map +0 -1
@@ -0,0 +1,321 @@
1
+ use anyhow::Result;
2
+ use arrow_array::{RecordBatch, RecordBatchIterator, StringArray};
3
+ use arrow_schema::{DataType, Field, Schema};
4
+ use chrono::Utc;
5
+ use futures::stream::TryStreamExt;
6
+ use lancedb::{connection::Connection, query::ExecutableQuery, query::QueryBase, Table};
7
+ use serde::{Deserialize, Serialize};
8
+ use std::sync::Arc;
9
+
10
+ const TASKS_TABLE: &str = "tasks";
11
+
12
+ #[derive(Debug, Serialize, Deserialize, Clone)]
13
+ pub struct TaskRecord {
14
+ pub id: String,
15
+ pub context_name: String,
16
+ pub title: String,
17
+ pub description: String,
18
+ pub status: String,
19
+ pub blocked_by: String, // JSON array stored as string
20
+ pub created_at: String,
21
+ pub updated_at: String,
22
+ }
23
+
24
+ #[derive(Debug, Deserialize)]
25
+ pub struct TaskInput {
26
+ pub context_name: String,
27
+ pub title: String,
28
+ pub description: String,
29
+ #[serde(default = "default_status")]
30
+ pub status: String,
31
+ #[serde(default)]
32
+ pub blocked_by: Vec<String>,
33
+ }
34
+
35
/// Serde default for [`TaskInput::status`]: new tasks start out "planned".
fn default_status() -> String {
    String::from("planned")
}
38
+
39
+ #[derive(Debug, Deserialize)]
40
+ pub struct TaskUpdate {
41
+ pub status: Option<String>,
42
+ pub title: Option<String>,
43
+ pub description: Option<String>,
44
+ pub blocked_by: Option<Vec<String>>,
45
+ }
46
+
47
+ pub struct TaskStore {
48
+ db: Connection,
49
+ }
50
+
51
+ impl TaskStore {
52
+ pub async fn new(db: Connection) -> Result<Self> {
53
+ let store = Self { db };
54
+ store.ensure_table().await?;
55
+ Ok(store)
56
+ }
57
+
58
+ fn schema() -> Arc<Schema> {
59
+ Arc::new(Schema::new(vec![
60
+ Field::new("id", DataType::Utf8, false),
61
+ Field::new("context_name", DataType::Utf8, false),
62
+ Field::new("title", DataType::Utf8, false),
63
+ Field::new("description", DataType::Utf8, false),
64
+ Field::new("status", DataType::Utf8, false),
65
+ Field::new("blocked_by", DataType::Utf8, false),
66
+ Field::new("created_at", DataType::Utf8, false),
67
+ Field::new("updated_at", DataType::Utf8, false),
68
+ ]))
69
+ }
70
+
71
+ async fn ensure_table(&self) -> Result<()> {
72
+ let names = self.db.table_names().execute().await?;
73
+ if !names.contains(&TASKS_TABLE.to_string()) {
74
+ let schema = Self::schema();
75
+ let batch = RecordBatch::new_empty(schema.clone());
76
+ let batches = RecordBatchIterator::new(vec![Ok(batch)], schema);
77
+ self.db
78
+ .create_table(TASKS_TABLE, Box::new(batches))
79
+ .execute()
80
+ .await?;
81
+ tracing::info!("Created tasks table");
82
+ }
83
+ Ok(())
84
+ }
85
+
86
+ pub async fn create_task(&self, input: TaskInput) -> Result<TaskRecord> {
87
+ let id = uuid::Uuid::new_v4().to_string();
88
+ let now = Utc::now().to_rfc3339();
89
+ let blocked_by_json = serde_json::to_string(&input.blocked_by)?;
90
+
91
+ let task = TaskRecord {
92
+ id: id.clone(),
93
+ context_name: input.context_name.clone(),
94
+ title: input.title.clone(),
95
+ description: input.description.clone(),
96
+ status: input.status,
97
+ blocked_by: blocked_by_json.clone(),
98
+ created_at: now.clone(),
99
+ updated_at: now.clone(),
100
+ };
101
+
102
+ let schema = Self::schema();
103
+ let batch = RecordBatch::try_new(
104
+ schema.clone(),
105
+ vec![
106
+ Arc::new(StringArray::from(vec![task.id.as_str()])),
107
+ Arc::new(StringArray::from(vec![task.context_name.as_str()])),
108
+ Arc::new(StringArray::from(vec![task.title.as_str()])),
109
+ Arc::new(StringArray::from(vec![task.description.as_str()])),
110
+ Arc::new(StringArray::from(vec![task.status.as_str()])),
111
+ Arc::new(StringArray::from(vec![blocked_by_json.as_str()])),
112
+ Arc::new(StringArray::from(vec![task.created_at.as_str()])),
113
+ Arc::new(StringArray::from(vec![task.updated_at.as_str()])),
114
+ ],
115
+ )?;
116
+
117
+ let table = self.db.open_table(TASKS_TABLE).execute().await?;
118
+ let batches = RecordBatchIterator::new(vec![Ok(batch)], schema);
119
+ table.add(Box::new(batches)).execute().await?;
120
+
121
+ tracing::debug!("Created task {} in context {}", task.id, task.context_name);
122
+ Ok(task)
123
+ }
124
+
125
+ pub async fn list_tasks(
126
+ &self,
127
+ context: &str,
128
+ status_filter: Option<&str>,
129
+ ) -> Result<Vec<TaskRecord>> {
130
+ let table = self.db.open_table(TASKS_TABLE).execute().await?;
131
+
132
+ let filter = match status_filter {
133
+ Some(status) => format!("context_name = '{}' AND status = '{}'", context, status),
134
+ None => format!("context_name = '{}'", context),
135
+ };
136
+
137
+ let results = table.query().only_if(filter).execute().await?;
138
+
139
+ let mut tasks = Vec::new();
140
+ let batches: Vec<RecordBatch> = results.try_collect().await?;
141
+
142
+ for batch in &batches {
143
+ let ids = batch
144
+ .column_by_name("id")
145
+ .unwrap()
146
+ .as_any()
147
+ .downcast_ref::<StringArray>()
148
+ .unwrap();
149
+ let ctx_names = batch
150
+ .column_by_name("context_name")
151
+ .unwrap()
152
+ .as_any()
153
+ .downcast_ref::<StringArray>()
154
+ .unwrap();
155
+ let titles = batch
156
+ .column_by_name("title")
157
+ .unwrap()
158
+ .as_any()
159
+ .downcast_ref::<StringArray>()
160
+ .unwrap();
161
+ let descriptions = batch
162
+ .column_by_name("description")
163
+ .unwrap()
164
+ .as_any()
165
+ .downcast_ref::<StringArray>()
166
+ .unwrap();
167
+ let statuses = batch
168
+ .column_by_name("status")
169
+ .unwrap()
170
+ .as_any()
171
+ .downcast_ref::<StringArray>()
172
+ .unwrap();
173
+ let blocked_bys = batch
174
+ .column_by_name("blocked_by")
175
+ .unwrap()
176
+ .as_any()
177
+ .downcast_ref::<StringArray>()
178
+ .unwrap();
179
+ let created_ats = batch
180
+ .column_by_name("created_at")
181
+ .unwrap()
182
+ .as_any()
183
+ .downcast_ref::<StringArray>()
184
+ .unwrap();
185
+ let updated_ats = batch
186
+ .column_by_name("updated_at")
187
+ .unwrap()
188
+ .as_any()
189
+ .downcast_ref::<StringArray>()
190
+ .unwrap();
191
+
192
+ for i in 0..batch.num_rows() {
193
+ tasks.push(TaskRecord {
194
+ id: ids.value(i).to_string(),
195
+ context_name: ctx_names.value(i).to_string(),
196
+ title: titles.value(i).to_string(),
197
+ description: descriptions.value(i).to_string(),
198
+ status: statuses.value(i).to_string(),
199
+ blocked_by: blocked_bys.value(i).to_string(),
200
+ created_at: created_ats.value(i).to_string(),
201
+ updated_at: updated_ats.value(i).to_string(),
202
+ });
203
+ }
204
+ }
205
+
206
+ // Sort by created_at
207
+ tasks.sort_by(|a, b| a.created_at.cmp(&b.created_at));
208
+
209
+ Ok(tasks)
210
+ }
211
+
212
+ pub async fn get_task(&self, id: &str) -> Result<Option<TaskRecord>> {
213
+ let table = self.db.open_table(TASKS_TABLE).execute().await?;
214
+ let results = table
215
+ .query()
216
+ .only_if(format!("id = '{}'", id))
217
+ .execute()
218
+ .await?;
219
+
220
+ let batches: Vec<RecordBatch> = results.try_collect().await?;
221
+ for batch in &batches {
222
+ if batch.num_rows() > 0 {
223
+ let ids = batch.column_by_name("id").unwrap().as_any().downcast_ref::<StringArray>().unwrap();
224
+ let ctx_names = batch.column_by_name("context_name").unwrap().as_any().downcast_ref::<StringArray>().unwrap();
225
+ let titles = batch.column_by_name("title").unwrap().as_any().downcast_ref::<StringArray>().unwrap();
226
+ let descriptions = batch.column_by_name("description").unwrap().as_any().downcast_ref::<StringArray>().unwrap();
227
+ let statuses = batch.column_by_name("status").unwrap().as_any().downcast_ref::<StringArray>().unwrap();
228
+ let blocked_bys = batch.column_by_name("blocked_by").unwrap().as_any().downcast_ref::<StringArray>().unwrap();
229
+ let created_ats = batch.column_by_name("created_at").unwrap().as_any().downcast_ref::<StringArray>().unwrap();
230
+ let updated_ats = batch.column_by_name("updated_at").unwrap().as_any().downcast_ref::<StringArray>().unwrap();
231
+
232
+ return Ok(Some(TaskRecord {
233
+ id: ids.value(0).to_string(),
234
+ context_name: ctx_names.value(0).to_string(),
235
+ title: titles.value(0).to_string(),
236
+ description: descriptions.value(0).to_string(),
237
+ status: statuses.value(0).to_string(),
238
+ blocked_by: blocked_bys.value(0).to_string(),
239
+ created_at: created_ats.value(0).to_string(),
240
+ updated_at: updated_ats.value(0).to_string(),
241
+ }));
242
+ }
243
+ }
244
+
245
+ Ok(None)
246
+ }
247
+
248
+ pub async fn update_task(&self, id: &str, update: TaskUpdate) -> Result<Option<TaskRecord>> {
249
+ let existing = self.get_task(id).await?;
250
+ let Some(mut task) = existing else {
251
+ return Ok(None);
252
+ };
253
+
254
+ // Apply updates
255
+ if let Some(status) = update.status {
256
+ task.status = status;
257
+ }
258
+ if let Some(title) = update.title {
259
+ task.title = title;
260
+ }
261
+ if let Some(description) = update.description {
262
+ task.description = description;
263
+ }
264
+ if let Some(blocked_by) = update.blocked_by {
265
+ task.blocked_by = serde_json::to_string(&blocked_by)?;
266
+ }
267
+ task.updated_at = Utc::now().to_rfc3339();
268
+
269
+ // Delete and reinsert
270
+ let table = self.db.open_table(TASKS_TABLE).execute().await?;
271
+ table.delete(&format!("id = '{}'", id)).await?;
272
+
273
+ let schema = Self::schema();
274
+ let batch = RecordBatch::try_new(
275
+ schema.clone(),
276
+ vec![
277
+ Arc::new(StringArray::from(vec![task.id.as_str()])),
278
+ Arc::new(StringArray::from(vec![task.context_name.as_str()])),
279
+ Arc::new(StringArray::from(vec![task.title.as_str()])),
280
+ Arc::new(StringArray::from(vec![task.description.as_str()])),
281
+ Arc::new(StringArray::from(vec![task.status.as_str()])),
282
+ Arc::new(StringArray::from(vec![task.blocked_by.as_str()])),
283
+ Arc::new(StringArray::from(vec![task.created_at.as_str()])),
284
+ Arc::new(StringArray::from(vec![task.updated_at.as_str()])),
285
+ ],
286
+ )?;
287
+
288
+ let batches = RecordBatchIterator::new(vec![Ok(batch)], schema);
289
+ table.add(Box::new(batches)).execute().await?;
290
+
291
+ Ok(Some(task))
292
+ }
293
+
294
+ /// Get the next available task for a context:
295
+ /// - Status is "planned" (not active/complete/archived)
296
+ /// - Not blocked by any incomplete tasks
297
+ pub async fn get_next_task(&self, context: &str) -> Result<Option<TaskRecord>> {
298
+ let planned = self.list_tasks(context, Some("planned")).await?;
299
+ let all_tasks = self.list_tasks(context, None).await?;
300
+
301
+ // Build a set of complete task IDs
302
+ let complete_ids: std::collections::HashSet<String> = all_tasks
303
+ .iter()
304
+ .filter(|t| t.status == "complete" || t.status == "archived")
305
+ .map(|t| t.id.clone())
306
+ .collect();
307
+
308
+ for task in planned {
309
+ let blocked_by: Vec<String> =
310
+ serde_json::from_str(&task.blocked_by).unwrap_or_default();
311
+
312
+ // Task is available if all blockers are complete
313
+ let is_blocked = blocked_by.iter().any(|b| !complete_ids.contains(b));
314
+ if !is_blocked {
315
+ return Ok(Some(task));
316
+ }
317
+ }
318
+
319
+ Ok(None)
320
+ }
321
+ }
@@ -0,0 +1,24 @@
1
+ {
2
+ "name": "@hivemind/runtime",
3
+ "version": "0.1.0",
4
+ "private": true,
5
+ "type": "module",
6
+ "main": "dist/index.js",
7
+ "scripts": {
8
+ "build": "tsc",
9
+ "typecheck": "tsc --noEmit",
10
+ "dev": "tsx src/start.ts",
11
+ "start": "node dist/start.js",
12
+ "test": "node --import tsx --test src/__tests__/*.test.ts"
13
+ },
14
+ "dependencies": {
15
+ "@iarna/toml": "^2.2.5",
16
+ "ws": "^8.18.0"
17
+ },
18
+ "devDependencies": {
19
+ "@types/node": "^20.14.0",
20
+ "@types/ws": "^8.5.10",
21
+ "tsx": "^4.16.0",
22
+ "typescript": "^5.5.0"
23
+ }
24
+ }
@@ -0,0 +1,235 @@
1
+ /**
2
+ * Fleet integration test (Phase 3, Task 3.5).
3
+ *
4
+ * Tests multi-worker fleet lifecycle: two WorkerServers, a PrimaryClient
5
+ * coordinating both, FleetManager dashboard, context migration, and
6
+ * memory sync round-trips.
7
+ */
8
+
9
+ import { describe, it, before, after } from "node:test";
10
+ import assert from "node:assert/strict";
11
+ import { PrimaryClient } from "../fleet/primary-client.js";
12
+ import { WorkerServer } from "../fleet/worker-server.js";
13
+ import { FleetManager } from "../fleet/fleet-manager.js";
14
+ import type {
15
+ WorkerRegistrationRequest,
16
+ WorkerStatusReport,
17
+ SyncPullRequest,
18
+ SyncPushRequest,
19
+ } from "../fleet/worker-protocol.js";
20
+
21
+ const PORT_A = 19900;
22
+ const PORT_B = 19901;
23
+ const URL_A = `http://localhost:${PORT_A}`;
24
+ const URL_B = `http://localhost:${PORT_B}`;
25
+
26
+ describe("Fleet Integration — Multi-Worker", () => {
27
+ let workerA: WorkerServer;
28
+ let workerB: WorkerServer;
29
+ let primary: PrimaryClient;
30
+ let fleet: FleetManager;
31
+ let workerIdA: string;
32
+ let workerIdB: string;
33
+
34
+ before(async () => {
35
+ workerA = new WorkerServer({
36
+ workerId: "worker-a",
37
+ port: PORT_A,
38
+ maxContexts: 3,
39
+ memoryDaemonUrl: "http://localhost:9999",
40
+ ollamaUrl: "http://localhost:11434",
41
+ });
42
+ workerB = new WorkerServer({
43
+ workerId: "worker-b",
44
+ port: PORT_B,
45
+ maxContexts: 2,
46
+ memoryDaemonUrl: "http://localhost:9999",
47
+ ollamaUrl: "http://localhost:11434",
48
+ });
49
+ await workerA.start();
50
+ await workerB.start();
51
+
52
+ primary = new PrimaryClient();
53
+ fleet = new FleetManager(primary);
54
+ });
55
+
56
+ after(async () => {
57
+ primary.stopHealthPolling();
58
+ await workerA.stop();
59
+ await workerB.stop();
60
+ });
61
+
62
+ describe("Registration", () => {
63
+ it("should register worker A", () => {
64
+ const req: WorkerRegistrationRequest = {
65
+ url: URL_A,
66
+ capabilities: { max_contexts: 3, has_ollama: true, has_memory_daemon: true, available_models: [] },
67
+ };
68
+ const resp = primary.handleRegistration(req);
69
+ workerIdA = resp.worker_id;
70
+ assert.ok(workerIdA);
71
+ });
72
+
73
+ it("should register worker B", () => {
74
+ const req: WorkerRegistrationRequest = {
75
+ url: URL_B,
76
+ capabilities: { max_contexts: 2, has_ollama: true, has_memory_daemon: true, available_models: [] },
77
+ };
78
+ const resp = primary.handleRegistration(req);
79
+ workerIdB = resp.worker_id;
80
+ assert.ok(workerIdB);
81
+ });
82
+
83
+ it("should list both workers", () => {
84
+ assert.equal(primary.getWorkers().length, 2);
85
+ });
86
+ });
87
+
88
+ describe("Health Checks — Both Workers", () => {
89
+ it("should get healthy responses from both", async () => {
90
+ const healthA = await primary.checkHealth(workerIdA);
91
+ const healthB = await primary.checkHealth(workerIdB);
92
+ assert.ok(healthA);
93
+ assert.ok(healthB);
94
+ assert.equal(healthA.status, "healthy");
95
+ assert.equal(healthB.status, "healthy");
96
+ });
97
+
98
+ it("should poll all and get two healthy", async () => {
99
+ const results = await primary.checkAllHealth();
100
+ assert.equal(results.size, 2);
101
+ for (const [, status] of results) {
102
+ assert.equal(status, "healthy");
103
+ }
104
+ });
105
+ });
106
+
107
+ describe("Context Distribution Across Workers", () => {
108
+ it("should assign project-alpha to worker A", async () => {
109
+ const resp = await primary.assignContext(workerIdA, "project-alpha", "Alpha");
110
+ assert.equal(resp.accepted, true);
111
+ });
112
+
113
+ it("should assign project-beta to worker B", async () => {
114
+ const resp = await primary.assignContext(workerIdB, "project-beta", "Beta");
115
+ assert.equal(resp.accepted, true);
116
+ });
117
+
118
+ it("should assign project-gamma to worker A", async () => {
119
+ const resp = await primary.assignContext(workerIdA, "project-gamma", "Gamma");
120
+ assert.equal(resp.accepted, true);
121
+ });
122
+
123
+ it("should find correct worker for each context", () => {
124
+ assert.equal(primary.findWorkerForContext("project-alpha")?.id, workerIdA);
125
+ assert.equal(primary.findWorkerForContext("project-beta")?.id, workerIdB);
126
+ assert.equal(primary.findWorkerForContext("project-gamma")?.id, workerIdA);
127
+ });
128
+
129
+ it("should reject when worker B is at capacity (max 2)", async () => {
130
+ // B already has project-beta (1/2), add one more
131
+ const resp1 = await primary.assignContext(workerIdB, "project-delta", "Delta");
132
+ assert.equal(resp1.accepted, true);
133
+ // Now at 2/2 — next should fail
134
+ const resp2 = await primary.assignContext(workerIdB, "project-epsilon", "Epsilon");
135
+ assert.equal(resp2.accepted, false);
136
+ assert.ok(resp2.reason?.includes("capacity"));
137
+ });
138
+ });
139
+
140
+ describe("Status Reporting", () => {
141
+ it("should report working status on worker A", async () => {
142
+ workerA.setActiveContext("project-alpha");
143
+ workerA.setCurrentTask("task-1");
144
+
145
+ const report: WorkerStatusReport = { activity: "working", current_context: "project-alpha", current_task: "task-1" };
146
+ const status = primary.handleStatusReport(workerIdA, report);
147
+ assert.ok(status);
148
+ assert.equal(status.activity, "working");
149
+ assert.equal(status.current_context, "project-alpha");
150
+ });
151
+
152
+ it("should report idle status on worker B", async () => {
153
+ const resp = await fetch(`${URL_B}/status`);
154
+ const body = (await resp.json()) as WorkerStatusReport;
155
+ assert.equal(body.activity, "idle");
156
+ });
157
+ });
158
+
159
+ describe("Memory Sync Endpoints", () => {
160
+ it("should return 501 when no sync handler registered", async () => {
161
+ const resp = await fetch(`${URL_A}/sync/push`, {
162
+ method: "POST",
163
+ headers: { "Content-Type": "application/json" },
164
+ body: JSON.stringify({
165
+ entries: [],
166
+ episodes: [],
167
+ } satisfies SyncPushRequest),
168
+ });
169
+ // 501 = handler not registered (expected without MemorySync wiring)
170
+ assert.equal(resp.status, 501);
171
+ });
172
+
173
+ it("should accept sync push when handler is registered", async () => {
174
+ workerA.onSyncPush(async (_req) => ({
175
+ l3_accepted: 0,
176
+ l2_appended: 0,
177
+ }));
178
+
179
+ const resp = await fetch(`${URL_A}/sync/push`, {
180
+ method: "POST",
181
+ headers: { "Content-Type": "application/json" },
182
+ body: JSON.stringify({
183
+ entries: [],
184
+ episodes: [],
185
+ } satisfies SyncPushRequest),
186
+ });
187
+ assert.equal(resp.status, 200);
188
+ const body = await resp.json() as any;
189
+ assert.equal(body.l3_accepted, 0);
190
+ });
191
+ });
192
+
193
+ describe("Context Unassignment and Reassignment", () => {
194
+ it("should unassign project-gamma from worker A", async () => {
195
+ const ok = await primary.unassignContext(workerIdA, "project-gamma");
196
+ assert.equal(ok, true);
197
+ });
198
+
199
+ it("should no longer find worker for project-gamma", () => {
200
+ assert.equal(primary.findWorkerForContext("project-gamma"), undefined);
201
+ });
202
+
203
+ it("should reassign project-gamma to worker B (after freeing a slot)", async () => {
204
+ // Unassign delta from B first
205
+ await primary.unassignContext(workerIdB, "project-delta");
206
+ const resp = await primary.assignContext(workerIdB, "project-gamma", "Gamma moved");
207
+ assert.equal(resp.accepted, true);
208
+ assert.equal(primary.findWorkerForContext("project-gamma")?.id, workerIdB);
209
+ });
210
+ });
211
+
212
+ describe("Fleet Dashboard", () => {
213
+ it("should produce a dashboard with current state", async () => {
214
+ const dashboard = await fleet.getDashboard();
215
+ assert.ok(dashboard);
216
+ assert.equal(dashboard.total_workers, 2);
217
+ // Workers should have contexts assigned
218
+ assert.ok(dashboard.workers.length === 2);
219
+ });
220
+ });
221
+
222
+ describe("Deregistration", () => {
223
+ it("should deregister worker A", () => {
224
+ assert.equal(primary.deregister(workerIdA), true);
225
+ });
226
+
227
+ it("should deregister worker B", () => {
228
+ assert.equal(primary.deregister(workerIdB), true);
229
+ });
230
+
231
+ it("should have no workers left", () => {
232
+ assert.equal(primary.getWorkers().length, 0);
233
+ });
234
+ });
235
+ });