openclaw-node-harness 2.0.0 → 2.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/mesh-agent.js +417 -94
- package/bin/mesh-join-token.js +147 -0
- package/bin/mesh-node-remove.js +277 -0
- package/bin/mesh-task-daemon.js +723 -15
- package/bin/openclaw-node-init.js +733 -0
- package/cli.js +8 -10
- package/lib/llm-providers.js +262 -0
- package/lib/mesh-collab.js +549 -0
- package/lib/mesh-plans.js +528 -0
- package/lib/mesh-tasks.js +50 -34
- package/package.json +4 -1
package/bin/mesh-task-daemon.js
CHANGED
|
@@ -33,15 +33,17 @@
|
|
|
33
33
|
|
|
34
34
|
const { connect, StringCodec } = require('nats');
|
|
35
35
|
const { createTask, TaskStore, TASK_STATUS, KV_BUCKET } = require('../lib/mesh-tasks');
|
|
36
|
+
const { createSession, CollabStore, COLLAB_STATUS, COLLAB_KV_BUCKET } = require('../lib/mesh-collab');
|
|
37
|
+
const { createPlan, autoRoutePlan, PlanStore, PLAN_STATUS, SUBTASK_STATUS, PLANS_KV_BUCKET } = require('../lib/mesh-plans');
|
|
36
38
|
const os = require('os');
|
|
37
39
|
|
|
38
40
|
const sc = StringCodec();
|
|
39
|
-
const { NATS_URL
|
|
41
|
+
const { NATS_URL } = require('../lib/nats-resolve');
|
|
40
42
|
const BUDGET_CHECK_INTERVAL = 30000; // 30s
|
|
41
43
|
const STALL_MINUTES = parseInt(process.env.MESH_STALL_MINUTES || '5'); // no heartbeat for this long → stalled
|
|
42
44
|
const NODE_ID = os.hostname().toLowerCase().replace(/[^a-z0-9-]/g, '-');
|
|
43
45
|
|
|
44
|
-
let nc, store;
|
|
46
|
+
let nc, store, collabStore, planStore;
|
|
45
47
|
|
|
46
48
|
// ── Logging ─────────────────────────────────────────
|
|
47
49
|
|
|
@@ -102,6 +104,30 @@ async function handleSubmit(msg) {
|
|
|
102
104
|
|
|
103
105
|
log(`SUBMIT ${task.task_id}: "${task.title}" (budget: ${task.budget_minutes}m, metric: ${task.metric || 'none'})`);
|
|
104
106
|
publishEvent('submitted', task);
|
|
107
|
+
|
|
108
|
+
// Auto-create collab session if task has collaboration spec
|
|
109
|
+
if (task.collaboration && collabStore) {
|
|
110
|
+
const session = createSession(task.task_id, task.collaboration);
|
|
111
|
+
await collabStore.put(session);
|
|
112
|
+
|
|
113
|
+
// Store session_id back in task for agent discovery
|
|
114
|
+
task.collab_session_id = session.session_id;
|
|
115
|
+
await store.put(task);
|
|
116
|
+
|
|
117
|
+
log(` → COLLAB SESSION ${session.session_id} auto-created (mode: ${session.mode})`);
|
|
118
|
+
publishCollabEvent('created', session);
|
|
119
|
+
|
|
120
|
+
// Broadcast recruit signal
|
|
121
|
+
nc.publish(`mesh.collab.${session.session_id}.recruit`, sc.encode(JSON.stringify({
|
|
122
|
+
session_id: session.session_id,
|
|
123
|
+
task_id: task.task_id,
|
|
124
|
+
mode: session.mode,
|
|
125
|
+
min_nodes: session.min_nodes,
|
|
126
|
+
max_nodes: session.max_nodes,
|
|
127
|
+
task_title: task.title,
|
|
128
|
+
})));
|
|
129
|
+
}
|
|
130
|
+
|
|
105
131
|
respond(msg, task);
|
|
106
132
|
}
|
|
107
133
|
|
|
@@ -176,6 +202,10 @@ async function handleComplete(msg) {
|
|
|
176
202
|
|
|
177
203
|
log(`COMPLETE ${task_id} in ${elapsed}m: ${result?.summary || 'no summary'}`);
|
|
178
204
|
publishEvent('completed', task);
|
|
205
|
+
|
|
206
|
+
// Check if this task belongs to a plan
|
|
207
|
+
await checkPlanProgress(task_id, 'completed');
|
|
208
|
+
|
|
179
209
|
respond(msg, task);
|
|
180
210
|
}
|
|
181
211
|
|
|
@@ -192,6 +222,10 @@ async function handleFail(msg) {
|
|
|
192
222
|
|
|
193
223
|
log(`FAIL ${task_id}: ${reason}`);
|
|
194
224
|
publishEvent('failed', task);
|
|
225
|
+
|
|
226
|
+
// Check if this task belongs to a plan
|
|
227
|
+
await checkPlanProgress(task_id, 'failed');
|
|
228
|
+
|
|
195
229
|
respond(msg, task);
|
|
196
230
|
}
|
|
197
231
|
|
|
@@ -233,8 +267,8 @@ async function handleGet(msg) {
|
|
|
233
267
|
if (!task_id) return respondError(msg, 'task_id is required');
|
|
234
268
|
|
|
235
269
|
const task = await store.get(task_id);
|
|
236
|
-
|
|
237
|
-
|
|
270
|
+
if (!task) return respondError(msg, `Task ${task_id} not found`);
|
|
271
|
+
|
|
238
272
|
respond(msg, task);
|
|
239
273
|
}
|
|
240
274
|
|
|
@@ -279,20 +313,13 @@ async function handleCancel(msg) {
|
|
|
279
313
|
const { task_id, reason } = parseRequest(msg);
|
|
280
314
|
if (!task_id) return respondError(msg, 'task_id is required');
|
|
281
315
|
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
if (!result) return respondError(msg, `Task ${task_id} not found`);
|
|
316
|
+
const task = await store.get(task_id);
|
|
317
|
+
if (!task) return respondError(msg, `Task ${task_id} not found`);
|
|
285
318
|
|
|
286
|
-
const task = result.task;
|
|
287
319
|
task.status = TASK_STATUS.CANCELLED;
|
|
288
320
|
task.completed_at = new Date().toISOString();
|
|
289
321
|
task.result = { success: false, summary: reason || 'cancelled' };
|
|
290
|
-
|
|
291
|
-
try {
|
|
292
|
-
await store.kv.update(task_id, sc.encode(JSON.stringify(task)), result.revision);
|
|
293
|
-
} catch (err) {
|
|
294
|
-
return respondError(msg, `Cancel conflict — task ${task_id} was modified concurrently. Retry.`);
|
|
295
|
-
}
|
|
322
|
+
await store.put(task);
|
|
296
323
|
|
|
297
324
|
log(`CANCEL ${task_id}: ${reason || 'no reason'}`);
|
|
298
325
|
publishEvent('cancelled', task);
|
|
@@ -373,12 +400,666 @@ async function enforceBudgets() {
|
|
|
373
400
|
}
|
|
374
401
|
}
|
|
375
402
|
|
|
403
|
+
// ── Collab Event Publishing ──────────────────────────
|
|
404
|
+
|
|
405
|
+
function publishCollabEvent(eventType, session) {
|
|
406
|
+
nc.publish(`mesh.events.collab.${eventType}`, sc.encode(JSON.stringify({
|
|
407
|
+
event: eventType,
|
|
408
|
+
session_id: session.session_id,
|
|
409
|
+
task_id: session.task_id,
|
|
410
|
+
session,
|
|
411
|
+
timestamp: new Date().toISOString(),
|
|
412
|
+
})));
|
|
413
|
+
}
|
|
414
|
+
|
|
415
|
+
// ── Collab Subject Handlers ─────────────────────────
|
|
416
|
+
|
|
417
|
+
/**
|
|
418
|
+
* mesh.collab.create — Create a collab session for a collaborative task.
|
|
419
|
+
* Expects: { task_id }
|
|
420
|
+
* Called automatically when a task with collaboration spec is submitted.
|
|
421
|
+
*/
|
|
422
|
+
async function handleCollabCreate(msg) {
|
|
423
|
+
const { task_id } = parseRequest(msg);
|
|
424
|
+
if (!task_id) return respondError(msg, 'task_id is required');
|
|
425
|
+
|
|
426
|
+
const task = await store.get(task_id);
|
|
427
|
+
if (!task) return respondError(msg, `Task ${task_id} not found`);
|
|
428
|
+
if (!task.collaboration) return respondError(msg, `Task ${task_id} has no collaboration spec`);
|
|
429
|
+
|
|
430
|
+
// Check for existing session
|
|
431
|
+
const existing = await collabStore.findByTaskId(task_id);
|
|
432
|
+
if (existing) return respondError(msg, `Session already exists for task ${task_id}: ${existing.session_id}`);
|
|
433
|
+
|
|
434
|
+
const session = createSession(task_id, task.collaboration);
|
|
435
|
+
await collabStore.put(session);
|
|
436
|
+
|
|
437
|
+
log(`COLLAB CREATE ${session.session_id} for task ${task_id} (mode: ${session.mode}, min: ${session.min_nodes}, max: ${session.max_nodes || '∞'})`);
|
|
438
|
+
publishCollabEvent('created', session);
|
|
439
|
+
|
|
440
|
+
// Broadcast recruit signal
|
|
441
|
+
nc.publish(`mesh.collab.${session.session_id}.recruit`, sc.encode(JSON.stringify({
|
|
442
|
+
session_id: session.session_id,
|
|
443
|
+
task_id: task_id,
|
|
444
|
+
mode: session.mode,
|
|
445
|
+
min_nodes: session.min_nodes,
|
|
446
|
+
max_nodes: session.max_nodes,
|
|
447
|
+
task_title: task.title,
|
|
448
|
+
})));
|
|
449
|
+
|
|
450
|
+
respond(msg, session);
|
|
451
|
+
}
|
|
452
|
+
|
|
453
|
+
/**
|
|
454
|
+
* mesh.collab.join — Node joins a collab session.
|
|
455
|
+
* Expects: { session_id, node_id, role? }
|
|
456
|
+
*/
|
|
457
|
+
async function handleCollabJoin(msg) {
|
|
458
|
+
const { session_id, node_id, role } = parseRequest(msg);
|
|
459
|
+
if (!session_id || !node_id) return respondError(msg, 'session_id and node_id required');
|
|
460
|
+
|
|
461
|
+
const session = await collabStore.addNode(session_id, node_id, role || 'worker');
|
|
462
|
+
if (!session) return respondError(msg, `Cannot join ${session_id}: full, closed, or already joined`);
|
|
463
|
+
|
|
464
|
+
log(`COLLAB JOIN ${session_id}: ${node_id} (${session.nodes.length}/${session.max_nodes || '∞'} nodes)`);
|
|
465
|
+
await collabStore.appendAudit(session_id, 'node_joined', { node_id, role: role || 'worker', total_nodes: session.nodes.length });
|
|
466
|
+
publishCollabEvent('joined', session);
|
|
467
|
+
|
|
468
|
+
// Check if recruiting should close → start first round
|
|
469
|
+
if (collabStore.isRecruitingDone(session)) {
|
|
470
|
+
await startCollabRound(session.session_id);
|
|
471
|
+
}
|
|
472
|
+
|
|
473
|
+
respond(msg, session);
|
|
474
|
+
}
|
|
475
|
+
|
|
476
|
+
/**
|
|
477
|
+
* mesh.collab.leave — Node leaves a collab session.
|
|
478
|
+
* Expects: { session_id, node_id, reason? }
|
|
479
|
+
*/
|
|
480
|
+
async function handleCollabLeave(msg) {
|
|
481
|
+
const { session_id, node_id, reason } = parseRequest(msg);
|
|
482
|
+
if (!session_id || !node_id) return respondError(msg, 'session_id and node_id required');
|
|
483
|
+
|
|
484
|
+
const session = await collabStore.removeNode(session_id, node_id);
|
|
485
|
+
if (!session) return respondError(msg, `Session ${session_id} not found`);
|
|
486
|
+
|
|
487
|
+
log(`COLLAB LEAVE ${session_id}: ${node_id} (${reason || 'no reason'})`);
|
|
488
|
+
await collabStore.appendAudit(session_id, 'node_left', { node_id, reason: reason || null, remaining_nodes: session.nodes.length });
|
|
489
|
+
|
|
490
|
+
// If below min_nodes and still active, abort
|
|
491
|
+
if (session.status === COLLAB_STATUS.ACTIVE && session.nodes.length < session.min_nodes) {
|
|
492
|
+
await collabStore.markAborted(session_id, `Below min_nodes: ${session.nodes.length} < ${session.min_nodes}`);
|
|
493
|
+
publishCollabEvent('aborted', session);
|
|
494
|
+
}
|
|
495
|
+
|
|
496
|
+
respond(msg, session);
|
|
497
|
+
}
|
|
498
|
+
|
|
499
|
+
/**
|
|
500
|
+
* mesh.collab.status — Get session status.
|
|
501
|
+
* Expects: { session_id }
|
|
502
|
+
*/
|
|
503
|
+
async function handleCollabStatus(msg) {
|
|
504
|
+
const { session_id } = parseRequest(msg);
|
|
505
|
+
if (!session_id) return respondError(msg, 'session_id required');
|
|
506
|
+
|
|
507
|
+
const session = await collabStore.get(session_id);
|
|
508
|
+
if (!session) return respondError(msg, `Session ${session_id} not found`);
|
|
509
|
+
|
|
510
|
+
respond(msg, collabStore.getSummary(session));
|
|
511
|
+
}
|
|
512
|
+
|
|
513
|
+
/**
|
|
514
|
+
* mesh.collab.find — Find collab session by task ID.
|
|
515
|
+
* Expects: { task_id }
|
|
516
|
+
* Returns: session summary or null.
|
|
517
|
+
* Used by agents to discover session_id (which includes a timestamp suffix).
|
|
518
|
+
*/
|
|
519
|
+
async function handleCollabFind(msg) {
|
|
520
|
+
const { task_id } = parseRequest(msg);
|
|
521
|
+
if (!task_id) return respondError(msg, 'task_id is required');
|
|
522
|
+
|
|
523
|
+
const session = await collabStore.findByTaskId(task_id);
|
|
524
|
+
if (!session) return respond(msg, null);
|
|
525
|
+
|
|
526
|
+
respond(msg, session);
|
|
527
|
+
}
|
|
528
|
+
|
|
529
|
+
/**
|
|
530
|
+
* mesh.collab.reflect — Node submits a reflection for the current round.
|
|
531
|
+
* Expects: { session_id, node_id, summary, learnings, artifacts, confidence, vote }
|
|
532
|
+
*/
|
|
533
|
+
async function handleCollabReflect(msg) {
|
|
534
|
+
const reflection = parseRequest(msg);
|
|
535
|
+
const { session_id } = reflection;
|
|
536
|
+
if (!session_id || !reflection.node_id) return respondError(msg, 'session_id and node_id required');
|
|
537
|
+
|
|
538
|
+
const session = await collabStore.submitReflection(session_id, reflection);
|
|
539
|
+
if (!session) return respondError(msg, `Cannot submit reflection to ${session_id}`);
|
|
540
|
+
|
|
541
|
+
log(`COLLAB REFLECT ${session_id} R${session.current_round}: ${reflection.node_id} (vote: ${reflection.vote}, conf: ${reflection.confidence}${reflection.parse_failed ? ', PARSE FAILED' : ''})`);
|
|
542
|
+
await collabStore.appendAudit(session_id, 'reflection_received', {
|
|
543
|
+
node_id: reflection.node_id, round: session.current_round,
|
|
544
|
+
vote: reflection.vote, confidence: reflection.confidence,
|
|
545
|
+
parse_failed: reflection.parse_failed || false,
|
|
546
|
+
});
|
|
547
|
+
publishCollabEvent('reflection_received', session);
|
|
548
|
+
|
|
549
|
+
// Check if all reflections are in → evaluate convergence
|
|
550
|
+
if (collabStore.isRoundComplete(session)) {
|
|
551
|
+
await evaluateRound(session_id);
|
|
552
|
+
}
|
|
553
|
+
|
|
554
|
+
respond(msg, session);
|
|
555
|
+
}
|
|
556
|
+
|
|
557
|
+
// ── Collab Round Management ─────────────────────────
|
|
558
|
+
|
|
559
|
+
/**
|
|
560
|
+
* Compute per-node scopes based on scope_strategy.
|
|
561
|
+
*
|
|
562
|
+
* Strategies:
|
|
563
|
+
* 'shared' — all nodes get full task scope (default)
|
|
564
|
+
* 'leader_only' — first node (leader) gets full scope, others get read-only marker
|
|
565
|
+
* 'partitioned' — task scope paths split evenly across nodes (round-robin)
|
|
566
|
+
*
|
|
567
|
+
* @returns {Object<string, string[]>} node_id → effective scope array
|
|
568
|
+
*/
|
|
569
|
+
function computeNodeScopes(nodes, taskScope, strategy) {
|
|
570
|
+
const scopes = {};
|
|
571
|
+
|
|
572
|
+
switch (strategy) {
|
|
573
|
+
case 'leader_only': {
|
|
574
|
+
// Leader = first node joined. Gets full write scope.
|
|
575
|
+
// Others get the scope but marked as reviewers (read-only instruction).
|
|
576
|
+
for (let i = 0; i < nodes.length; i++) {
|
|
577
|
+
if (i === 0) {
|
|
578
|
+
// Leader gets full scope
|
|
579
|
+
scopes[nodes[i].node_id] = taskScope.length > 0 ? taskScope : ['*'];
|
|
580
|
+
nodes[i].role = 'leader';
|
|
581
|
+
} else {
|
|
582
|
+
// Reviewers get scope with read-only marker — they review but don't modify
|
|
583
|
+
scopes[nodes[i].node_id] = taskScope.length > 0
|
|
584
|
+
? taskScope.map(s => `[REVIEW-ONLY] ${s}`)
|
|
585
|
+
: ['[REVIEW-ONLY] *'];
|
|
586
|
+
nodes[i].role = 'reviewer';
|
|
587
|
+
}
|
|
588
|
+
}
|
|
589
|
+
break;
|
|
590
|
+
}
|
|
591
|
+
|
|
592
|
+
case 'partitioned': {
|
|
593
|
+
// Split scope paths across nodes round-robin
|
|
594
|
+
if (taskScope.length === 0) {
|
|
595
|
+
// No explicit scope — everyone gets full access
|
|
596
|
+
for (const node of nodes) scopes[node.node_id] = ['*'];
|
|
597
|
+
} else {
|
|
598
|
+
for (const node of nodes) scopes[node.node_id] = [];
|
|
599
|
+
for (let i = 0; i < taskScope.length; i++) {
|
|
600
|
+
const nodeIdx = i % nodes.length;
|
|
601
|
+
scopes[nodes[nodeIdx].node_id].push(taskScope[i]);
|
|
602
|
+
}
|
|
603
|
+
// Ensure every node got at least one path (if more nodes than paths)
|
|
604
|
+
for (const node of nodes) {
|
|
605
|
+
if (scopes[node.node_id].length === 0) {
|
|
606
|
+
scopes[node.node_id] = ['[NO-SCOPE-ASSIGNED]'];
|
|
607
|
+
}
|
|
608
|
+
}
|
|
609
|
+
}
|
|
610
|
+
break;
|
|
611
|
+
}
|
|
612
|
+
|
|
613
|
+
case 'shared':
|
|
614
|
+
default: {
|
|
615
|
+
// Everyone gets full scope
|
|
616
|
+
for (const node of nodes) {
|
|
617
|
+
scopes[node.node_id] = taskScope.length > 0 ? taskScope : ['*'];
|
|
618
|
+
}
|
|
619
|
+
break;
|
|
620
|
+
}
|
|
621
|
+
}
|
|
622
|
+
|
|
623
|
+
return scopes;
|
|
624
|
+
}
|
|
625
|
+
|
|
626
|
+
/**
|
|
627
|
+
* Start a new round: compile shared intel and notify all nodes.
|
|
628
|
+
*/
|
|
629
|
+
async function startCollabRound(sessionId) {
|
|
630
|
+
const round = await collabStore.startRound(sessionId);
|
|
631
|
+
if (!round) {
|
|
632
|
+
// startRound returns null if session is aborted (e.g., too few nodes after pruning dead)
|
|
633
|
+
const session = await collabStore.get(sessionId);
|
|
634
|
+
if (session && session.status === COLLAB_STATUS.ABORTED) {
|
|
635
|
+
log(`COLLAB ABORTED ${sessionId}: not enough active nodes to continue (${session.nodes.length} < ${session.min_nodes})`);
|
|
636
|
+
await collabStore.appendAudit(sessionId, 'session_aborted', {
|
|
637
|
+
reason: 'insufficient_nodes', active: session.nodes.length, min_required: session.min_nodes,
|
|
638
|
+
recruited: session.recruited_count,
|
|
639
|
+
});
|
|
640
|
+
publishCollabEvent('aborted', session);
|
|
641
|
+
await store.markFailed(session.task_id, `Collab aborted: too few active nodes (${session.nodes.length} < ${session.min_nodes})`);
|
|
642
|
+
}
|
|
643
|
+
return;
|
|
644
|
+
}
|
|
645
|
+
|
|
646
|
+
const session = await collabStore.get(sessionId);
|
|
647
|
+
log(`COLLAB ROUND ${sessionId} R${round.round_number} START (${session.nodes.length} nodes)`);
|
|
648
|
+
await collabStore.appendAudit(sessionId, 'round_started', {
|
|
649
|
+
round: round.round_number, active_nodes: session.nodes.map(n => n.node_id),
|
|
650
|
+
recruited_count: session.recruited_count,
|
|
651
|
+
});
|
|
652
|
+
publishCollabEvent('round_started', session);
|
|
653
|
+
|
|
654
|
+
// Compute effective scope per node based on scope_strategy
|
|
655
|
+
const parentTask = await store.get(session.task_id);
|
|
656
|
+
const taskScope = parentTask?.scope || [];
|
|
657
|
+
const scopeStrategy = session.scope_strategy || 'shared';
|
|
658
|
+
const nodeScopes = computeNodeScopes(session.nodes, taskScope, scopeStrategy);
|
|
659
|
+
|
|
660
|
+
// Notify each node with their enforced scope
|
|
661
|
+
for (const node of session.nodes) {
|
|
662
|
+
const effectiveScope = nodeScopes[node.node_id] || node.scope;
|
|
663
|
+
nc.publish(`mesh.collab.${sessionId}.node.${node.node_id}.round`, sc.encode(JSON.stringify({
|
|
664
|
+
session_id: sessionId,
|
|
665
|
+
task_id: session.task_id,
|
|
666
|
+
round_number: round.round_number,
|
|
667
|
+
shared_intel: round.shared_intel,
|
|
668
|
+
my_scope: effectiveScope,
|
|
669
|
+
my_role: node.role,
|
|
670
|
+
mode: session.mode,
|
|
671
|
+
current_turn: session.current_turn, // for sequential mode
|
|
672
|
+
scope_strategy: scopeStrategy,
|
|
673
|
+
})));
|
|
674
|
+
}
|
|
675
|
+
}
|
|
676
|
+
|
|
677
|
+
/**
|
|
678
|
+
* Evaluate the current round: check convergence, advance or complete.
|
|
679
|
+
*/
|
|
680
|
+
async function evaluateRound(sessionId) {
|
|
681
|
+
const session = await collabStore.get(sessionId);
|
|
682
|
+
if (!session) return;
|
|
683
|
+
|
|
684
|
+
const currentRound = session.rounds[session.rounds.length - 1];
|
|
685
|
+
currentRound.completed_at = new Date().toISOString();
|
|
686
|
+
await collabStore.put(session);
|
|
687
|
+
|
|
688
|
+
// Check convergence
|
|
689
|
+
const converged = collabStore.checkConvergence(session);
|
|
690
|
+
const maxReached = collabStore.isMaxRoundsReached(session);
|
|
691
|
+
|
|
692
|
+
// Audit the convergence evaluation
|
|
693
|
+
const votes = currentRound.reflections.map(r => ({ node: r.node_id, vote: r.vote, confidence: r.confidence, parse_failed: r.parse_failed || false }));
|
|
694
|
+
await collabStore.appendAudit(sessionId, 'round_evaluated', {
|
|
695
|
+
round: session.current_round, votes,
|
|
696
|
+
converged, max_reached: maxReached,
|
|
697
|
+
outcome: converged ? 'converged' : maxReached ? 'max_rounds' : 'continue',
|
|
698
|
+
});
|
|
699
|
+
|
|
700
|
+
if (converged) {
|
|
701
|
+
log(`COLLAB CONVERGED ${sessionId} after ${session.current_round} rounds`);
|
|
702
|
+
await collabStore.markConverged(sessionId);
|
|
703
|
+
publishCollabEvent('converged', session);
|
|
704
|
+
|
|
705
|
+
// Collect artifacts from all reflections
|
|
706
|
+
const allArtifacts = [];
|
|
707
|
+
const contributions = {};
|
|
708
|
+
for (const round of session.rounds) {
|
|
709
|
+
for (const r of round.reflections) {
|
|
710
|
+
allArtifacts.push(...r.artifacts);
|
|
711
|
+
contributions[r.node_id] = r.summary;
|
|
712
|
+
}
|
|
713
|
+
}
|
|
714
|
+
|
|
715
|
+
await collabStore.markCompleted(sessionId, {
|
|
716
|
+
artifacts: [...new Set(allArtifacts)],
|
|
717
|
+
summary: `Converged after ${session.current_round} rounds with ${session.nodes.length} nodes`,
|
|
718
|
+
node_contributions: contributions,
|
|
719
|
+
});
|
|
720
|
+
await collabStore.appendAudit(sessionId, 'session_completed', {
|
|
721
|
+
outcome: 'converged', rounds: session.current_round,
|
|
722
|
+
artifacts: [...new Set(allArtifacts)].length,
|
|
723
|
+
node_count: session.nodes.length, recruited_count: session.recruited_count,
|
|
724
|
+
});
|
|
725
|
+
|
|
726
|
+
// Complete the parent task
|
|
727
|
+
const updatedSession = await collabStore.get(sessionId);
|
|
728
|
+
await store.markCompleted(session.task_id, updatedSession.result);
|
|
729
|
+
publishEvent('completed', await store.get(session.task_id));
|
|
730
|
+
publishCollabEvent('completed', updatedSession);
|
|
731
|
+
|
|
732
|
+
} else if (maxReached) {
|
|
733
|
+
log(`COLLAB MAX ROUNDS ${sessionId}: ${session.current_round}/${session.max_rounds}. Completing with current artifacts.`);
|
|
734
|
+
|
|
735
|
+
const allArtifacts = [];
|
|
736
|
+
const contributions = {};
|
|
737
|
+
for (const round of session.rounds) {
|
|
738
|
+
for (const r of round.reflections) {
|
|
739
|
+
allArtifacts.push(...r.artifacts);
|
|
740
|
+
contributions[r.node_id] = r.summary;
|
|
741
|
+
}
|
|
742
|
+
}
|
|
743
|
+
|
|
744
|
+
await collabStore.markCompleted(sessionId, {
|
|
745
|
+
artifacts: [...new Set(allArtifacts)],
|
|
746
|
+
summary: `Max rounds (${session.max_rounds}) reached. ${session.nodes.length} nodes participated.`,
|
|
747
|
+
node_contributions: contributions,
|
|
748
|
+
});
|
|
749
|
+
await collabStore.appendAudit(sessionId, 'session_completed', {
|
|
750
|
+
outcome: 'max_rounds_reached', rounds: session.current_round,
|
|
751
|
+
max_rounds: session.max_rounds, artifacts: [...new Set(allArtifacts)].length,
|
|
752
|
+
node_count: session.nodes.length, recruited_count: session.recruited_count,
|
|
753
|
+
});
|
|
754
|
+
|
|
755
|
+
// Complete parent task (flagged for review since not truly converged)
|
|
756
|
+
const updatedSession = await collabStore.get(sessionId);
|
|
757
|
+
await store.markCompleted(session.task_id, {
|
|
758
|
+
...updatedSession.result,
|
|
759
|
+
max_rounds_reached: true,
|
|
760
|
+
});
|
|
761
|
+
publishEvent('completed', await store.get(session.task_id));
|
|
762
|
+
publishCollabEvent('completed', updatedSession);
|
|
763
|
+
|
|
764
|
+
} else {
|
|
765
|
+
// Not converged, not maxed out → next round
|
|
766
|
+
log(`COLLAB ROUND ${sessionId} R${session.current_round} DONE. Not converged. Starting next round.`);
|
|
767
|
+
await startCollabRound(sessionId);
|
|
768
|
+
}
|
|
769
|
+
}
|
|
770
|
+
|
|
771
|
+
// ── Collab Recruiting Timer ─────────────────────────
|
|
772
|
+
|
|
773
|
+
/**
|
|
774
|
+
* Check recruiting sessions whose join window has expired.
|
|
775
|
+
* If min_nodes reached → start first round. Otherwise → abort.
|
|
776
|
+
*/
|
|
777
|
+
async function checkRecruitingDeadlines() {
|
|
778
|
+
const recruiting = await collabStore.list({ status: COLLAB_STATUS.RECRUITING });
|
|
779
|
+
for (const session of recruiting) {
|
|
780
|
+
if (!collabStore.isRecruitingDone(session)) continue;
|
|
781
|
+
|
|
782
|
+
if (session.nodes.length >= session.min_nodes) {
|
|
783
|
+
log(`COLLAB RECRUIT DONE ${session.session_id}: ${session.nodes.length} nodes joined. Starting round 1.`);
|
|
784
|
+
await startCollabRound(session.session_id);
|
|
785
|
+
} else {
|
|
786
|
+
log(`COLLAB RECRUIT FAILED ${session.session_id}: only ${session.nodes.length}/${session.min_nodes} nodes. Aborting.`);
|
|
787
|
+
await collabStore.markAborted(session.session_id, `Not enough nodes: ${session.nodes.length} < ${session.min_nodes}`);
|
|
788
|
+
publishCollabEvent('aborted', await collabStore.get(session.session_id));
|
|
789
|
+
// Release the parent task
|
|
790
|
+
await store.markReleased(session.task_id, `Collab session failed to recruit: ${session.nodes.length}/${session.min_nodes} nodes`);
|
|
791
|
+
}
|
|
792
|
+
}
|
|
793
|
+
}
|
|
794
|
+
|
|
795
|
+
// ── Plan Event Publishing ───────────────────────────
|
|
796
|
+
|
|
797
|
+
function publishPlanEvent(eventType, plan) {
|
|
798
|
+
nc.publish(`mesh.events.plan.${eventType}`, sc.encode(JSON.stringify({
|
|
799
|
+
event: eventType,
|
|
800
|
+
plan_id: plan.plan_id,
|
|
801
|
+
parent_task_id: plan.parent_task_id,
|
|
802
|
+
plan,
|
|
803
|
+
timestamp: new Date().toISOString(),
|
|
804
|
+
})));
|
|
805
|
+
}
|
|
806
|
+
|
|
807
|
+
// ── Plan RPC Handlers ───────────────────────────────
|
|
808
|
+
|
|
809
|
+
/**
|
|
810
|
+
* mesh.plans.create — Create a new plan from decomposition.
|
|
811
|
+
* Expects: { parent_task_id, title, description, planner, subtasks[], requires_approval? }
|
|
812
|
+
*/
|
|
813
|
+
async function handlePlanCreate(msg) {
|
|
814
|
+
const params = parseRequest(msg);
|
|
815
|
+
if (!params.parent_task_id || !params.title) {
|
|
816
|
+
return respondError(msg, 'parent_task_id and title are required');
|
|
817
|
+
}
|
|
818
|
+
|
|
819
|
+
// Verify parent task exists
|
|
820
|
+
const parentTask = await store.get(params.parent_task_id);
|
|
821
|
+
if (!parentTask) return respondError(msg, `Parent task ${params.parent_task_id} not found`);
|
|
822
|
+
|
|
823
|
+
let plan = createPlan(params);
|
|
824
|
+
|
|
825
|
+
// Auto-route subtasks that don't have explicit delegation
|
|
826
|
+
plan = autoRoutePlan(plan);
|
|
827
|
+
|
|
828
|
+
await planStore.put(plan);
|
|
829
|
+
|
|
830
|
+
log(`PLAN CREATE ${plan.plan_id}: "${plan.title}" (${plan.subtasks.length} subtasks, ${plan.estimated_waves} waves)`);
|
|
831
|
+
publishPlanEvent('created', plan);
|
|
832
|
+
|
|
833
|
+
respond(msg, plan);
|
|
834
|
+
}
|
|
835
|
+
|
|
836
|
+
/**
|
|
837
|
+
* mesh.plans.get — Get a plan by ID.
|
|
838
|
+
* Expects: { plan_id }
|
|
839
|
+
*/
|
|
840
|
+
async function handlePlanGet(msg) {
|
|
841
|
+
const { plan_id } = parseRequest(msg);
|
|
842
|
+
if (!plan_id) return respondError(msg, 'plan_id is required');
|
|
843
|
+
|
|
844
|
+
const plan = await planStore.get(plan_id);
|
|
845
|
+
if (!plan) return respondError(msg, `Plan ${plan_id} not found`);
|
|
846
|
+
|
|
847
|
+
respond(msg, plan);
|
|
848
|
+
}
|
|
849
|
+
|
|
850
|
+
/**
|
|
851
|
+
* mesh.plans.list — List plans with optional filter.
|
|
852
|
+
* Expects: { status?, parent_task_id? }
|
|
853
|
+
*/
|
|
854
|
+
async function handlePlanList(msg) {
|
|
855
|
+
const filter = parseRequest(msg);
|
|
856
|
+
const plans = await planStore.list(filter);
|
|
857
|
+
respond(msg, plans.map(p => planStore.getSummary(p)));
|
|
858
|
+
}
|
|
859
|
+
|
|
860
|
+
/**
|
|
861
|
+
* mesh.plans.approve — Approve a plan and materialize subtasks.
|
|
862
|
+
* Expects: { plan_id, approved_by? }
|
|
863
|
+
* Triggers: subtask materialization → dispatch wave 0
|
|
864
|
+
*/
|
|
865
|
+
async function handlePlanApprove(msg) {
|
|
866
|
+
const { plan_id, approved_by } = parseRequest(msg);
|
|
867
|
+
if (!plan_id) return respondError(msg, 'plan_id is required');
|
|
868
|
+
|
|
869
|
+
const plan = await planStore.approve(plan_id, approved_by || 'gui');
|
|
870
|
+
if (!plan) return respondError(msg, `Plan ${plan_id} not found`);
|
|
871
|
+
|
|
872
|
+
log(`PLAN APPROVED ${plan_id} by ${plan.approved_by}`);
|
|
873
|
+
publishPlanEvent('approved', plan);
|
|
874
|
+
|
|
875
|
+
// Start execution → materialize wave 0
|
|
876
|
+
await planStore.startExecuting(plan_id);
|
|
877
|
+
await advancePlanWave(plan_id);
|
|
878
|
+
|
|
879
|
+
respond(msg, await planStore.get(plan_id));
|
|
880
|
+
}
|
|
881
|
+
|
|
882
|
+
/**
|
|
883
|
+
* mesh.plans.abort — Abort a plan and cancel pending subtasks.
|
|
884
|
+
* Expects: { plan_id, reason? }
|
|
885
|
+
*/
|
|
886
|
+
async function handlePlanAbort(msg) {
|
|
887
|
+
const { plan_id, reason } = parseRequest(msg);
|
|
888
|
+
if (!plan_id) return respondError(msg, 'plan_id is required');
|
|
889
|
+
|
|
890
|
+
const plan = await planStore.markAborted(plan_id, reason || 'manually aborted');
|
|
891
|
+
if (!plan) return respondError(msg, `Plan ${plan_id} not found`);
|
|
892
|
+
|
|
893
|
+
log(`PLAN ABORTED ${plan_id}: ${reason || 'no reason'}`);
|
|
894
|
+
publishPlanEvent('aborted', plan);
|
|
895
|
+
|
|
896
|
+
respond(msg, plan);
|
|
897
|
+
}
|
|
898
|
+
|
|
899
|
+
/**
|
|
900
|
+
* mesh.plans.subtask.update — Update a subtask's status.
|
|
901
|
+
* Called by mesh-bridge when a task completes/fails.
|
|
902
|
+
* Expects: { plan_id, subtask_id, status, result?, mesh_task_id?, kanban_task_id?, owner? }
|
|
903
|
+
*/
|
|
904
|
+
async function handlePlanSubtaskUpdate(msg) {
|
|
905
|
+
const { plan_id, subtask_id, ...updates } = parseRequest(msg);
|
|
906
|
+
if (!plan_id || !subtask_id) return respondError(msg, 'plan_id and subtask_id required');
|
|
907
|
+
|
|
908
|
+
const plan = await planStore.updateSubtask(plan_id, subtask_id, updates);
|
|
909
|
+
if (!plan) return respondError(msg, `Plan ${plan_id} or subtask ${subtask_id} not found`);
|
|
910
|
+
|
|
911
|
+
log(`PLAN SUBTASK ${plan_id}/${subtask_id}: ${updates.status || 'updated'}`);
|
|
912
|
+
|
|
913
|
+
if (updates.status === SUBTASK_STATUS.COMPLETED) {
|
|
914
|
+
publishPlanEvent('subtask_completed', plan);
|
|
915
|
+
// Check if next wave should dispatch
|
|
916
|
+
await advancePlanWave(plan_id);
|
|
917
|
+
}
|
|
918
|
+
|
|
919
|
+
respond(msg, plan);
|
|
920
|
+
}
|
|
921
|
+
|
|
922
|
+
// ── Plan Wave Advancement ───────────────────────────
|
|
923
|
+
|
|
924
|
+
/**
|
|
925
|
+
* Check if next wave subtasks are ready and dispatch them.
|
|
926
|
+
*/
|
|
927
|
+
async function advancePlanWave(planId) {
|
|
928
|
+
const plan = await planStore.get(planId);
|
|
929
|
+
if (!plan || plan.status !== PLAN_STATUS.EXECUTING) return;
|
|
930
|
+
|
|
931
|
+
// Check if plan is fully done
|
|
932
|
+
if (planStore.isPlanComplete(plan)) {
|
|
933
|
+
await planStore.markCompleted(planId);
|
|
934
|
+
const completedPlan = await planStore.get(planId);
|
|
935
|
+
log(`PLAN COMPLETED ${planId}: all ${plan.subtasks.length} subtasks done`);
|
|
936
|
+
publishPlanEvent('completed', completedPlan);
|
|
937
|
+
|
|
938
|
+
// Mark parent task as waiting-user (Gui reviews)
|
|
939
|
+
const parentTask = await store.get(plan.parent_task_id);
|
|
940
|
+
if (parentTask && parentTask.status !== TASK_STATUS.COMPLETED) {
|
|
941
|
+
await store.markCompleted(plan.parent_task_id, {
|
|
942
|
+
success: !planStore.hasFailures(plan),
|
|
943
|
+
summary: `Plan ${planId} completed (${plan.subtasks.length} subtasks)`,
|
|
944
|
+
plan_id: planId,
|
|
945
|
+
});
|
|
946
|
+
publishEvent('completed', await store.get(plan.parent_task_id));
|
|
947
|
+
}
|
|
948
|
+
return;
|
|
949
|
+
}
|
|
950
|
+
|
|
951
|
+
// Get next wave subtasks
|
|
952
|
+
const ready = planStore.getNextWaveSubtasks(plan);
|
|
953
|
+
if (ready.length === 0) return;
|
|
954
|
+
|
|
955
|
+
const waveNum = ready[0].wave;
|
|
956
|
+
log(`PLAN WAVE ${planId} W${waveNum}: dispatching ${ready.length} subtasks`);
|
|
957
|
+
|
|
958
|
+
for (const st of ready) {
|
|
959
|
+
st.status = SUBTASK_STATUS.QUEUED;
|
|
960
|
+
|
|
961
|
+
// Dispatch based on delegation mode
|
|
962
|
+
switch (st.delegation.mode) {
|
|
963
|
+
case 'solo_mesh':
|
|
964
|
+
case 'collab_mesh': {
|
|
965
|
+
// Submit as mesh task
|
|
966
|
+
const meshTask = createTask({
|
|
967
|
+
task_id: st.subtask_id,
|
|
968
|
+
title: st.title,
|
|
969
|
+
description: st.description,
|
|
970
|
+
budget_minutes: st.budget_minutes,
|
|
971
|
+
metric: st.metric,
|
|
972
|
+
scope: st.scope,
|
|
973
|
+
success_criteria: st.success_criteria,
|
|
974
|
+
tags: ['plan', planId],
|
|
975
|
+
collaboration: st.delegation.collaboration || undefined,
|
|
976
|
+
});
|
|
977
|
+
await store.put(meshTask);
|
|
978
|
+
st.mesh_task_id = meshTask.task_id;
|
|
979
|
+
publishEvent('submitted', meshTask);
|
|
980
|
+
|
|
981
|
+
// Auto-create collab session if needed
|
|
982
|
+
if (st.delegation.collaboration && collabStore) {
|
|
983
|
+
const session = createSession(meshTask.task_id, st.delegation.collaboration);
|
|
984
|
+
await collabStore.put(session);
|
|
985
|
+
|
|
986
|
+
// Store session_id back in mesh task for agent discovery
|
|
987
|
+
meshTask.collab_session_id = session.session_id;
|
|
988
|
+
await store.put(meshTask);
|
|
989
|
+
|
|
990
|
+
log(` → COLLAB SESSION ${session.session_id} for subtask ${st.subtask_id}`);
|
|
991
|
+
publishCollabEvent('created', session);
|
|
992
|
+
|
|
993
|
+
nc.publish(`mesh.collab.${session.session_id}.recruit`, sc.encode(JSON.stringify({
|
|
994
|
+
session_id: session.session_id,
|
|
995
|
+
task_id: meshTask.task_id,
|
|
996
|
+
mode: session.mode,
|
|
997
|
+
min_nodes: session.min_nodes,
|
|
998
|
+
max_nodes: session.max_nodes,
|
|
999
|
+
task_title: meshTask.title,
|
|
1000
|
+
})));
|
|
1001
|
+
}
|
|
1002
|
+
|
|
1003
|
+
log(` → MESH ${st.subtask_id}: "${st.title}" (${st.delegation.mode})`);
|
|
1004
|
+
break;
|
|
1005
|
+
}
|
|
1006
|
+
|
|
1007
|
+
case 'local':
|
|
1008
|
+
case 'soul': {
|
|
1009
|
+
// These are handled via kanban (active-tasks.md) by mesh-bridge
|
|
1010
|
+
// Just mark as queued — bridge will materialize in kanban
|
|
1011
|
+
st.kanban_task_id = st.subtask_id;
|
|
1012
|
+
log(` → LOCAL ${st.subtask_id}: "${st.title}" (${st.delegation.mode}${st.delegation.soul_id ? `: ${st.delegation.soul_id}` : ''})`);
|
|
1013
|
+
break;
|
|
1014
|
+
}
|
|
1015
|
+
|
|
1016
|
+
case 'human': {
|
|
1017
|
+
st.status = SUBTASK_STATUS.BLOCKED;
|
|
1018
|
+
st.kanban_task_id = st.subtask_id;
|
|
1019
|
+
log(` → HUMAN ${st.subtask_id}: "${st.title}" (needs Gui)`);
|
|
1020
|
+
break;
|
|
1021
|
+
}
|
|
1022
|
+
}
|
|
1023
|
+
}
|
|
1024
|
+
|
|
1025
|
+
await planStore.put(plan);
|
|
1026
|
+
|
|
1027
|
+
publishPlanEvent('wave_started', plan);
|
|
1028
|
+
}
|
|
1029
|
+
|
|
1030
|
+
// ── Plan Progress on Task Completion ────────────────
|
|
1031
|
+
|
|
1032
|
+
/**
 * When a mesh task finishes, find the executing plan that owns it, record the
 * subtask outcome, and — on success — attempt to dispatch the plan's next wave.
 * Called after handleComplete/handleFail.
 *
 * @param {string} taskId - completed/failed task id; matched against a
 *   subtask's `mesh_task_id` or its `subtask_id`
 * @param {string} status - 'completed' marks the subtask COMPLETED; any other
 *   value marks it FAILED
 */
async function checkPlanProgress(taskId, status) {
  // Only plans currently executing can own an in-flight mesh task.
  const executing = await planStore.list({ status: PLAN_STATUS.EXECUTING });

  // A task belongs to at most one plan, so we stop at the first match.
  for (const plan of executing) {
    const subtask = plan.subtasks.find(
      (s) => s.mesh_task_id === taskId || s.subtask_id === taskId,
    );
    if (!subtask) continue;

    subtask.status =
      status === 'completed' ? SUBTASK_STATUS.COMPLETED : SUBTASK_STATUS.FAILED;
    // Persist the outcome before emitting events or advancing waves.
    await planStore.put(plan);

    log(`PLAN PROGRESS ${plan.plan_id}: subtask ${subtask.subtask_id} → ${subtask.status}`);

    // Only a successful subtask can unblock the next wave; failures are left
    // recorded for the plan's failure handling elsewhere.
    if (subtask.status === SUBTASK_STATUS.COMPLETED) {
      publishPlanEvent('subtask_completed', plan);
      await advancePlanWave(plan.plan_id);
    }

    break;
  }
}
|
|
1056
|
+
|
|
376
1057
|
// ── Main ────────────────────────────────────────────
|
|
377
1058
|
|
|
378
1059
|
async function main() {
|
|
379
1060
|
log('Starting mesh task daemon...');
|
|
380
1061
|
|
|
381
|
-
nc = await connect(
|
|
1062
|
+
nc = await connect({ servers: NATS_URL, timeout: 5000 });
|
|
382
1063
|
log(`Connected to NATS at ${NATS_URL}`);
|
|
383
1064
|
|
|
384
1065
|
// Initialize task store
|
|
@@ -387,6 +1068,16 @@ async function main() {
|
|
|
387
1068
|
store = new TaskStore(kv);
|
|
388
1069
|
log(`Task store initialized (bucket: ${KV_BUCKET})`);
|
|
389
1070
|
|
|
1071
|
+
// Initialize collab store
|
|
1072
|
+
const collabKv = await js.views.kv(COLLAB_KV_BUCKET);
|
|
1073
|
+
collabStore = new CollabStore(collabKv);
|
|
1074
|
+
log(`Collab store initialized (bucket: ${COLLAB_KV_BUCKET})`);
|
|
1075
|
+
|
|
1076
|
+
// Initialize plan store
|
|
1077
|
+
const plansKv = await js.views.kv(PLANS_KV_BUCKET);
|
|
1078
|
+
planStore = new PlanStore(plansKv);
|
|
1079
|
+
log(`Plan store initialized (bucket: ${PLANS_KV_BUCKET})`);
|
|
1080
|
+
|
|
390
1081
|
// Subscribe to all task subjects
|
|
391
1082
|
const handlers = {
|
|
392
1083
|
'mesh.tasks.submit': handleSubmit,
|
|
@@ -400,6 +1091,20 @@ async function main() {
|
|
|
400
1091
|
'mesh.tasks.list': handleList,
|
|
401
1092
|
'mesh.tasks.get': handleGet,
|
|
402
1093
|
'mesh.tasks.cancel': handleCancel,
|
|
1094
|
+
// Collab handlers
|
|
1095
|
+
'mesh.collab.create': handleCollabCreate,
|
|
1096
|
+
'mesh.collab.join': handleCollabJoin,
|
|
1097
|
+
'mesh.collab.leave': handleCollabLeave,
|
|
1098
|
+
'mesh.collab.status': handleCollabStatus,
|
|
1099
|
+
'mesh.collab.find': handleCollabFind,
|
|
1100
|
+
'mesh.collab.reflect': handleCollabReflect,
|
|
1101
|
+
// Plan handlers
|
|
1102
|
+
'mesh.plans.create': handlePlanCreate,
|
|
1103
|
+
'mesh.plans.get': handlePlanGet,
|
|
1104
|
+
'mesh.plans.list': handlePlanList,
|
|
1105
|
+
'mesh.plans.approve': handlePlanApprove,
|
|
1106
|
+
'mesh.plans.abort': handlePlanAbort,
|
|
1107
|
+
'mesh.plans.subtask.update': handlePlanSubtaskUpdate,
|
|
403
1108
|
};
|
|
404
1109
|
|
|
405
1110
|
const subs = [];
|
|
@@ -422,8 +1127,10 @@ async function main() {
|
|
|
422
1127
|
// Start enforcement loops
|
|
423
1128
|
const budgetTimer = setInterval(enforceBudgets, BUDGET_CHECK_INTERVAL);
|
|
424
1129
|
const stallTimer = setInterval(detectStalls, BUDGET_CHECK_INTERVAL);
|
|
1130
|
+
const recruitTimer = setInterval(checkRecruitingDeadlines, 5000); // check every 5s
|
|
425
1131
|
log(`Budget enforcement: every ${BUDGET_CHECK_INTERVAL / 1000}s`);
|
|
426
1132
|
log(`Stall detection: every ${BUDGET_CHECK_INTERVAL / 1000}s (threshold: ${STALL_MINUTES}m)`);
|
|
1133
|
+
log(`Collab recruiting check: every 5s`);
|
|
427
1134
|
|
|
428
1135
|
|
|
429
1136
|
log('Task daemon ready.');
|
|
@@ -433,6 +1140,7 @@ async function main() {
|
|
|
433
1140
|
log('Shutting down...');
|
|
434
1141
|
clearInterval(budgetTimer);
|
|
435
1142
|
clearInterval(stallTimer);
|
|
1143
|
+
clearInterval(recruitTimer);
|
|
436
1144
|
for (const sub of subs) sub.unsubscribe();
|
|
437
1145
|
await nc.drain();
|
|
438
1146
|
process.exit(0);
|