agent-worker 0.12.0 → 0.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{backends-DLaP0rMW.mjs → backends-C6WBIn9H.mjs} +345 -108
- package/dist/backends-Cv0oM9Ru.mjs +3 -0
- package/dist/cli/index.mjs +1428 -108
- package/dist/context-CzqQeThq.mjs +4 -0
- package/dist/index.d.mts +115 -64
- package/dist/index.mjs +446 -3
- package/dist/{memory-provider-BtLYtdQH.mjs → memory-provider-0nuDxzYQ.mjs} +1 -1
- package/dist/runner-DV86expc.mjs +663 -0
- package/dist/{workflow-CNlUyGit.mjs → workflow-DogkVjOs.mjs} +60 -6
- package/package.json +5 -2
- package/dist/backends-DG5igQii.mjs +0 -3
- package/dist/context-BqEyt2SF.mjs +0 -4
- package/dist/logger-Bfdo83xL.mjs +0 -63
- package/dist/runner-CQJYnM7D.mjs +0 -1489
- package/dist/worker-CJ5_b2_q.mjs +0 -446
package/dist/cli/index.mjs
CHANGED
|
@@ -1,12 +1,12 @@
|
|
|
1
1
|
#!/usr/bin/env node
|
|
2
|
-
import {
|
|
3
|
-
import {
|
|
2
|
+
import { A as parseModel, I as getDefaultModel, P as createModelAsync, a as createMockBackend, j as FRONTIER_MODELS, k as normalizeBackendType, n as createBackend } from "../backends-C6WBIn9H.mjs";
|
|
3
|
+
import { generateText, jsonSchema, stepCountIs, tool } from "ai";
|
|
4
4
|
import { existsSync, mkdirSync, readFileSync, readdirSync, unlinkSync, writeFileSync } from "node:fs";
|
|
5
5
|
import { dirname, isAbsolute, join, relative } from "node:path";
|
|
6
6
|
import { appendFile, mkdir, open, readFile, readdir, stat, unlink, writeFile } from "node:fs/promises";
|
|
7
7
|
import { z } from "zod";
|
|
8
8
|
import { homedir } from "node:os";
|
|
9
|
-
import { spawn } from "node:child_process";
|
|
9
|
+
import { execSync, spawn } from "node:child_process";
|
|
10
10
|
import { Command, Option } from "commander";
|
|
11
11
|
import { Hono } from "hono";
|
|
12
12
|
import { streamSSE } from "hono/streaming";
|
|
@@ -14,6 +14,11 @@ import { randomUUID } from "node:crypto";
|
|
|
14
14
|
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
15
15
|
import { nanoid } from "nanoid";
|
|
16
16
|
import { WebStandardStreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/webStandardStreamableHttp.js";
|
|
17
|
+
import { createServer } from "node:http";
|
|
18
|
+
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
|
|
19
|
+
import { MockLanguageModelV3, mockValues } from "ai/test";
|
|
20
|
+
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
|
21
|
+
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
|
|
17
22
|
|
|
18
23
|
//#region rolldown:runtime
|
|
19
24
|
var __defProp = Object.defineProperty;
|
|
@@ -77,39 +82,53 @@ function isDaemonRunning() {
|
|
|
77
82
|
return null;
|
|
78
83
|
}
|
|
79
84
|
}
|
|
80
|
-
|
|
81
|
-
//#endregion
|
|
82
|
-
//#region src/agent/handle.ts
|
|
85
|
+
const DURATION_RE = /^(\d+(?:\.\d+)?)\s*(ms|s|m|h|d)$/;
|
|
83
86
|
/**
|
|
84
|
-
*
|
|
85
|
-
*
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
}
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
87
|
+
* Parse a duration string like "30s", "5m", "2h" into milliseconds.
|
|
88
|
+
* Returns null if not a valid duration format.
|
|
89
|
+
*/
|
|
90
|
+
function parseDuration(value) {
|
|
91
|
+
const match = value.match(DURATION_RE);
|
|
92
|
+
if (!match) return null;
|
|
93
|
+
return parseFloat(match[1]) * {
|
|
94
|
+
ms: 1,
|
|
95
|
+
s: 1e3,
|
|
96
|
+
m: 60 * 1e3,
|
|
97
|
+
h: 3600 * 1e3,
|
|
98
|
+
d: 1440 * 60 * 1e3
|
|
99
|
+
}[match[2]];
|
|
100
|
+
}
|
|
101
|
+
/**
|
|
102
|
+
* Resolve a wakeup value into a typed schedule.
|
|
103
|
+
* - number → interval (ms)
|
|
104
|
+
* - "30s"/"5m"/"2h" → interval (converted to ms)
|
|
105
|
+
* - cron expression → cron
|
|
106
|
+
*/
|
|
107
|
+
function resolveSchedule(config) {
|
|
108
|
+
const { wakeup, prompt } = config;
|
|
109
|
+
if (typeof wakeup === "number") {
|
|
110
|
+
if (wakeup <= 0) throw new Error("Wakeup interval must be positive");
|
|
111
|
+
return {
|
|
112
|
+
type: "interval",
|
|
113
|
+
ms: wakeup,
|
|
114
|
+
prompt
|
|
115
|
+
};
|
|
111
116
|
}
|
|
112
|
-
|
|
117
|
+
const ms = parseDuration(wakeup);
|
|
118
|
+
if (ms !== null) {
|
|
119
|
+
if (ms <= 0) throw new Error("Wakeup duration must be positive");
|
|
120
|
+
return {
|
|
121
|
+
type: "interval",
|
|
122
|
+
ms,
|
|
123
|
+
prompt
|
|
124
|
+
};
|
|
125
|
+
}
|
|
126
|
+
return {
|
|
127
|
+
type: "cron",
|
|
128
|
+
expr: wakeup,
|
|
129
|
+
prompt
|
|
130
|
+
};
|
|
131
|
+
}
|
|
113
132
|
|
|
114
133
|
//#endregion
|
|
115
134
|
//#region src/agent/store.ts
|
|
@@ -220,7 +239,7 @@ function getAgentId(extra) {
|
|
|
220
239
|
/**
|
|
221
240
|
* Format inbox messages for JSON display.
|
|
222
241
|
*/
|
|
223
|
-
function formatInbox(messages) {
|
|
242
|
+
function formatInbox$1(messages) {
|
|
224
243
|
if (messages.length === 0) return JSON.stringify({
|
|
225
244
|
messages: [],
|
|
226
245
|
count: 0
|
|
@@ -359,7 +378,7 @@ function registerInboxTools(server, ctx, options) {
|
|
|
359
378
|
if (debugLog && messages.length > 0) debugLog(`[mcp:${agent}] my_inbox → ${messages.length} unread`);
|
|
360
379
|
return { content: [{
|
|
361
380
|
type: "text",
|
|
362
|
-
text: formatInbox(messages)
|
|
381
|
+
text: formatInbox$1(messages)
|
|
363
382
|
}] };
|
|
364
383
|
});
|
|
365
384
|
server.tool("my_inbox_ack", "Acknowledge inbox messages up to a message ID. Call after processing messages.", { until: z.string().describe("Acknowledge messages up to and including this message ID") }, async ({ until }, extra) => {
|
|
@@ -1420,19 +1439,1200 @@ function createFileContextProvider(contextDir, validAgents) {
|
|
|
1420
1439
|
return new FileContextProvider(new FileStorage(contextDir), validAgents, contextDir);
|
|
1421
1440
|
}
|
|
1422
1441
|
|
|
1442
|
+
//#endregion
|
|
1443
|
+
//#region src/workflow/context/http-transport.ts
|
|
1444
|
+
/**
|
|
1445
|
+
* HTTP-based MCP Transport
|
|
1446
|
+
*
|
|
1447
|
+
* Hosts MCP server over HTTP using StreamableHTTPServerTransport.
|
|
1448
|
+
* CLI agents (cursor, claude, codex) connect directly via URL — no subprocess bridge needed.
|
|
1449
|
+
*
|
|
1450
|
+
* Each agent gets a unique URL: http://localhost:<port>/mcp?agent=<name>
|
|
1451
|
+
* The agent name is used as the MCP session ID, so tool handlers
|
|
1452
|
+
* receive it via extra.sessionId → getAgentId().
|
|
1453
|
+
*/
|
|
1454
|
+
/**
|
|
1455
|
+
* Parse request body as JSON
|
|
1456
|
+
*/
|
|
1457
|
+
function parseRequestBody(req) {
|
|
1458
|
+
return new Promise((resolve, reject) => {
|
|
1459
|
+
const chunks = [];
|
|
1460
|
+
req.on("data", (chunk) => chunks.push(chunk));
|
|
1461
|
+
req.on("end", () => {
|
|
1462
|
+
try {
|
|
1463
|
+
const body = Buffer.concat(chunks).toString();
|
|
1464
|
+
resolve(body ? JSON.parse(body) : void 0);
|
|
1465
|
+
} catch (err) {
|
|
1466
|
+
reject(err);
|
|
1467
|
+
}
|
|
1468
|
+
});
|
|
1469
|
+
req.on("error", reject);
|
|
1470
|
+
});
|
|
1471
|
+
}
|
|
1472
|
+
/**
|
|
1473
|
+
* Check if a JSON-RPC message is an initialize request
|
|
1474
|
+
*/
|
|
1475
|
+
function isInitializeRequest(body) {
|
|
1476
|
+
if (Array.isArray(body)) return body.some((msg) => msg?.method === "initialize");
|
|
1477
|
+
return body?.method === "initialize";
|
|
1478
|
+
}
|
|
1479
|
+
/**
|
|
1480
|
+
* Start an HTTP MCP server
|
|
1481
|
+
*
|
|
1482
|
+
* Agents connect via: http://localhost:<port>/mcp?agent=<name>
|
|
1483
|
+
* The server creates a per-session StreamableHTTPServerTransport and McpServer.
|
|
1484
|
+
*/
|
|
1485
|
+
async function runWithHttp(options) {
|
|
1486
|
+
const { createServerInstance, port = 0, onConnect, onDisconnect } = options;
|
|
1487
|
+
const sessions = /* @__PURE__ */ new Map();
|
|
1488
|
+
const httpServer = createServer(async (req, res) => {
|
|
1489
|
+
const reqUrl = new URL(req.url || "/", `http://localhost`);
|
|
1490
|
+
if (!reqUrl.pathname.startsWith("/mcp")) {
|
|
1491
|
+
res.writeHead(404, { "Content-Type": "application/json" });
|
|
1492
|
+
res.end(JSON.stringify({ error: "Not found" }));
|
|
1493
|
+
return;
|
|
1494
|
+
}
|
|
1495
|
+
const agentName = reqUrl.searchParams.get("agent") || "anonymous";
|
|
1496
|
+
const sessionId = req.headers["mcp-session-id"];
|
|
1497
|
+
if (sessionId && sessions.has(sessionId)) {
|
|
1498
|
+
const session = sessions.get(sessionId);
|
|
1499
|
+
if (req.method === "DELETE") {
|
|
1500
|
+
await session.transport.close();
|
|
1501
|
+
sessions.delete(sessionId);
|
|
1502
|
+
if (onDisconnect) onDisconnect(session.agentId, sessionId);
|
|
1503
|
+
res.writeHead(200);
|
|
1504
|
+
res.end();
|
|
1505
|
+
return;
|
|
1506
|
+
}
|
|
1507
|
+
const body = req.method === "POST" ? await parseRequestBody(req) : void 0;
|
|
1508
|
+
await session.transport.handleRequest(req, res, body);
|
|
1509
|
+
return;
|
|
1510
|
+
}
|
|
1511
|
+
if (req.method === "POST") {
|
|
1512
|
+
const body = await parseRequestBody(req);
|
|
1513
|
+
if (!isInitializeRequest(body)) {
|
|
1514
|
+
res.writeHead(400, { "Content-Type": "application/json" });
|
|
1515
|
+
res.end(JSON.stringify({ error: "Bad request: session required" }));
|
|
1516
|
+
return;
|
|
1517
|
+
}
|
|
1518
|
+
const transport = new StreamableHTTPServerTransport({
|
|
1519
|
+
sessionIdGenerator: () => `${agentName}-${randomUUID().slice(0, 8)}`,
|
|
1520
|
+
onsessioninitialized: (sid) => {
|
|
1521
|
+
sessions.set(sid, {
|
|
1522
|
+
transport,
|
|
1523
|
+
agentId: agentName
|
|
1524
|
+
});
|
|
1525
|
+
if (onConnect) onConnect(agentName, sid);
|
|
1526
|
+
}
|
|
1527
|
+
});
|
|
1528
|
+
Object.defineProperty(transport, "_agentId", {
|
|
1529
|
+
value: agentName,
|
|
1530
|
+
writable: true
|
|
1531
|
+
});
|
|
1532
|
+
await createServerInstance().connect(transport);
|
|
1533
|
+
await transport.handleRequest(req, res, body);
|
|
1534
|
+
return;
|
|
1535
|
+
}
|
|
1536
|
+
if (req.method === "GET") {
|
|
1537
|
+
res.writeHead(400, { "Content-Type": "application/json" });
|
|
1538
|
+
res.end(JSON.stringify({ error: "Session ID required for GET requests" }));
|
|
1539
|
+
return;
|
|
1540
|
+
}
|
|
1541
|
+
res.writeHead(405, { "Content-Type": "application/json" });
|
|
1542
|
+
res.end(JSON.stringify({ error: "Method not allowed" }));
|
|
1543
|
+
});
|
|
1544
|
+
const actualPort = await new Promise((resolve, reject) => {
|
|
1545
|
+
httpServer.on("error", reject);
|
|
1546
|
+
httpServer.listen(port, "127.0.0.1", () => {
|
|
1547
|
+
httpServer.removeListener("error", reject);
|
|
1548
|
+
const addr = httpServer.address();
|
|
1549
|
+
if (typeof addr === "object" && addr) resolve(addr.port);
|
|
1550
|
+
else reject(/* @__PURE__ */ new Error("Failed to get server address"));
|
|
1551
|
+
});
|
|
1552
|
+
});
|
|
1553
|
+
return {
|
|
1554
|
+
httpServer,
|
|
1555
|
+
url: `http://127.0.0.1:${actualPort}/mcp`,
|
|
1556
|
+
port: actualPort,
|
|
1557
|
+
sessions,
|
|
1558
|
+
async close() {
|
|
1559
|
+
for (const [sid, session] of sessions) {
|
|
1560
|
+
await session.transport.close();
|
|
1561
|
+
if (onDisconnect) onDisconnect(session.agentId, sid);
|
|
1562
|
+
}
|
|
1563
|
+
sessions.clear();
|
|
1564
|
+
await new Promise((resolve) => {
|
|
1565
|
+
httpServer.close(() => resolve());
|
|
1566
|
+
});
|
|
1567
|
+
}
|
|
1568
|
+
};
|
|
1569
|
+
}
|
|
1570
|
+
|
|
1571
|
+
//#endregion
|
|
1572
|
+
//#region src/workflow/controller/types.ts
|
|
1573
|
+
/** Default controller configuration values */
|
|
1574
|
+
const CONTROLLER_DEFAULTS = {
|
|
1575
|
+
pollInterval: 5e3,
|
|
1576
|
+
retry: {
|
|
1577
|
+
maxAttempts: 3,
|
|
1578
|
+
backoffMs: 1e3,
|
|
1579
|
+
backoffMultiplier: 2
|
|
1580
|
+
},
|
|
1581
|
+
recentChannelLimit: 50,
|
|
1582
|
+
idleDebounceMs: 2e3
|
|
1583
|
+
};
|
|
1584
|
+
|
|
1585
|
+
//#endregion
|
|
1586
|
+
//#region src/workflow/controller/prompt.ts
|
|
1587
|
+
/**
|
|
1588
|
+
* Format inbox messages for display
|
|
1589
|
+
*/
|
|
1590
|
+
function formatInbox(inbox) {
|
|
1591
|
+
if (inbox.length === 0) return "(no messages)";
|
|
1592
|
+
return inbox.map((m) => {
|
|
1593
|
+
const priority = m.priority === "high" ? " [HIGH]" : "";
|
|
1594
|
+
const time = m.entry.timestamp.slice(11, 19);
|
|
1595
|
+
const dm = m.entry.to ? " [DM]" : "";
|
|
1596
|
+
return `- [${time}] From @${m.entry.from}${priority}${dm}: ${m.entry.content}`;
|
|
1597
|
+
}).join("\n");
|
|
1598
|
+
}
|
|
1599
|
+
/**
|
|
1600
|
+
* Build the complete agent prompt from run context
|
|
1601
|
+
*/
|
|
1602
|
+
function buildAgentPrompt(ctx) {
|
|
1603
|
+
const sections = [];
|
|
1604
|
+
sections.push("## Project");
|
|
1605
|
+
sections.push(`Working on: ${ctx.projectDir}`);
|
|
1606
|
+
sections.push("");
|
|
1607
|
+
sections.push(`## Inbox (${ctx.inbox.length} message${ctx.inbox.length === 1 ? "" : "s"} for you)`);
|
|
1608
|
+
sections.push(formatInbox(ctx.inbox));
|
|
1609
|
+
sections.push("");
|
|
1610
|
+
sections.push("## Recent Activity");
|
|
1611
|
+
sections.push("Use channel_read tool to view recent channel messages and conversation context if needed.");
|
|
1612
|
+
if (ctx.documentContent) {
|
|
1613
|
+
sections.push("");
|
|
1614
|
+
sections.push("## Shared Document");
|
|
1615
|
+
sections.push(ctx.documentContent);
|
|
1616
|
+
}
|
|
1617
|
+
if (ctx.retryAttempt > 1) {
|
|
1618
|
+
sections.push("");
|
|
1619
|
+
sections.push(`## Note`);
|
|
1620
|
+
sections.push(`This is retry attempt ${ctx.retryAttempt}. Previous attempt failed.`);
|
|
1621
|
+
}
|
|
1622
|
+
sections.push("");
|
|
1623
|
+
sections.push("## Instructions");
|
|
1624
|
+
sections.push("You are an agent in a multi-agent workflow. Communicate ONLY through the MCP tools below.");
|
|
1625
|
+
sections.push("Your text output is NOT seen by other agents — you MUST use channel_send to communicate.");
|
|
1626
|
+
sections.push("");
|
|
1627
|
+
sections.push("### Channel Tools");
|
|
1628
|
+
sections.push("- **channel_send**: Send a message to the shared channel. Use @agentname to mention/notify.");
|
|
1629
|
+
sections.push(" Use the \"to\" parameter for private DMs: channel_send({ message: \"...\", to: \"bob\" })");
|
|
1630
|
+
sections.push("- **channel_read**: Read recent channel messages (DMs and logs are auto-filtered).");
|
|
1631
|
+
sections.push("");
|
|
1632
|
+
sections.push("### Team Tools");
|
|
1633
|
+
sections.push("- **team_members**: List all agents you can @mention. Pass includeStatus=true to see their current state and tasks.");
|
|
1634
|
+
sections.push("- **team_doc_read/write/append/list/create**: Shared team documents.");
|
|
1635
|
+
sections.push("");
|
|
1636
|
+
sections.push("### Personal Tools");
|
|
1637
|
+
sections.push("- **my_inbox**: Check your unread messages.");
|
|
1638
|
+
sections.push("- **my_inbox_ack**: Acknowledge messages after processing (pass the latest message ID).");
|
|
1639
|
+
sections.push("- **my_status_set**: Update your status. Call when starting work (state='running', task='...') or when done (state='idle').");
|
|
1640
|
+
sections.push("");
|
|
1641
|
+
sections.push("### Proposal & Voting Tools");
|
|
1642
|
+
sections.push("- **team_proposal_create**: Create a proposal for team voting (types: election, decision, approval, assignment).");
|
|
1643
|
+
sections.push("- **team_vote**: Cast your vote on an active proposal. You can change your vote by voting again.");
|
|
1644
|
+
sections.push("- **team_proposal_status**: Check status of a proposal, or list all active proposals.");
|
|
1645
|
+
sections.push("- **team_proposal_cancel**: Cancel a proposal you created.");
|
|
1646
|
+
sections.push("");
|
|
1647
|
+
sections.push("### Resource Tools");
|
|
1648
|
+
sections.push("- **resource_create**: Store large content, get a reference (resource:id) for use anywhere.");
|
|
1649
|
+
sections.push("- **resource_read**: Read resource content by ID.");
|
|
1650
|
+
if (ctx.feedback) {
|
|
1651
|
+
sections.push("");
|
|
1652
|
+
sections.push("### Feedback Tool");
|
|
1653
|
+
sections.push("- **feedback_submit**: Report workflow improvement needs — a missing tool, an awkward step, or a capability gap.");
|
|
1654
|
+
sections.push(" Only use when you genuinely hit a pain point during your work.");
|
|
1655
|
+
}
|
|
1656
|
+
sections.push("");
|
|
1657
|
+
sections.push("### Workflow");
|
|
1658
|
+
sections.push("1. Read your inbox messages above");
|
|
1659
|
+
sections.push("2. Do your assigned work using channel_send with @mentions");
|
|
1660
|
+
sections.push("3. Acknowledge your inbox with my_inbox_ack");
|
|
1661
|
+
sections.push("4. Exit when your task is complete");
|
|
1662
|
+
sections.push("");
|
|
1663
|
+
sections.push("### IMPORTANT: When to stop");
|
|
1664
|
+
sections.push("- Once your assigned task is complete, acknowledge your inbox and exit. Do NOT keep chatting.");
|
|
1665
|
+
sections.push("- Do NOT send pleasantries (\"you're welcome\", \"glad to help\", \"thanks again\") — they trigger unnecessary cycles.");
|
|
1666
|
+
sections.push("- Do NOT @mention another agent in your final message unless you need them to do more work.");
|
|
1667
|
+
sections.push("- If you receive a thank-you or acknowledgment, just call my_inbox_ack and exit. Do not reply.");
|
|
1668
|
+
return sections.join("\n");
|
|
1669
|
+
}
|
|
1670
|
+
|
|
1671
|
+
//#endregion
|
|
1672
|
+
//#region src/workflow/controller/mcp-config.ts
|
|
1673
|
+
/**
|
|
1674
|
+
* Generate MCP config for workflow context server.
|
|
1675
|
+
*
|
|
1676
|
+
* Uses HTTP transport — CLI agents connect directly via URL:
|
|
1677
|
+
* { type: "http", url: "http://127.0.0.1:<port>/mcp?agent=<name>" }
|
|
1678
|
+
*/
|
|
1679
|
+
function generateWorkflowMCPConfig(mcpUrl, agentName) {
|
|
1680
|
+
const url = `${mcpUrl}?agent=${encodeURIComponent(agentName)}`;
|
|
1681
|
+
return { mcpServers: { "workflow-context": {
|
|
1682
|
+
type: "http",
|
|
1683
|
+
url
|
|
1684
|
+
} } };
|
|
1685
|
+
}
|
|
1686
|
+
|
|
1687
|
+
//#endregion
|
|
1688
|
+
//#region src/daemon/cron.ts
|
|
1689
|
+
/**
|
|
1690
|
+
* Minimal cron expression parser.
|
|
1691
|
+
* Supports standard 5-field cron: minute hour day-of-month month day-of-week
|
|
1692
|
+
*
|
|
1693
|
+
* Field syntax:
|
|
1694
|
+
* * every value
|
|
1695
|
+
* N exact value
|
|
1696
|
+
* N-M range (inclusive)
|
|
1697
|
+
* N,M,O list
|
|
1698
|
+
* * /step every step (e.g. * /15 = 0,15,30,45) [no space — formatting only]
|
|
1699
|
+
* N-M/step range with step
|
|
1700
|
+
*/
|
|
1701
|
+
function range(min, max) {
|
|
1702
|
+
const r = [];
|
|
1703
|
+
for (let i = min; i <= max; i++) r.push(i);
|
|
1704
|
+
return r;
|
|
1705
|
+
}
|
|
1706
|
+
function parseIntStrict(s, context) {
|
|
1707
|
+
const n = parseInt(s, 10);
|
|
1708
|
+
if (isNaN(n)) throw new Error(`Invalid number "${s}" in ${context}`);
|
|
1709
|
+
return n;
|
|
1710
|
+
}
|
|
1711
|
+
function parseCronField(field, min, max) {
|
|
1712
|
+
const values = /* @__PURE__ */ new Set();
|
|
1713
|
+
for (const part of field.split(",")) if (part === "*") for (const v of range(min, max)) values.add(v);
|
|
1714
|
+
else if (part.includes("/")) {
|
|
1715
|
+
const [rangeStr, stepStr] = part.split("/");
|
|
1716
|
+
const step = parseIntStrict(stepStr, `step "${part}"`);
|
|
1717
|
+
if (step <= 0) throw new Error(`Invalid step: ${part}`);
|
|
1718
|
+
let lo = min;
|
|
1719
|
+
let hi = max;
|
|
1720
|
+
if (rangeStr !== "*") if (rangeStr.includes("-")) {
|
|
1721
|
+
const parts = rangeStr.split("-");
|
|
1722
|
+
lo = parseIntStrict(parts[0], `range "${part}"`);
|
|
1723
|
+
hi = parseIntStrict(parts[1], `range "${part}"`);
|
|
1724
|
+
} else {
|
|
1725
|
+
lo = parseIntStrict(rangeStr, `field "${part}"`);
|
|
1726
|
+
hi = max;
|
|
1727
|
+
}
|
|
1728
|
+
for (let v = lo; v <= hi; v += step) values.add(v);
|
|
1729
|
+
} else if (part.includes("-")) {
|
|
1730
|
+
const parts = part.split("-");
|
|
1731
|
+
const lo = parseIntStrict(parts[0], `range "${part}"`);
|
|
1732
|
+
const hi = parseIntStrict(parts[1], `range "${part}"`);
|
|
1733
|
+
for (const v of range(lo, hi)) values.add(v);
|
|
1734
|
+
} else values.add(parseIntStrict(part, `field "${field}"`));
|
|
1735
|
+
return values;
|
|
1736
|
+
}
|
|
1737
|
+
/**
|
|
1738
|
+
* Parse a 5-field cron expression into sets of matching values.
|
|
1739
|
+
*/
|
|
1740
|
+
function parseCron(expr) {
|
|
1741
|
+
const parts = expr.trim().split(/\s+/);
|
|
1742
|
+
if (parts.length !== 5) throw new Error(`Invalid cron expression (expected 5 fields): ${expr}`);
|
|
1743
|
+
return {
|
|
1744
|
+
minutes: parseCronField(parts[0], 0, 59),
|
|
1745
|
+
hours: parseCronField(parts[1], 0, 23),
|
|
1746
|
+
daysOfMonth: parseCronField(parts[2], 1, 31),
|
|
1747
|
+
months: parseCronField(parts[3], 1, 12),
|
|
1748
|
+
daysOfWeek: parseCronField(parts[4], 0, 6)
|
|
1749
|
+
};
|
|
1750
|
+
}
|
|
1751
|
+
/**
|
|
1752
|
+
* Check if a Date matches a parsed cron expression.
|
|
1753
|
+
*/
|
|
1754
|
+
function matchesCron(date, fields) {
|
|
1755
|
+
return fields.minutes.has(date.getMinutes()) && fields.hours.has(date.getHours()) && fields.daysOfMonth.has(date.getDate()) && fields.months.has(date.getMonth() + 1) && fields.daysOfWeek.has(date.getDay());
|
|
1756
|
+
}
|
|
1757
|
+
/**
|
|
1758
|
+
* Calculate the next occurrence of a cron expression after `from`.
|
|
1759
|
+
* Searches forward minute-by-minute, up to 1 year.
|
|
1760
|
+
* Returns the Date of the next match.
|
|
1761
|
+
*/
|
|
1762
|
+
function nextCronTime(expr, from = /* @__PURE__ */ new Date()) {
|
|
1763
|
+
const fields = parseCron(expr);
|
|
1764
|
+
const next = new Date(from);
|
|
1765
|
+
next.setSeconds(0, 0);
|
|
1766
|
+
next.setMinutes(next.getMinutes() + 1);
|
|
1767
|
+
const maxMinutes = 366 * 24 * 60;
|
|
1768
|
+
for (let i = 0; i < maxMinutes; i++) {
|
|
1769
|
+
if (matchesCron(next, fields)) return next;
|
|
1770
|
+
next.setMinutes(next.getMinutes() + 1);
|
|
1771
|
+
}
|
|
1772
|
+
throw new Error(`No matching cron time found within 1 year: ${expr}`);
|
|
1773
|
+
}
|
|
1774
|
+
/**
|
|
1775
|
+
* Calculate ms until the next cron occurrence.
|
|
1776
|
+
*/
|
|
1777
|
+
function msUntilNextCron(expr, from = /* @__PURE__ */ new Date()) {
|
|
1778
|
+
return nextCronTime(expr, from).getTime() - from.getTime();
|
|
1779
|
+
}
|
|
1780
|
+
|
|
1781
|
+
//#endregion
|
|
1782
|
+
//#region src/workflow/controller/mock-runner.ts
|
|
1783
|
+
/**
|
|
1784
|
+
* Mock Agent Runner
|
|
1785
|
+
*
|
|
1786
|
+
* Orchestrates mock agent execution for workflow integration testing.
|
|
1787
|
+
* Uses AI SDK generateText with MockLanguageModelV3 and real MCP tool calls.
|
|
1788
|
+
*
|
|
1789
|
+
* This lives in the controller layer (not backends) because it does orchestration:
|
|
1790
|
+
* connecting to MCP, building prompts, managing tool loops.
|
|
1791
|
+
* The mock backend itself is just a simple send() adapter.
|
|
1792
|
+
*/
|
|
1793
|
+
/**
|
|
1794
|
+
* Connect to workflow MCP server via HTTP and create AI SDK tool wrappers
|
|
1795
|
+
*/
|
|
1796
|
+
async function createMCPToolBridge$1(mcpUrl, agentName) {
|
|
1797
|
+
const transport = new StreamableHTTPClientTransport(new URL(`${mcpUrl}?agent=${encodeURIComponent(agentName)}`));
|
|
1798
|
+
const client = new Client({
|
|
1799
|
+
name: agentName,
|
|
1800
|
+
version: "1.0.0"
|
|
1801
|
+
});
|
|
1802
|
+
await client.connect(transport);
|
|
1803
|
+
const { tools: mcpTools } = await client.listTools();
|
|
1804
|
+
const aiTools = {};
|
|
1805
|
+
for (const mcpTool of mcpTools) {
|
|
1806
|
+
const toolName = mcpTool.name;
|
|
1807
|
+
aiTools[toolName] = tool({
|
|
1808
|
+
description: mcpTool.description || toolName,
|
|
1809
|
+
inputSchema: jsonSchema(mcpTool.inputSchema),
|
|
1810
|
+
execute: async (args) => {
|
|
1811
|
+
return (await client.callTool({
|
|
1812
|
+
name: toolName,
|
|
1813
|
+
arguments: args
|
|
1814
|
+
})).content;
|
|
1815
|
+
}
|
|
1816
|
+
});
|
|
1817
|
+
}
|
|
1818
|
+
return {
|
|
1819
|
+
tools: aiTools,
|
|
1820
|
+
close: () => client.close()
|
|
1821
|
+
};
|
|
1822
|
+
}
|
|
1823
|
+
/**
|
|
1824
|
+
* Run a mock agent with AI SDK and real MCP tools.
|
|
1825
|
+
*
|
|
1826
|
+
* Used by the controller when backend.type === 'mock'.
|
|
1827
|
+
* Unlike real backends that just send(), the mock runner needs to:
|
|
1828
|
+
* 1. Connect to MCP server for real tool execution
|
|
1829
|
+
* 2. Generate scripted tool calls via MockLanguageModelV3
|
|
1830
|
+
* 3. Execute the full tool loop to test channel/document flow
|
|
1831
|
+
*/
|
|
1832
|
+
async function runMockAgent(ctx, debugLog) {
|
|
1833
|
+
const startTime = Date.now();
|
|
1834
|
+
const log = debugLog || (() => {});
|
|
1835
|
+
try {
|
|
1836
|
+
if (!ctx.mcpUrl) return {
|
|
1837
|
+
success: false,
|
|
1838
|
+
error: "Mock runner requires mcpUrl (HTTP MCP server)",
|
|
1839
|
+
duration: 0
|
|
1840
|
+
};
|
|
1841
|
+
const mcp = await createMCPToolBridge$1(ctx.mcpUrl, ctx.name);
|
|
1842
|
+
log(`MCP connected, ${Object.keys(mcp.tools).length} tools`);
|
|
1843
|
+
const inboxSummary = ctx.inbox.map((m) => `${m.entry.from}: ${m.entry.content.slice(0, 80).replace(/@/g, "")}`).join("; ");
|
|
1844
|
+
const mockModel = new MockLanguageModelV3({ doGenerate: mockValues({
|
|
1845
|
+
content: [{
|
|
1846
|
+
type: "tool-call",
|
|
1847
|
+
toolCallId: `call-${ctx.name}-${Date.now()}`,
|
|
1848
|
+
toolName: "channel_send",
|
|
1849
|
+
input: JSON.stringify({ message: `[${ctx.name}] Processed: ${inboxSummary.slice(0, 200)}` })
|
|
1850
|
+
}],
|
|
1851
|
+
finishReason: {
|
|
1852
|
+
unified: "tool-calls",
|
|
1853
|
+
raw: "tool_use"
|
|
1854
|
+
},
|
|
1855
|
+
usage: {
|
|
1856
|
+
inputTokens: {
|
|
1857
|
+
total: 100,
|
|
1858
|
+
noCache: 100,
|
|
1859
|
+
cacheRead: 0,
|
|
1860
|
+
cacheWrite: 0
|
|
1861
|
+
},
|
|
1862
|
+
outputTokens: {
|
|
1863
|
+
total: 50,
|
|
1864
|
+
text: 50,
|
|
1865
|
+
reasoning: 0
|
|
1866
|
+
}
|
|
1867
|
+
}
|
|
1868
|
+
}, {
|
|
1869
|
+
content: [{
|
|
1870
|
+
type: "text",
|
|
1871
|
+
text: `${ctx.name} done.`
|
|
1872
|
+
}],
|
|
1873
|
+
finishReason: {
|
|
1874
|
+
unified: "stop",
|
|
1875
|
+
raw: "end_turn"
|
|
1876
|
+
},
|
|
1877
|
+
usage: {
|
|
1878
|
+
inputTokens: {
|
|
1879
|
+
total: 50,
|
|
1880
|
+
noCache: 50,
|
|
1881
|
+
cacheRead: 0,
|
|
1882
|
+
cacheWrite: 0
|
|
1883
|
+
},
|
|
1884
|
+
outputTokens: {
|
|
1885
|
+
total: 10,
|
|
1886
|
+
text: 10,
|
|
1887
|
+
reasoning: 0
|
|
1888
|
+
}
|
|
1889
|
+
}
|
|
1890
|
+
}) });
|
|
1891
|
+
const prompt = buildAgentPrompt(ctx);
|
|
1892
|
+
log(`Prompt (${prompt.length} chars)`);
|
|
1893
|
+
const result = await generateText({
|
|
1894
|
+
model: mockModel,
|
|
1895
|
+
tools: mcp.tools,
|
|
1896
|
+
prompt,
|
|
1897
|
+
system: ctx.agent.resolvedSystemPrompt,
|
|
1898
|
+
stopWhen: stepCountIs(3)
|
|
1899
|
+
});
|
|
1900
|
+
const totalToolCalls = result.steps.reduce((n, s) => n + s.toolCalls.length, 0);
|
|
1901
|
+
await mcp.close();
|
|
1902
|
+
return {
|
|
1903
|
+
success: true,
|
|
1904
|
+
duration: Date.now() - startTime,
|
|
1905
|
+
steps: result.steps.length,
|
|
1906
|
+
toolCalls: totalToolCalls
|
|
1907
|
+
};
|
|
1908
|
+
} catch (error) {
|
|
1909
|
+
return {
|
|
1910
|
+
success: false,
|
|
1911
|
+
error: error instanceof Error ? error.message : String(error),
|
|
1912
|
+
duration: Date.now() - startTime
|
|
1913
|
+
};
|
|
1914
|
+
}
|
|
1915
|
+
}
|
|
1916
|
+
|
|
1917
|
+
//#endregion
|
|
1918
|
+
//#region src/workflow/controller/sdk-runner.ts
|
|
1919
|
+
/**
|
|
1920
|
+
* SDK Agent Runner
|
|
1921
|
+
*
|
|
1922
|
+
* Runs SDK agents with full tool access in workflows:
|
|
1923
|
+
* - MCP context tools (channel_send, document_write, etc.)
|
|
1924
|
+
* - Bash tool for shell commands
|
|
1925
|
+
*
|
|
1926
|
+
* Same pattern as mock-runner.ts but with real models via createModelAsync.
|
|
1927
|
+
* This is the standard execution path for SDK backends in workflows —
|
|
1928
|
+
* all agents get MCP + bash regardless of backend type.
|
|
1929
|
+
*/
|
|
1930
|
+
/** Extract useful details from AI SDK errors (statusCode, url, responseBody) */
|
|
1931
|
+
function formatError(error) {
|
|
1932
|
+
if (!(error instanceof Error)) return String(error);
|
|
1933
|
+
const e = error;
|
|
1934
|
+
const parts = [error.message];
|
|
1935
|
+
if (e.statusCode) parts[0] = `HTTP ${e.statusCode}: ${error.message}`;
|
|
1936
|
+
if (e.url) parts.push(`url=${e.url}`);
|
|
1937
|
+
if (e.responseBody && typeof e.responseBody === "string") {
|
|
1938
|
+
const body = e.responseBody.length > 200 ? e.responseBody.slice(0, 200) + "…" : e.responseBody;
|
|
1939
|
+
parts.push(`body=${body}`);
|
|
1940
|
+
}
|
|
1941
|
+
return parts.join(" ");
|
|
1942
|
+
}
|
|
1943
|
+
/** Truncate string, flatten newlines */
|
|
1944
|
+
function truncate(s, max) {
|
|
1945
|
+
const flat = s.replace(/\s+/g, " ").trim();
|
|
1946
|
+
return flat.length > max ? flat.slice(0, max) + "…" : flat;
|
|
1947
|
+
}
|
|
1948
|
+
/** Format a tool call for concise single-line debug output (function call syntax) */
|
|
1949
|
+
function formatToolCall(tc) {
|
|
1950
|
+
const input = tc.input ?? tc.args ?? {};
|
|
1951
|
+
const pairs = Object.entries(input).map(([k, v]) => {
|
|
1952
|
+
return `${k}=${truncate(typeof v === "string" ? v : JSON.stringify(v), 60)}`;
|
|
1953
|
+
});
|
|
1954
|
+
return `${tc.toolName}(${pairs.join(", ")})`;
|
|
1955
|
+
}
|
|
1956
|
+
/**
|
|
1957
|
+
* Connect to workflow MCP server and create AI SDK tool wrappers.
|
|
1958
|
+
* Same bridge as mock-runner — extracted here for SDK agents.
|
|
1959
|
+
*/
|
|
1960
|
+
async function createMCPToolBridge(mcpUrl, agentName) {
|
|
1961
|
+
const transport = new StreamableHTTPClientTransport(new URL(`${mcpUrl}?agent=${encodeURIComponent(agentName)}`));
|
|
1962
|
+
const client = new Client({
|
|
1963
|
+
name: agentName,
|
|
1964
|
+
version: "1.0.0"
|
|
1965
|
+
});
|
|
1966
|
+
await client.connect(transport);
|
|
1967
|
+
const { tools: mcpTools } = await client.listTools();
|
|
1968
|
+
const aiTools = {};
|
|
1969
|
+
for (const mcpTool of mcpTools) {
|
|
1970
|
+
const toolName = mcpTool.name;
|
|
1971
|
+
aiTools[toolName] = tool({
|
|
1972
|
+
description: mcpTool.description || toolName,
|
|
1973
|
+
inputSchema: jsonSchema(mcpTool.inputSchema),
|
|
1974
|
+
execute: async (args) => {
|
|
1975
|
+
return (await client.callTool({
|
|
1976
|
+
name: toolName,
|
|
1977
|
+
arguments: args
|
|
1978
|
+
})).content;
|
|
1979
|
+
}
|
|
1980
|
+
});
|
|
1981
|
+
}
|
|
1982
|
+
return {
|
|
1983
|
+
tools: aiTools,
|
|
1984
|
+
close: () => client.close()
|
|
1985
|
+
};
|
|
1986
|
+
}
|
|
1987
|
+
function createBashTool() {
|
|
1988
|
+
return tool({
|
|
1989
|
+
description: "Execute a shell command and return stdout/stderr.",
|
|
1990
|
+
inputSchema: jsonSchema({
|
|
1991
|
+
type: "object",
|
|
1992
|
+
properties: { command: {
|
|
1993
|
+
type: "string",
|
|
1994
|
+
description: "The shell command to execute"
|
|
1995
|
+
} },
|
|
1996
|
+
required: ["command"]
|
|
1997
|
+
}),
|
|
1998
|
+
execute: async ({ command }) => {
|
|
1999
|
+
try {
|
|
2000
|
+
return execSync(command, {
|
|
2001
|
+
encoding: "utf-8",
|
|
2002
|
+
timeout: 12e4
|
|
2003
|
+
}).trim() || "(no output)";
|
|
2004
|
+
} catch (error) {
|
|
2005
|
+
return `Error (exit ${error.status}): ${error.stderr || error.message}`;
|
|
2006
|
+
}
|
|
2007
|
+
}
|
|
2008
|
+
});
|
|
2009
|
+
}
|
|
2010
|
+
/**
 * Run an SDK agent with real model + MCP tools + bash.
 *
 * Used by the controller when backend.type === 'default'.
 * Unlike the simple SdkBackend.send() (text-only), this runner:
 * 1. Connects to MCP server for context tools (channel, document)
 * 2. Adds bash tool for shell access
 * 3. Runs generateText with full tool loop
 *
 * @param ctx run context (agent def, mcpUrl, provider, eventLog, ...)
 * @param debugLog optional debug sink; defaults to a no-op
 * @returns {Promise<{success: boolean, duration: number, content?: string, steps?: number, toolCalls?: number, error?: string}>}
 */
async function runSdkAgent(ctx, debugLog) {
	const startTime = Date.now();
	const log = debugLog || (() => {});
	try {
		if (!ctx.mcpUrl) return {
			success: false,
			error: "SDK runner requires mcpUrl",
			duration: 0
		};
		const mcp = await createMCPToolBridge(ctx.mcpUrl, ctx.name);
		log(`MCP connected, ${Object.keys(mcp.tools).length} context tools`);
		try {
			const model = await createModelAsync(ctx.agent.model);
			const tools = {
				...mcp.tools,
				bash: createBashTool()
			};
			const prompt = buildAgentPrompt(ctx);
			log(`Prompt (${prompt.length} chars) → sdk with ${Object.keys(tools).length} tools`);
			const result = await generateText({
				model,
				tools,
				system: ctx.agent.resolvedSystemPrompt,
				prompt,
				maxOutputTokens: ctx.agent.max_tokens ?? 8192,
				stopWhen: stepCountIs(ctx.agent.max_steps ?? 200),
				onStepFinish: (step) => {
					// Only bash calls are surfaced to the event log here; MCP context
					// tool calls are logged by the MCP server side.
					if (step.toolCalls?.length && ctx.eventLog) {
						for (const tc of step.toolCalls) if (tc.toolName === "bash") ctx.eventLog.toolCall(ctx.name, tc.toolName, formatToolCall(tc), "sdk");
					}
				}
			});
			const totalToolCalls = result.steps.reduce((n, s) => n + s.toolCalls.length, 0);
			const lastStep = result.steps[result.steps.length - 1];
			// If we hit the step cap while the model still wanted tools, warn the user.
			if (ctx.agent.max_steps && result.steps.length >= ctx.agent.max_steps && (lastStep?.toolCalls?.length ?? 0) > 0) {
				const warning = `⚠️ Agent reached max_steps limit (${ctx.agent.max_steps}) but wanted to continue. Consider increasing max_steps or removing the limit.`;
				log(warning);
				await ctx.provider.appendChannel(ctx.name, warning, { kind: "system" }).catch(() => {});
			}
			return {
				success: true,
				duration: Date.now() - startTime,
				content: result.text,
				steps: result.steps.length,
				toolCalls: totalToolCalls
			};
		} finally {
			// BUGFIX: always release the MCP client. The original closed it only on
			// the success path, leaking the connection whenever generateText threw.
			// Close errors are best-effort swallowed so they can't mask the result.
			try {
				await mcp.close();
			} catch {}
		}
	} catch (error) {
		return {
			success: false,
			error: formatError(error),
			duration: Date.now() - startTime
		};
	}
}
|
|
2075
|
+
|
|
2076
|
+
//#endregion
|
|
2077
|
+
//#region src/workflow/controller/controller.ts
|
|
2078
|
+
/** A controller keeps looping in every state except "stopped". */
function shouldContinue(state) {
	return !(state === "stopped");
}
|
|
2082
|
+
/**
 * Create an agent controller
 *
 * The controller:
 * 1. Polls for inbox messages on an interval
 * 2. Runs the agent when messages are found
 * 3. Acknowledges inbox only on successful run
 * 4. Retries with exponential backoff on failure
 * 5. Can be woken early via wake()
 *
 * BUGFIX in this revision: after a run (and in the empty-inbox branch and
 * sendDirect's cleanup), the state was unconditionally reset to "idle". If
 * stop() was called while the agent was running, that reset clobbered the
 * "stopped" state and the loop kept running. The resets are now guarded.
 */
function createAgentController(config) {
	const { name, agent, contextProvider, eventLog, mcpUrl, workspaceDir, projectDir, backend, onRunComplete, log = () => {}, feedback } = config;
	const infoLog = config.infoLog ?? log;
	const errorLog = config.errorLog ?? log;
	const pollInterval = config.pollInterval ?? CONTROLLER_DEFAULTS.pollInterval;
	const retryConfig = {
		maxAttempts: config.retry?.maxAttempts ?? CONTROLLER_DEFAULTS.retry.maxAttempts,
		backoffMs: config.retry?.backoffMs ?? CONTROLLER_DEFAULTS.retry.backoffMs,
		backoffMultiplier: config.retry?.backoffMultiplier ?? CONTROLLER_DEFAULTS.retry.backoffMultiplier
	};
	let state = "stopped";
	let wakeResolver = null;
	let pollTimeout = null;
	let directRunning = false;
	const scheduleConfig = agent.schedule;
	let resolvedSchedule;
	if (scheduleConfig) try {
		// Fail fast on a bad schedule so misconfiguration surfaces at creation.
		resolvedSchedule = resolveSchedule(scheduleConfig);
	} catch (err) {
		const msg = err instanceof Error ? err.message : String(err);
		throw new Error(`Agent "${name}" has invalid schedule config: ${msg}`);
	}
	let lastActivityTime = Date.now();
	/**
	 * Wait for either poll interval or wake() call
	 */
	async function waitForWakeOrPoll() {
		return new Promise((resolve) => {
			wakeResolver = resolve;
			pollTimeout = setTimeout(() => {
				wakeResolver = null;
				resolve();
			}, pollInterval);
		});
	}
	/**
	 * Main controller loop
	 */
	async function runLoop() {
		while (shouldContinue(state)) {
			await waitForWakeOrPoll();
			if (!shouldContinue(state)) break;
			// A direct send owns the backend right now — skip this tick.
			if (directRunning) continue;
			const inbox = await contextProvider.getInbox(name);
			if (inbox.length === 0) {
				if (resolvedSchedule) {
					const now = Date.now();
					let wakeupDue = false;
					if (resolvedSchedule.type === "interval") {
						if (now - lastActivityTime >= resolvedSchedule.ms) wakeupDue = true;
					} else if (resolvedSchedule.type === "cron") {
						const msTillNext = msUntilNextCron(resolvedSchedule.expr, new Date(lastActivityTime));
						if (now >= lastActivityTime + msTillNext) wakeupDue = true;
					}
					if (wakeupDue) {
						// Deliver the wakeup as a channel mention so it flows through
						// the normal inbox path on the next tick.
						const wakeupPrompt = resolvedSchedule.prompt ?? "Scheduled wakeup. Check for any pending work or updates.";
						log(`Schedule wakeup triggered for ${name}`);
						await contextProvider.appendChannel("system", `@${name} ${wakeupPrompt}`);
						lastActivityTime = now;
						continue;
					}
				}
				// BUGFIX: don't revive the loop if stop() landed during getInbox().
				if (shouldContinue(state)) {
					state = "idle";
					await contextProvider.setAgentStatus(name, { state: "idle" });
				}
				continue;
			}
			const senders = inbox.map((m) => m.entry.from);
			infoLog(`Inbox: ${inbox.length} message(s) from [${senders.join(", ")}]`);
			for (const msg of inbox) {
				const preview = msg.entry.content.length > 120 ? msg.entry.content.slice(0, 120) + "..." : msg.entry.content;
				log(` from @${msg.entry.from}: ${preview}`);
			}
			// Mark seen immediately so a concurrent wake doesn't double-process;
			// ack only happens after a successful run (or after retries exhaust).
			const latestId = inbox[inbox.length - 1].entry.id;
			await contextProvider.markInboxSeen(name, latestId);
			let attempt = 0;
			let lastResult = null;
			while (attempt < retryConfig.maxAttempts && shouldContinue(state)) {
				attempt++;
				state = "running";
				await contextProvider.setAgentStatus(name, { state: "running" });
				infoLog(`Running (attempt ${attempt}/${retryConfig.maxAttempts})`);
				lastResult = await runAgent(backend, {
					name,
					agent,
					inbox,
					recentChannel: await contextProvider.readChannel({
						limit: CONTROLLER_DEFAULTS.recentChannelLimit,
						agent: name
					}),
					documentContent: await contextProvider.readDocument(),
					mcpUrl,
					workspaceDir,
					projectDir,
					retryAttempt: attempt,
					provider: contextProvider,
					eventLog,
					feedback
				}, log, infoLog);
				if (lastResult.success) {
					infoLog(`DONE ${lastResult.steps ? `${lastResult.steps} steps, ${lastResult.toolCalls} tool calls, ${lastResult.duration}ms` : `${lastResult.duration}ms`}`);
					if (lastResult.content) await contextProvider.appendChannel(name, lastResult.content);
					await contextProvider.ackInbox(name, latestId);
					lastActivityTime = Date.now();
					await contextProvider.setAgentStatus(name, { state: "idle" });
					break;
				}
				errorLog(`ERROR ${lastResult.error}`);
				if (attempt < retryConfig.maxAttempts && shouldContinue(state)) {
					const delay = retryConfig.backoffMs * Math.pow(retryConfig.backoffMultiplier, attempt - 1);
					log(`Retrying in ${delay}ms...`);
					await sleep(delay);
				}
			}
			if (lastResult && !lastResult.success) {
				// Deliberate: ack the poison message rather than retry forever.
				errorLog(`ERROR max retries exhausted, acknowledging to prevent loop`);
				await contextProvider.ackInbox(name, latestId);
			}
			if (lastResult && onRunComplete) onRunComplete(lastResult);
			// BUGFIX: only reset to idle if no stop() request arrived mid-run;
			// unconditional reset used to clobber "stopped" and revive the loop.
			if (shouldContinue(state)) {
				state = "idle";
				await contextProvider.setAgentStatus(name, { state: "idle" });
			}
		}
	}
	return {
		get name() {
			return name;
		},
		get state() {
			return state;
		},
		async start() {
			if (state !== "stopped") throw new Error(`Controller ${name} is already running`);
			state = "idle";
			lastActivityTime = Date.now();
			await contextProvider.setAgentStatus(name, { state: "idle" });
			if (resolvedSchedule) infoLog(`Starting (schedule: ${resolvedSchedule.type === "interval" ? `${resolvedSchedule.ms}ms interval` : `cron "${resolvedSchedule.expr}"`})`);
			else infoLog(`Starting`);
			// Fire-and-forget: the loop runs until stop(); errors end the controller.
			runLoop().catch((error) => {
				errorLog(`ERROR ${error instanceof Error ? error.message : String(error)}`);
				state = "stopped";
				contextProvider.setAgentStatus(name, { state: "stopped" }).catch(() => {});
			});
		},
		async stop() {
			log(`Stopping`);
			state = "stopped";
			await contextProvider.setAgentStatus(name, { state: "stopped" });
			if (backend.abort) backend.abort();
			if (pollTimeout) {
				clearTimeout(pollTimeout);
				pollTimeout = null;
			}
			if (wakeResolver) {
				wakeResolver();
				wakeResolver = null;
			}
		},
		wake() {
			// Only an idle, parked loop can be woken early.
			if (state === "idle" && wakeResolver) {
				log(`Waking`);
				if (pollTimeout) {
					clearTimeout(pollTimeout);
					pollTimeout = null;
				}
				wakeResolver();
				wakeResolver = null;
			}
		},
		async sendDirect(message) {
			if (directRunning) return {
				success: false,
				error: "Agent is already processing a direct request",
				duration: 0
			};
			if (state === "running") return {
				success: false,
				error: "Agent is currently running (poll loop)",
				duration: 0
			};
			directRunning = true;
			const prevState = state;
			state = "running";
			await contextProvider.setAgentStatus(name, { state: "running" });
			try {
				await contextProvider.appendChannel("user", `@${name} ${message}`);
				const inbox = await contextProvider.getInbox(name);
				const latestId = inbox.length > 0 ? inbox[inbox.length - 1].entry.id : void 0;
				if (latestId) await contextProvider.markInboxSeen(name, latestId);
				const runContext = {
					name,
					agent,
					inbox,
					recentChannel: await contextProvider.readChannel({
						limit: CONTROLLER_DEFAULTS.recentChannelLimit,
						agent: name
					}),
					documentContent: await contextProvider.readDocument(),
					mcpUrl,
					workspaceDir,
					projectDir,
					retryAttempt: 1,
					provider: contextProvider,
					eventLog,
					feedback
				};
				infoLog(`Direct send (${message.length} chars)`);
				const result = await runAgent(backend, runContext, log, infoLog);
				if (result.success) {
					if (result.content) await contextProvider.appendChannel(name, result.content);
					if (latestId) await contextProvider.ackInbox(name, latestId);
					lastActivityTime = Date.now();
				}
				return result;
			} finally {
				directRunning = false;
				// BUGFIX: honor a stop() that arrived during the direct run as well.
				state = prevState === "stopped" || state === "stopped" ? "stopped" : "idle";
				await contextProvider.setAgentStatus(name, { state }).catch(() => {});
			}
		}
	};
}
|
|
2312
|
+
/**
 * Run an agent: build prompt, configure workspace, call backend.send()
 *
 * This is the single orchestration function that the controller calls.
 * All the "how to run an agent" logic lives here — backends just send().
 *
 * SDK and mock backends get special runners with MCP tool bridge + bash,
 * because they can't manage tools on their own (unlike CLI backends).
 */
async function runAgent(backend, ctx, log, infoLog) {
	const info = infoLog ?? log;
	const debug = (msg) => log(msg);
	// Special-cased backends route to their dedicated runners.
	switch (backend.type) {
		case "mock":
			return runMockAgent(ctx, debug);
		case "default":
			return runSdkAgent(ctx, debug);
	}
	const startedAt = Date.now();
	try {
		// CLI backends that support it get an isolated workspace + MCP config.
		if (backend.setWorkspace) {
			backend.setWorkspace(ctx.workspaceDir, generateWorkflowMCPConfig(ctx.mcpUrl, ctx.name));
		}
		const prompt = buildAgentPrompt(ctx);
		info(`Prompt (${prompt.length} chars) → ${backend.type} backend`);
		const response = await backend.send(prompt, { system: ctx.agent.resolvedSystemPrompt });
		return {
			success: true,
			duration: Date.now() - startedAt,
			content: response.content
		};
	} catch (error) {
		const message = error instanceof Error ? error.message : String(error);
		return {
			success: false,
			error: message,
			duration: Date.now() - startedAt
		};
	}
}
|
|
2347
|
+
/** Resolve after roughly `ms` milliseconds (setTimeout-based). */
function sleep(ms) {
	return new Promise((done) => {
		setTimeout(done, ms);
	});
}
|
|
2353
|
+
/**
 * Check if workflow is complete (all agents idle, no pending work).
 * Re-checks after a debounce window to avoid declaring idle during a handoff.
 */
async function checkWorkflowIdle(controllers, provider, debounceMs = CONTROLLER_DEFAULTS.idleDebounceMs) {
	const everyControllerIdle = () => [...controllers.values()].every((c) => c.state === "idle");
	if (!everyControllerIdle()) return false;
	for (const [agentName] of controllers) {
		const pending = await provider.getInbox(agentName);
		if (pending.length > 0) return false;
	}
	// Debounce, then confirm nobody woke up in the meantime.
	await sleep(debounceMs);
	return everyControllerIdle();
}
|
|
2362
|
+
|
|
2363
|
+
//#endregion
|
|
2364
|
+
//#region src/workflow/controller/backend.ts
|
|
2365
|
+
/**
 * Get backend by explicit backend type
 *
 * All backends are created via the canonical createBackend() factory
 * from backends/index.ts. Mock backend is handled specially (no model needed).
 */
function getBackendByType(backendType, options) {
	// Mock needs no model/provider wiring at all.
	if (backendType === "mock") return createMockBackend(options?.debugLog);
	const extras = {};
	if (options?.timeout) extras.timeout = options.timeout;
	if (options?.streamCallbacks) extras.streamCallbacks = options.streamCallbacks;
	const spec = {
		type: backendType,
		model: options?.model
	};
	// Only the SDK ("default") backend accepts a custom provider.
	if (backendType === "default" && options?.provider) spec.provider = options.provider;
	if (Object.keys(extras).length > 0) spec.options = extras;
	return createBackend(spec);
}
|
|
2383
|
+
/**
 * Get appropriate backend for a model identifier
 *
 * Infers backend type from model name and delegates to getBackendByType.
 * Prefer using getBackendByType with explicit backend field in workflow configs.
 */
function getBackendForModel(model, options) {
	const forwarded = {
		...options,
		model
	};
	// An explicit custom provider always means the SDK backend.
	if (options?.provider) return getBackendByType("default", forwarded);
	const { provider } = parseModel(model);
	if (provider === "anthropic") return getBackendByType("default", forwarded);
	if (provider === "claude") return getBackendByType("claude", forwarded);
	if (provider === "codex") return getBackendByType("codex", forwarded);
	throw new Error(`Unknown provider: ${provider}. Specify backend explicitly.`);
}
|
|
2411
|
+
|
|
2412
|
+
//#endregion
|
|
2413
|
+
//#region src/workflow/logger.ts
|
|
2414
|
+
// Bundler-generated namespace object for src/workflow/logger.ts.
// `__exportAll` is presumably the bundler's export helper (defined elsewhere
// in this file) — maps export names to lazy getters. NOTE(review): confirm
// against the bundler runtime if this object is consumed directly.
var logger_exports = /* @__PURE__ */ __exportAll({
	createChannelLogger: () => createChannelLogger,
	createSilentLogger: () => createSilentLogger
});
|
|
2418
|
+
/**
 * Create a silent logger (no output).
 * All level methods are no-ops; isDebug() is always false and child()
 * yields another silent logger.
 */
function createSilentLogger() {
	const swallow = () => {};
	const logger = {
		isDebug: () => false,
		child: () => createSilentLogger()
	};
	for (const level of ["debug", "info", "warn", "error"]) logger[level] = swallow;
	return logger;
}
|
|
2432
|
+
/**
 * Create a logger that writes to the channel.
 *
 * - info/warn/error → channel entry with kind="system" (always shown to user)
 * - debug → channel entry with kind="debug" (only shown with --debug)
 *
 * The display layer handles formatting and filtering.
 */
function createChannelLogger(config) {
	const { provider, from = "system" } = config;
	const emit = (level, message, args) => {
		let text = message;
		if (args.length > 0) text += " " + args.map(formatArg).join(" ");
		if (level === "warn") text = `[WARN] ${text}`;
		else if (level === "error") text = `[ERROR] ${text}`;
		// Best-effort append; logging must never throw into the caller.
		provider.appendChannel(from, text, { kind: level === "debug" ? "debug" : "system" }).catch(() => {});
	};
	return {
		debug: (message, ...args) => {
			emit("debug", message, args);
		},
		info: (message, ...args) => {
			emit("info", message, args);
		},
		warn: (message, ...args) => {
			emit("warn", message, args);
		},
		error: (message, ...args) => {
			emit("error", message, args);
		},
		// Debug entries are always written; the display layer filters them.
		isDebug: () => true,
		child: (childPrefix) => createChannelLogger({
			provider,
			from: from ? `${from}:${childPrefix}` : childPrefix
		})
	};
}
|
|
2467
|
+
/** Format an argument for logging: JSON for objects, String() otherwise. */
function formatArg(arg) {
	if (arg === null || arg === undefined) return String(arg);
	if (typeof arg !== "object") return String(arg);
	try {
		return JSON.stringify(arg);
	} catch {
		// Circular references (or throwing toJSON) fall back to default stringification.
		return String(arg);
	}
}
|
|
2477
|
+
|
|
2478
|
+
//#endregion
|
|
2479
|
+
//#region src/workflow/factory.ts
|
|
2480
|
+
/**
|
|
2481
|
+
* Workflow Factory — Composable primitives for building workflow runtimes.
|
|
2482
|
+
*
|
|
2483
|
+
* These functions are the building blocks that both runner.ts (CLI direct)
|
|
2484
|
+
* and daemon.ts (service) use to create workflow infrastructure.
|
|
2485
|
+
*
|
|
2486
|
+
* Extracted from the monolithic runWorkflowWithControllers() so that
|
|
2487
|
+
* the daemon can create and manage workflow components independently.
|
|
2488
|
+
*
|
|
2489
|
+
* Usage:
|
|
2490
|
+
* 1. createMinimalRuntime() — context + MCP + event log (the "workspace")
|
|
2491
|
+
* 2. createWiredController() — backend + workspace dir + controller (per agent)
|
|
2492
|
+
* 3. Caller manages lifecycle — start/stop controllers, send kickoff, shutdown
|
|
2493
|
+
*/
|
|
2494
|
+
/**
 * Create a minimal workflow runtime.
 *
 * Sets up the shared infrastructure (context + MCP + event log) without
 * creating controllers or backends. The daemon can use this to create
 * workflow infrastructure for both standalone and multi-agent workflows.
 *
 * For standalone agents created via `POST /agents`, this gives them
 * the same context infrastructure that workflow agents get.
 */
async function createMinimalRuntime(config) {
	const { workflowName, tag, agentNames, onMention, feedback: feedbackEnabled, debugLog } = config;
	let contextProvider;
	let contextDir;
	let persistent = false;
	// Either adopt a caller-supplied provider (daemon-managed, possibly
	// persistent) or create a fresh file-backed one under the default dir.
	if (config.contextProvider && config.contextDir) {
		contextProvider = config.contextProvider;
		contextDir = config.contextDir;
		persistent = config.persistent ?? false;
	} else {
		contextDir = getDefaultContextDir(workflowName, tag);
		if (!existsSync(contextDir)) mkdirSync(contextDir, { recursive: true });
		contextProvider = createFileContextProvider(contextDir, agentNames);
		persistent = false;
	}
	await contextProvider.markRunStart();
	const projectDir = process.cwd();
	let mcpGetFeedback;
	let mcpToolNames = /* @__PURE__ */ new Set();
	const eventLog = new EventLog(contextProvider);
	// Factory handed to the HTTP MCP host; note it mutates the two closure
	// variables above each time a server instance is created.
	const createMCPServerInstance = () => {
		const mcp = createContextMCPServer({
			provider: contextProvider,
			validAgents: agentNames,
			name: `${workflowName}-context`,
			version: "1.0.0",
			onMention,
			feedback: feedbackEnabled,
			debugLog
		});
		mcpGetFeedback = mcp.getFeedback;
		mcpToolNames = mcp.mcpToolNames;
		return mcp.server;
	};
	const httpMcpServer = await runWithHttp({
		createServerInstance: createMCPServerInstance,
		port: 0
	});
	const shutdown = async () => {
		// Persistent contexts survive shutdown (only the lock is released);
		// ephemeral ones are destroyed entirely.
		if (persistent) {
			if (contextProvider instanceof FileContextProvider) contextProvider.releaseLock();
		} else await contextProvider.destroy();
		await httpMcpServer.close();
	};
	// NOTE(review): `getFeedback` is captured by value here — if runWithHttp
	// creates server instances lazily (per connection), this may still be
	// undefined at return time, or go stale after a reconnect. Verify against
	// runWithHttp's instantiation timing.
	return {
		contextProvider,
		contextDir,
		persistent,
		eventLog,
		httpMcpServer,
		mcpUrl: httpMcpServer.url,
		mcpToolNames,
		projectDir,
		getFeedback: mcpGetFeedback,
		shutdown
	};
}
|
|
2561
|
+
/**
 * Create a fully-wired agent controller.
 *
 * This handles the full setup:
 * 1. Create backend from agent definition (or use custom factory)
 * 2. Create isolated workspace directory
 * 3. Configure stream callbacks for structured event logging
 * 4. Create the AgentController with all wiring
 *
 * Extracted from runWorkflowWithControllers() so both runner.ts and
 * daemon.ts can create controllers with the same quality.
 */
function createWiredController(config) {
	const { name, agent, runtime, pollInterval, feedback: feedbackEnabled } = config;
	const logger = config.logger ?? createSilentLogger();
	// Routes backend streaming output into the shared event log.
	const streamCallbacks = {
		debugLog: (msg) => logger.debug(msg),
		outputLog: (msg) => runtime.eventLog.output(name, msg),
		toolCallLog: (toolName, args) => runtime.eventLog.toolCall(name, toolName, args, "backend"),
		mcpToolNames: runtime.mcpToolNames
	};
	// Backend selection precedence: custom factory > explicit backend type >
	// inferred from model name.
	let backend;
	if (config.createBackend) backend = config.createBackend(name, agent);
	else if (agent.backend) backend = getBackendByType(agent.backend, {
		model: agent.model,
		provider: agent.provider,
		debugLog: (msg) => logger.debug(msg),
		streamCallbacks,
		timeout: agent.timeout
	});
	else if (agent.model) backend = getBackendForModel(agent.model, {
		provider: agent.provider,
		debugLog: (msg) => logger.debug(msg),
		streamCallbacks
	});
	else throw new Error(`Agent "${name}" requires either a backend or model field`);
	// Each agent gets its own workspace dir under the shared context dir.
	const workspaceDir = join(runtime.contextDir, "workspaces", name);
	if (!existsSync(workspaceDir)) mkdirSync(workspaceDir, { recursive: true });
	return {
		controller: createAgentController({
			name,
			agent,
			contextProvider: runtime.contextProvider,
			eventLog: runtime.eventLog,
			mcpUrl: runtime.mcpUrl,
			workspaceDir,
			projectDir: runtime.projectDir,
			backend,
			pollInterval,
			log: (msg) => logger.debug(msg),
			infoLog: (msg) => logger.info(msg),
			errorLog: (msg) => logger.error(msg),
			feedback: feedbackEnabled
		}),
		backend
	};
}
|
|
2618
|
+
|
|
1423
2619
|
//#endregion
|
|
1424
2620
|
//#region src/daemon/daemon.ts
|
|
1425
2621
|
/**
|
|
1426
2622
|
* Daemon — Centralized agent coordinator.
|
|
1427
2623
|
*
|
|
2624
|
+
* Architecture: Interface → Daemon → Controller (three layers)
|
|
2625
|
+
* Interface: CLI/REST/MCP clients talk to daemon via HTTP
|
|
2626
|
+
* Daemon: This module — owns lifecycle, creates workflows + controllers
|
|
2627
|
+
* Controller: AgentController + Backend — executes agent reasoning
|
|
2628
|
+
*
|
|
1428
2629
|
* Data ownership:
|
|
1429
|
-
* Registry (configs)
|
|
1430
|
-
*
|
|
1431
|
-
* WorkerHandle (workers) — execution, local or remote
|
|
1432
|
-
* Workflows (workflows) — running workflow instances with controllers
|
|
2630
|
+
* Registry (configs) — what agents exist and their configuration
|
|
2631
|
+
* Workflows (workflows) — running workflow instances with controllers + context
|
|
1433
2632
|
*
|
|
1434
|
-
*
|
|
1435
|
-
*
|
|
2633
|
+
* Key principle: every agent lives in a workflow. Standalone agents created via
|
|
2634
|
+
* POST /agents get a 1-agent workflow (created lazily on first /run or /serve).
|
|
2635
|
+
* This unifies the runtime so there's one code path for execution.
|
|
1436
2636
|
*
|
|
1437
2637
|
* HTTP endpoints:
|
|
1438
2638
|
* GET /health, POST /shutdown
|
|
@@ -1456,9 +2656,6 @@ async function gracefulShutdown() {
|
|
|
1456
2656
|
await wf.shutdown();
|
|
1457
2657
|
} catch {}
|
|
1458
2658
|
state.workflows.clear();
|
|
1459
|
-
for (const [name, handle] of state.workers) try {
|
|
1460
|
-
await state.store.save(name, handle.getState());
|
|
1461
|
-
} catch {}
|
|
1462
2659
|
if (state.server) await state.server.close();
|
|
1463
2660
|
}
|
|
1464
2661
|
for (const [, session] of mcpSessions) try {
|
|
@@ -1476,6 +2673,82 @@ async function parseJsonBody(c) {
|
|
|
1476
2673
|
return null;
|
|
1477
2674
|
}
|
|
1478
2675
|
}
|
|
2676
|
+
/** Map AgentConfig to the ResolvedAgent type needed by the factory */
function configToResolvedAgent(cfg) {
	const { backend, model, provider, system, schedule } = cfg;
	return {
		backend,
		model,
		provider,
		// The stored `system` string is already fully resolved.
		resolvedSystemPrompt: system,
		schedule
	};
}
|
|
2686
|
+
/**
 * Find an agent's controller across all workflows.
 * Returns { controller, workflow } if the agent exists in any workflow,
 * otherwise null.
 */
function findController(s, agentName) {
	for (const workflow of s.workflows.values()) {
		const controller = workflow.controllers.get(agentName);
		if (controller) return {
			controller,
			workflow
		};
	}
	return null;
}
|
|
2700
|
+
/**
 * Ensure a standalone agent has a workflow + controller.
 * Creates the infrastructure lazily on first call (starts MCP server, etc.).
 *
 * This is the bridge between POST /agents (stores config only) and
 * POST /run or /serve (needs a controller to execute).
 *
 * @throws if the agent name is not in the config registry.
 */
async function ensureAgentController(s, agentName) {
	// Reuse an existing controller from any workflow if one is already live.
	const existing = findController(s, agentName);
	if (existing) return existing;
	const cfg = s.configs.get(agentName);
	if (!cfg) throw new Error(`Agent not found: ${agentName}`);
	const agentDef = configToResolvedAgent(cfg);
	const wfKey = `standalone:${agentName}`;
	// One-agent runtime: context provider + MCP server + event log.
	const runtime = await createMinimalRuntime({
		workflowName: cfg.workflow,
		tag: cfg.tag,
		agentNames: [agentName]
	});
	let controller;
	try {
		({controller} = createWiredController({
			name: agentName,
			agent: agentDef,
			runtime
		}));
	} catch (err) {
		// Don't leak the MCP server/context if controller wiring fails.
		await runtime.shutdown();
		throw err;
	}
	const handle = {
		name: cfg.workflow,
		tag: cfg.tag,
		key: wfKey,
		agents: [agentName],
		controllers: new Map([[agentName, controller]]),
		contextProvider: runtime.contextProvider,
		shutdown: async () => {
			// Stop the controller first; the runtime is torn down regardless.
			try {
				await controller.stop();
			} finally {
				await runtime.shutdown();
			}
		},
		startedAt: (/* @__PURE__ */ new Date()).toISOString()
	};
	// Register under the synthetic key so findController sees it next time.
	s.workflows.set(wfKey, handle);
	return {
		controller,
		workflow: handle
	};
}
|
|
1479
2752
|
function createDaemonApp(optionsOrGetState) {
|
|
1480
2753
|
const { getState, token } = typeof optionsOrGetState === "function" ? {
|
|
1481
2754
|
getState: optionsOrGetState,
|
|
@@ -1516,15 +2789,19 @@ function createDaemonApp(optionsOrGetState) {
|
|
|
1516
2789
|
app.get("/agents", (c) => {
|
|
1517
2790
|
const s = getState();
|
|
1518
2791
|
if (!s) return c.json({ error: "Not ready" }, 503);
|
|
1519
|
-
const standaloneAgents = [...s.configs.values()].map((cfg) =>
|
|
1520
|
-
|
|
1521
|
-
|
|
1522
|
-
|
|
1523
|
-
|
|
1524
|
-
|
|
1525
|
-
|
|
1526
|
-
|
|
1527
|
-
|
|
2792
|
+
const standaloneAgents = [...s.configs.values()].map((cfg) => {
|
|
2793
|
+
const ctrl = findController(s, cfg.name);
|
|
2794
|
+
return {
|
|
2795
|
+
name: cfg.name,
|
|
2796
|
+
model: cfg.model,
|
|
2797
|
+
backend: cfg.backend,
|
|
2798
|
+
workflow: cfg.workflow,
|
|
2799
|
+
tag: cfg.tag,
|
|
2800
|
+
createdAt: cfg.createdAt,
|
|
2801
|
+
source: "standalone",
|
|
2802
|
+
state: ctrl?.controller.state
|
|
2803
|
+
};
|
|
2804
|
+
});
|
|
1528
2805
|
const workflowAgents = [...s.workflows.values()].flatMap((wf) => wf.agents.map((agentName) => {
|
|
1529
2806
|
const controller = wf.controllers.get(agentName);
|
|
1530
2807
|
return {
|
|
@@ -1545,7 +2822,7 @@ function createDaemonApp(optionsOrGetState) {
|
|
|
1545
2822
|
if (!s) return c.json({ error: "Not ready" }, 503);
|
|
1546
2823
|
const body = await parseJsonBody(c);
|
|
1547
2824
|
if (!body || typeof body !== "object") return c.json({ error: "Invalid JSON body" }, 400);
|
|
1548
|
-
const { name, model, system, backend = "default", workflow = "global", tag = "main" } = body;
|
|
2825
|
+
const { name, model, system, backend = "default", provider, workflow = "global", tag = "main", schedule } = body;
|
|
1549
2826
|
if (!name || !model || !system) return c.json({ error: "name, model, system required" }, 400);
|
|
1550
2827
|
if (s.configs.has(name)) return c.json({ error: `Agent already exists: ${name}` }, 409);
|
|
1551
2828
|
const agentConfig = {
|
|
@@ -1553,19 +2830,20 @@ function createDaemonApp(optionsOrGetState) {
|
|
|
1553
2830
|
model,
|
|
1554
2831
|
system,
|
|
1555
2832
|
backend,
|
|
2833
|
+
provider,
|
|
1556
2834
|
workflow,
|
|
1557
2835
|
tag,
|
|
1558
|
-
createdAt: (/* @__PURE__ */ new Date()).toISOString()
|
|
2836
|
+
createdAt: (/* @__PURE__ */ new Date()).toISOString(),
|
|
2837
|
+
schedule
|
|
1559
2838
|
};
|
|
1560
|
-
const handle = new LocalWorker(agentConfig, await s.store.load(name) ?? void 0);
|
|
1561
2839
|
s.configs.set(name, agentConfig);
|
|
1562
|
-
s.workers.set(name, handle);
|
|
1563
2840
|
return c.json({
|
|
1564
2841
|
name,
|
|
1565
2842
|
model,
|
|
1566
2843
|
backend,
|
|
1567
2844
|
workflow,
|
|
1568
|
-
tag
|
|
2845
|
+
tag,
|
|
2846
|
+
schedule
|
|
1569
2847
|
}, 201);
|
|
1570
2848
|
});
|
|
1571
2849
|
app.get("/agents/:name", (c) => {
|
|
@@ -1580,7 +2858,8 @@ function createDaemonApp(optionsOrGetState) {
|
|
|
1580
2858
|
system: cfg.system,
|
|
1581
2859
|
workflow: cfg.workflow,
|
|
1582
2860
|
tag: cfg.tag,
|
|
1583
|
-
createdAt: cfg.createdAt
|
|
2861
|
+
createdAt: cfg.createdAt,
|
|
2862
|
+
schedule: cfg.schedule
|
|
1584
2863
|
});
|
|
1585
2864
|
});
|
|
1586
2865
|
app.delete("/agents/:name", async (c) => {
|
|
@@ -1588,11 +2867,14 @@ function createDaemonApp(optionsOrGetState) {
|
|
|
1588
2867
|
if (!s) return c.json({ error: "Not ready" }, 503);
|
|
1589
2868
|
const name = c.req.param("name");
|
|
1590
2869
|
if (!s.configs.delete(name)) return c.json({ error: "Agent not found" }, 404);
|
|
1591
|
-
const
|
|
1592
|
-
|
|
1593
|
-
|
|
1594
|
-
|
|
1595
|
-
|
|
2870
|
+
const wfKey = `standalone:${name}`;
|
|
2871
|
+
const wf = s.workflows.get(wfKey);
|
|
2872
|
+
if (wf) {
|
|
2873
|
+
try {
|
|
2874
|
+
await wf.shutdown();
|
|
2875
|
+
} catch {}
|
|
2876
|
+
s.workflows.delete(wfKey);
|
|
2877
|
+
}
|
|
1596
2878
|
return c.json({ success: true });
|
|
1597
2879
|
});
|
|
1598
2880
|
app.post("/run", async (c) => {
|
|
@@ -1602,30 +2884,36 @@ function createDaemonApp(optionsOrGetState) {
|
|
|
1602
2884
|
if (!body || typeof body !== "object") return c.json({ error: "Invalid JSON body" }, 400);
|
|
1603
2885
|
const { agent: agentName, message } = body;
|
|
1604
2886
|
if (!agentName || !message) return c.json({ error: "agent and message required" }, 400);
|
|
1605
|
-
|
|
1606
|
-
|
|
2887
|
+
let controller;
|
|
2888
|
+
const controllerResult = findController(s, agentName);
|
|
2889
|
+
if (controllerResult) controller = controllerResult.controller;
|
|
2890
|
+
else if (s.configs.has(agentName)) try {
|
|
2891
|
+
controller = (await ensureAgentController(s, agentName)).controller;
|
|
2892
|
+
} catch (error) {
|
|
2893
|
+
const msg = error instanceof Error ? error.message : String(error);
|
|
2894
|
+
return c.json({ error: `Failed to create agent runtime: ${msg}` }, 500);
|
|
2895
|
+
}
|
|
2896
|
+
if (!controller) return c.json({ error: `Agent not found: ${agentName}` }, 404);
|
|
2897
|
+
const ctrl = controller;
|
|
1607
2898
|
return streamSSE(c, async (stream) => {
|
|
1608
2899
|
try {
|
|
1609
|
-
const
|
|
1610
|
-
|
|
1611
|
-
|
|
1612
|
-
if (done) {
|
|
1613
|
-
const currentState = getState();
|
|
1614
|
-
if (currentState) await currentState.store.save(agentName, handle.getState());
|
|
1615
|
-
await stream.writeSSE({
|
|
1616
|
-
event: "done",
|
|
1617
|
-
data: JSON.stringify(value)
|
|
1618
|
-
});
|
|
1619
|
-
break;
|
|
1620
|
-
}
|
|
1621
|
-
await stream.writeSSE({
|
|
2900
|
+
const result = await ctrl.sendDirect(message);
|
|
2901
|
+
if (result.success) {
|
|
2902
|
+
if (result.content) await stream.writeSSE({
|
|
1622
2903
|
event: "chunk",
|
|
1623
2904
|
data: JSON.stringify({
|
|
1624
2905
|
agent: agentName,
|
|
1625
|
-
text:
|
|
2906
|
+
text: result.content
|
|
1626
2907
|
})
|
|
1627
2908
|
});
|
|
1628
|
-
|
|
2909
|
+
await stream.writeSSE({
|
|
2910
|
+
event: "done",
|
|
2911
|
+
data: JSON.stringify(result)
|
|
2912
|
+
});
|
|
2913
|
+
} else await stream.writeSSE({
|
|
2914
|
+
event: "error",
|
|
2915
|
+
data: JSON.stringify({ error: result.error })
|
|
2916
|
+
});
|
|
1629
2917
|
} catch (error) {
|
|
1630
2918
|
const msg = error instanceof Error ? error.message : String(error);
|
|
1631
2919
|
await stream.writeSSE({
|
|
@@ -1642,12 +2930,24 @@ function createDaemonApp(optionsOrGetState) {
|
|
|
1642
2930
|
if (!body || typeof body !== "object") return c.json({ error: "Invalid JSON body" }, 400);
|
|
1643
2931
|
const { agent: agentName, message } = body;
|
|
1644
2932
|
if (!agentName || !message) return c.json({ error: "agent and message required" }, 400);
|
|
1645
|
-
|
|
1646
|
-
|
|
2933
|
+
let controller;
|
|
2934
|
+
const controllerResult = findController(s, agentName);
|
|
2935
|
+
if (controllerResult) controller = controllerResult.controller;
|
|
2936
|
+
else if (s.configs.has(agentName)) try {
|
|
2937
|
+
controller = (await ensureAgentController(s, agentName)).controller;
|
|
2938
|
+
} catch (error) {
|
|
2939
|
+
const msg = error instanceof Error ? error.message : String(error);
|
|
2940
|
+
return c.json({ error: msg }, 500);
|
|
2941
|
+
}
|
|
2942
|
+
if (!controller) return c.json({ error: `Agent not found: ${agentName}` }, 404);
|
|
1647
2943
|
try {
|
|
1648
|
-
const
|
|
1649
|
-
|
|
1650
|
-
return c.json(
|
|
2944
|
+
const result = await controller.sendDirect(message);
|
|
2945
|
+
if (!result.success) return c.json({ error: result.error }, 500);
|
|
2946
|
+
return c.json({
|
|
2947
|
+
content: result.content ?? "",
|
|
2948
|
+
duration: result.duration,
|
|
2949
|
+
success: true
|
|
2950
|
+
});
|
|
1651
2951
|
} catch (error) {
|
|
1652
2952
|
const msg = error instanceof Error ? error.message : String(error);
|
|
1653
2953
|
return c.json({ error: msg }, 500);
|
|
@@ -1674,15 +2974,18 @@ function createDaemonApp(optionsOrGetState) {
|
|
|
1674
2974
|
const agentCfg = s.configs.get(agentName);
|
|
1675
2975
|
const workflow = agentCfg?.workflow ?? "global";
|
|
1676
2976
|
const tag = agentCfg?.tag ?? "main";
|
|
2977
|
+
const existingWf = findController(s, agentName)?.workflow ?? s.workflows.get(`${workflow}:${tag}`);
|
|
1677
2978
|
const workflowAgents = getWorkflowAgentNames(workflow, tag);
|
|
1678
2979
|
const allNames = [...new Set([
|
|
1679
2980
|
...workflowAgents,
|
|
1680
2981
|
agentName,
|
|
1681
2982
|
"user"
|
|
1682
2983
|
])];
|
|
1683
|
-
const
|
|
1684
|
-
|
|
1685
|
-
|
|
2984
|
+
const provider = existingWf?.contextProvider ?? (() => {
|
|
2985
|
+
const contextDir = getDefaultContextDir(workflow, tag);
|
|
2986
|
+
mkdirSync(contextDir, { recursive: true });
|
|
2987
|
+
return createFileContextProvider(contextDir, allNames);
|
|
2988
|
+
})();
|
|
1686
2989
|
const transport = new WebStandardStreamableHTTPServerTransport({
|
|
1687
2990
|
sessionIdGenerator: () => `${agentName}-${randomUUID().slice(0, 8)}`,
|
|
1688
2991
|
onsessioninitialized: (sid) => {
|
|
@@ -1718,7 +3021,7 @@ function createDaemonApp(optionsOrGetState) {
|
|
|
1718
3021
|
const key = `${workflowName}:${tag}`;
|
|
1719
3022
|
if (s.workflows.has(key)) return c.json({ error: `Workflow already running: ${key}` }, 409);
|
|
1720
3023
|
try {
|
|
1721
|
-
const { runWorkflowWithControllers } = await import("../runner-
|
|
3024
|
+
const { runWorkflowWithControllers } = await import("../runner-DV86expc.mjs");
|
|
1722
3025
|
const result = await runWorkflowWithControllers({
|
|
1723
3026
|
workflow,
|
|
1724
3027
|
workflowName,
|
|
@@ -1820,7 +3123,6 @@ async function startDaemon(config = {}) {
|
|
|
1820
3123
|
});
|
|
1821
3124
|
state = {
|
|
1822
3125
|
configs: /* @__PURE__ */ new Map(),
|
|
1823
|
-
workers: /* @__PURE__ */ new Map(),
|
|
1824
3126
|
workflows: /* @__PURE__ */ new Map(),
|
|
1825
3127
|
store,
|
|
1826
3128
|
server,
|
|
@@ -2069,25 +3371,46 @@ function registerAgentCommands(program) {
|
|
|
2069
3371
|
"claude",
|
|
2070
3372
|
"codex",
|
|
2071
3373
|
"cursor",
|
|
3374
|
+
"opencode",
|
|
2072
3375
|
"mock"
|
|
2073
|
-
]).default("default")).option("-s, --system <prompt>", "System prompt", "You are a helpful assistant.").option("-f, --system-file <file>", "Read system prompt from file").option("--workflow <name>", "Workflow name (default: global)").option("--tag <tag>", "Workflow instance tag (default: main)").option("--port <port>", `Daemon port if starting new daemon (default: ${DEFAULT_PORT})`).option("--host <host>", "Daemon host (default: 127.0.0.1)").option("--json", "Output as JSON").addHelpText("after", `
|
|
3376
|
+
]).default("default")).option("--provider <name>", "Provider SDK name (e.g., anthropic, openai)").option("--base-url <url>", "Override provider base URL").option("--api-key <ref>", "API key env var (e.g., $MINIMAX_API_KEY)").option("-s, --system <prompt>", "System prompt", "You are a helpful assistant.").option("-f, --system-file <file>", "Read system prompt from file").option("--workflow <name>", "Workflow name (default: global)").option("--tag <tag>", "Workflow instance tag (default: main)").option("--wakeup <interval|cron>", "Periodic wakeup schedule (e.g., 30s, 5m, 0 9 * * 1-5)").option("--wakeup-prompt <text>", "Custom prompt for wakeup events").option("--port <port>", `Daemon port if starting new daemon (default: ${DEFAULT_PORT})`).option("--host <host>", "Daemon host (default: 127.0.0.1)").option("--json", "Output as JSON").addHelpText("after", `
|
|
2074
3377
|
Examples:
|
|
2075
3378
|
$ agent-worker new alice -m anthropic/claude-sonnet-4-5
|
|
2076
3379
|
$ agent-worker new bot -b mock
|
|
2077
3380
|
$ agent-worker new reviewer --workflow review --tag pr-123
|
|
3381
|
+
$ agent-worker new monitor --wakeup 30s --system "Check status"
|
|
3382
|
+
$ agent-worker new coder -m MiniMax-M2.5 --provider anthropic --base-url https://api.minimax.io/anthropic/v1 --api-key '$MINIMAX_API_KEY'
|
|
2078
3383
|
`).action(async (name, options) => {
|
|
2079
3384
|
let system = options.system;
|
|
2080
3385
|
if (options.systemFile) system = readFileSync(options.systemFile, "utf-8");
|
|
2081
3386
|
const backend = normalizeBackendType(options.backend ?? "default");
|
|
2082
3387
|
const model = options.model || getDefaultModel();
|
|
3388
|
+
let provider;
|
|
3389
|
+
if (options.provider) if (options.baseUrl || options.apiKey) provider = {
|
|
3390
|
+
name: options.provider,
|
|
3391
|
+
base_url: options.baseUrl,
|
|
3392
|
+
api_key: options.apiKey
|
|
3393
|
+
};
|
|
3394
|
+
else provider = options.provider;
|
|
3395
|
+
if (options.wakeupPrompt && !options.wakeup) {
|
|
3396
|
+
console.error("Error: --wakeup-prompt can only be used with --wakeup.");
|
|
3397
|
+
process.exit(1);
|
|
3398
|
+
}
|
|
3399
|
+
let schedule;
|
|
3400
|
+
if (options.wakeup) {
|
|
3401
|
+
schedule = { wakeup: options.wakeup };
|
|
3402
|
+
if (options.wakeupPrompt) schedule.prompt = options.wakeupPrompt;
|
|
3403
|
+
}
|
|
2083
3404
|
await ensureDaemon(options.port ? parseInt(options.port, 10) : void 0, options.host);
|
|
2084
3405
|
const res = await createAgent({
|
|
2085
3406
|
name,
|
|
2086
3407
|
model,
|
|
2087
3408
|
system,
|
|
2088
3409
|
backend,
|
|
3410
|
+
provider,
|
|
2089
3411
|
workflow: options.workflow,
|
|
2090
|
-
tag: options.tag
|
|
3412
|
+
tag: options.tag,
|
|
3413
|
+
schedule
|
|
2091
3414
|
});
|
|
2092
3415
|
if (res.error) {
|
|
2093
3416
|
console.error("Error:", res.error);
|
|
@@ -2339,7 +3662,7 @@ Examples:
|
|
|
2339
3662
|
|
|
2340
3663
|
Note: Workflow name is inferred from YAML 'name' field or filename
|
|
2341
3664
|
`).action(async (file, options) => {
|
|
2342
|
-
const { parseWorkflowFile, runWorkflowWithControllers } = await import("../workflow-
|
|
3665
|
+
const { parseWorkflowFile, runWorkflowWithControllers } = await import("../workflow-DogkVjOs.mjs");
|
|
2343
3666
|
const tag = options.tag || DEFAULT_TAG;
|
|
2344
3667
|
const parsedWorkflow = await parseWorkflowFile(file, { tag });
|
|
2345
3668
|
const workflowName = parsedWorkflow.name;
|
|
@@ -2350,8 +3673,8 @@ Note: Workflow name is inferred from YAML 'name' field or filename
|
|
|
2350
3673
|
isCleaningUp = true;
|
|
2351
3674
|
console.log("\nInterrupted, cleaning up...");
|
|
2352
3675
|
if (controllers) {
|
|
2353
|
-
const { shutdownControllers } = await import("../workflow-
|
|
2354
|
-
const { createSilentLogger } = await
|
|
3676
|
+
const { shutdownControllers } = await import("../workflow-DogkVjOs.mjs");
|
|
3677
|
+
const { createSilentLogger } = await Promise.resolve().then(() => logger_exports);
|
|
2355
3678
|
await shutdownControllers(controllers, createSilentLogger());
|
|
2356
3679
|
}
|
|
2357
3680
|
process.exit(130);
|
|
@@ -2423,7 +3746,7 @@ Workflow runs inside the daemon. Use ls/stop to manage:
|
|
|
2423
3746
|
|
|
2424
3747
|
Note: Workflow name is inferred from YAML 'name' field or filename
|
|
2425
3748
|
`).action(async (file, options) => {
|
|
2426
|
-
const { parseWorkflowFile } = await import("../workflow-
|
|
3749
|
+
const { parseWorkflowFile } = await import("../workflow-DogkVjOs.mjs");
|
|
2427
3750
|
const { ensureDaemon } = await Promise.resolve().then(() => agent_exports);
|
|
2428
3751
|
const tag = options.tag || DEFAULT_TAG;
|
|
2429
3752
|
const parsedWorkflow = await parseWorkflowFile(file, { tag });
|
|
@@ -2574,10 +3897,6 @@ const PROVIDER_API_KEYS = {
|
|
|
2574
3897
|
xai: {
|
|
2575
3898
|
envVar: "XAI_API_KEY",
|
|
2576
3899
|
description: "xAI Grok"
|
|
2577
|
-
},
|
|
2578
|
-
minimax: {
|
|
2579
|
-
envVar: "MINIMAX_API_KEY",
|
|
2580
|
-
description: "MiniMax"
|
|
2581
3900
|
}
|
|
2582
3901
|
};
|
|
2583
3902
|
function registerInfoCommands(program) {
|
|
@@ -2598,10 +3917,11 @@ function registerInfoCommands(program) {
|
|
|
2598
3917
|
console.log(` Provider only: provider (e.g., openai → ${gatewayExample})`);
|
|
2599
3918
|
console.log(` Gateway format: provider/model (e.g., ${gatewayExample})`);
|
|
2600
3919
|
console.log(` Direct format: provider:model (e.g., ${directExample})`);
|
|
3920
|
+
console.log(` Custom endpoint: --provider anthropic --base-url <url> --api-key '$KEY'`);
|
|
2601
3921
|
console.log(`\nDefault: ${defaultModel} (when no model specified)`);
|
|
2602
3922
|
});
|
|
2603
3923
|
program.command("backends").description("Check available backends (SDK, CLI tools)").action(async () => {
|
|
2604
|
-
const { listBackends } = await import("../backends-
|
|
3924
|
+
const { listBackends } = await import("../backends-Cv0oM9Ru.mjs");
|
|
2605
3925
|
const backends = await listBackends();
|
|
2606
3926
|
console.log("Backend Status:\n");
|
|
2607
3927
|
for (const backend of backends) {
|
|
@@ -2631,7 +3951,7 @@ Examples:
|
|
|
2631
3951
|
$ agent-worker doc read @review:pr-123 # Read specific workflow:tag document
|
|
2632
3952
|
`).action(async (targetInput) => {
|
|
2633
3953
|
const dir = await resolveDir(targetInput);
|
|
2634
|
-
const { createFileContextProvider } = await import("../context-
|
|
3954
|
+
const { createFileContextProvider } = await import("../context-CzqQeThq.mjs");
|
|
2635
3955
|
const content = await createFileContextProvider(dir, []).readDocument();
|
|
2636
3956
|
console.log(content || "(empty document)");
|
|
2637
3957
|
});
|
|
@@ -2649,7 +3969,7 @@ Examples:
|
|
|
2649
3969
|
process.exit(1);
|
|
2650
3970
|
}
|
|
2651
3971
|
const dir = await resolveDir(targetInput);
|
|
2652
|
-
const { createFileContextProvider } = await import("../context-
|
|
3972
|
+
const { createFileContextProvider } = await import("../context-CzqQeThq.mjs");
|
|
2653
3973
|
await createFileContextProvider(dir, []).writeDocument(content);
|
|
2654
3974
|
console.log("Document written");
|
|
2655
3975
|
});
|
|
@@ -2667,7 +3987,7 @@ Examples:
|
|
|
2667
3987
|
process.exit(1);
|
|
2668
3988
|
}
|
|
2669
3989
|
const dir = await resolveDir(targetInput);
|
|
2670
|
-
const { createFileContextProvider } = await import("../context-
|
|
3990
|
+
const { createFileContextProvider } = await import("../context-CzqQeThq.mjs");
|
|
2671
3991
|
await createFileContextProvider(dir, []).appendDocument(content);
|
|
2672
3992
|
console.log("Content appended");
|
|
2673
3993
|
});
|
|
@@ -2681,7 +4001,7 @@ async function resolveDir(targetInput) {
|
|
|
2681
4001
|
|
|
2682
4002
|
//#endregion
|
|
2683
4003
|
//#region package.json
|
|
2684
|
-
var version = "0.
|
|
4004
|
+
var version = "0.14.0";
|
|
2685
4005
|
|
|
2686
4006
|
//#endregion
|
|
2687
4007
|
//#region src/cli/index.ts
|
|
@@ -2704,4 +4024,4 @@ registerDocCommands(program);
|
|
|
2704
4024
|
program.parse();
|
|
2705
4025
|
|
|
2706
4026
|
//#endregion
|
|
2707
|
-
export {
|
|
4027
|
+
export { extractMentions as A, EventLog as B, CONTEXT_DEFAULTS as C, RESOURCE_SCHEME as D, RESOURCE_PREFIX as E, formatProposalList as F, createLogTool as I, formatInbox$1 as L, shouldUseResource as M, createContextMCPServer as N, calculatePriority as O, formatProposal as P, formatToolParams as R, ContextProviderImpl as S, MESSAGE_LENGTH_THRESHOLD as T, createFileContextProvider as _, getBackendByType as a, FileStorage as b, createAgentController as c, generateWorkflowMCPConfig as d, buildAgentPrompt as f, FileContextProvider as g, runWithHttp as h, createSilentLogger as i, generateResourceId as j, createResourceRef as k, runSdkAgent as l, CONTROLLER_DEFAULTS as m, createWiredController as n, getBackendForModel as o, formatInbox as p, createChannelLogger as r, checkWorkflowIdle as s, createMinimalRuntime as t, runMockAgent as u, getDefaultContextDir as v, MENTION_PATTERN as w, MemoryStorage as x, resolveContextDir as y, getAgentId as z };
|