pilotswarm-sdk 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent-loader.d.ts +61 -0
- package/dist/agent-loader.d.ts.map +1 -0
- package/dist/agent-loader.js +212 -0
- package/dist/agent-loader.js.map +1 -0
- package/dist/artifact-tools.d.ts +31 -0
- package/dist/artifact-tools.d.ts.map +1 -0
- package/dist/artifact-tools.js +190 -0
- package/dist/artifact-tools.js.map +1 -0
- package/dist/blob-store.d.ts +73 -0
- package/dist/blob-store.d.ts.map +1 -0
- package/dist/blob-store.js +220 -0
- package/dist/blob-store.js.map +1 -0
- package/dist/client.d.ts +159 -0
- package/dist/client.d.ts.map +1 -0
- package/dist/client.js +676 -0
- package/dist/client.js.map +1 -0
- package/dist/cms.d.ts +129 -0
- package/dist/cms.d.ts.map +1 -0
- package/dist/cms.js +313 -0
- package/dist/cms.js.map +1 -0
- package/dist/index.d.ts +44 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +42 -0
- package/dist/index.js.map +1 -0
- package/dist/managed-session.d.ts +70 -0
- package/dist/managed-session.d.ts.map +1 -0
- package/dist/managed-session.js +717 -0
- package/dist/managed-session.js.map +1 -0
- package/dist/management-client.d.ts +171 -0
- package/dist/management-client.d.ts.map +1 -0
- package/dist/management-client.js +401 -0
- package/dist/management-client.js.map +1 -0
- package/dist/mcp-loader.d.ts +50 -0
- package/dist/mcp-loader.d.ts.map +1 -0
- package/dist/mcp-loader.js +83 -0
- package/dist/mcp-loader.js.map +1 -0
- package/dist/model-providers.d.ts +143 -0
- package/dist/model-providers.d.ts.map +1 -0
- package/dist/model-providers.js +228 -0
- package/dist/model-providers.js.map +1 -0
- package/dist/orchestration-registry.d.ts +7 -0
- package/dist/orchestration-registry.d.ts.map +1 -0
- package/dist/orchestration-registry.js +49 -0
- package/dist/orchestration-registry.js.map +1 -0
- package/dist/orchestration.d.ts +36 -0
- package/dist/orchestration.d.ts.map +1 -0
- package/dist/orchestration.js +1357 -0
- package/dist/orchestration.js.map +1 -0
- package/dist/orchestration_1_0_0.d.ts +20 -0
- package/dist/orchestration_1_0_0.d.ts.map +1 -0
- package/dist/orchestration_1_0_0.js +497 -0
- package/dist/orchestration_1_0_0.js.map +1 -0
- package/dist/orchestration_1_0_1.d.ts +19 -0
- package/dist/orchestration_1_0_1.d.ts.map +1 -0
- package/dist/orchestration_1_0_1.js +546 -0
- package/dist/orchestration_1_0_1.js.map +1 -0
- package/dist/orchestration_1_0_10.d.ts +36 -0
- package/dist/orchestration_1_0_10.d.ts.map +1 -0
- package/dist/orchestration_1_0_10.js +1253 -0
- package/dist/orchestration_1_0_10.js.map +1 -0
- package/dist/orchestration_1_0_11.d.ts +36 -0
- package/dist/orchestration_1_0_11.d.ts.map +1 -0
- package/dist/orchestration_1_0_11.js +1255 -0
- package/dist/orchestration_1_0_11.js.map +1 -0
- package/dist/orchestration_1_0_12.d.ts +36 -0
- package/dist/orchestration_1_0_12.d.ts.map +1 -0
- package/dist/orchestration_1_0_12.js +1250 -0
- package/dist/orchestration_1_0_12.js.map +1 -0
- package/dist/orchestration_1_0_13.d.ts +36 -0
- package/dist/orchestration_1_0_13.d.ts.map +1 -0
- package/dist/orchestration_1_0_13.js +1260 -0
- package/dist/orchestration_1_0_13.js.map +1 -0
- package/dist/orchestration_1_0_14.d.ts +36 -0
- package/dist/orchestration_1_0_14.d.ts.map +1 -0
- package/dist/orchestration_1_0_14.js +1258 -0
- package/dist/orchestration_1_0_14.js.map +1 -0
- package/dist/orchestration_1_0_15.d.ts +36 -0
- package/dist/orchestration_1_0_15.d.ts.map +1 -0
- package/dist/orchestration_1_0_15.js +1266 -0
- package/dist/orchestration_1_0_15.js.map +1 -0
- package/dist/orchestration_1_0_16.d.ts +36 -0
- package/dist/orchestration_1_0_16.d.ts.map +1 -0
- package/dist/orchestration_1_0_16.js +1275 -0
- package/dist/orchestration_1_0_16.js.map +1 -0
- package/dist/orchestration_1_0_17.d.ts +36 -0
- package/dist/orchestration_1_0_17.d.ts.map +1 -0
- package/dist/orchestration_1_0_17.js +1314 -0
- package/dist/orchestration_1_0_17.js.map +1 -0
- package/dist/orchestration_1_0_18.d.ts +36 -0
- package/dist/orchestration_1_0_18.d.ts.map +1 -0
- package/dist/orchestration_1_0_18.js +1328 -0
- package/dist/orchestration_1_0_18.js.map +1 -0
- package/dist/orchestration_1_0_19.d.ts +36 -0
- package/dist/orchestration_1_0_19.d.ts.map +1 -0
- package/dist/orchestration_1_0_19.js +1324 -0
- package/dist/orchestration_1_0_19.js.map +1 -0
- package/dist/orchestration_1_0_2.d.ts +19 -0
- package/dist/orchestration_1_0_2.d.ts.map +1 -0
- package/dist/orchestration_1_0_2.js +749 -0
- package/dist/orchestration_1_0_2.js.map +1 -0
- package/dist/orchestration_1_0_20.d.ts +36 -0
- package/dist/orchestration_1_0_20.d.ts.map +1 -0
- package/dist/orchestration_1_0_20.js +1347 -0
- package/dist/orchestration_1_0_20.js.map +1 -0
- package/dist/orchestration_1_0_3.d.ts +19 -0
- package/dist/orchestration_1_0_3.d.ts.map +1 -0
- package/dist/orchestration_1_0_3.js +826 -0
- package/dist/orchestration_1_0_3.js.map +1 -0
- package/dist/orchestration_1_0_4.d.ts +19 -0
- package/dist/orchestration_1_0_4.d.ts.map +1 -0
- package/dist/orchestration_1_0_4.js +1020 -0
- package/dist/orchestration_1_0_4.js.map +1 -0
- package/dist/orchestration_1_0_5.d.ts +19 -0
- package/dist/orchestration_1_0_5.d.ts.map +1 -0
- package/dist/orchestration_1_0_5.js +1027 -0
- package/dist/orchestration_1_0_5.js.map +1 -0
- package/dist/orchestration_1_0_6.d.ts +19 -0
- package/dist/orchestration_1_0_6.d.ts.map +1 -0
- package/dist/orchestration_1_0_6.js +1034 -0
- package/dist/orchestration_1_0_6.js.map +1 -0
- package/dist/orchestration_1_0_7.d.ts +19 -0
- package/dist/orchestration_1_0_7.d.ts.map +1 -0
- package/dist/orchestration_1_0_7.js +1085 -0
- package/dist/orchestration_1_0_7.js.map +1 -0
- package/dist/orchestration_1_0_8.d.ts +36 -0
- package/dist/orchestration_1_0_8.d.ts.map +1 -0
- package/dist/orchestration_1_0_8.js +1106 -0
- package/dist/orchestration_1_0_8.js.map +1 -0
- package/dist/orchestration_1_0_9.d.ts +36 -0
- package/dist/orchestration_1_0_9.d.ts.map +1 -0
- package/dist/orchestration_1_0_9.js +1207 -0
- package/dist/orchestration_1_0_9.js.map +1 -0
- package/dist/prompt-layering.d.ts +16 -0
- package/dist/prompt-layering.d.ts.map +1 -0
- package/dist/prompt-layering.js +60 -0
- package/dist/prompt-layering.js.map +1 -0
- package/dist/resourcemgr-tools.d.ts +27 -0
- package/dist/resourcemgr-tools.d.ts.map +1 -0
- package/dist/resourcemgr-tools.js +638 -0
- package/dist/resourcemgr-tools.js.map +1 -0
- package/dist/session-dumper.d.ts +26 -0
- package/dist/session-dumper.d.ts.map +1 -0
- package/dist/session-dumper.js +272 -0
- package/dist/session-dumper.js.map +1 -0
- package/dist/session-manager.d.ts +152 -0
- package/dist/session-manager.d.ts.map +1 -0
- package/dist/session-manager.js +493 -0
- package/dist/session-manager.js.map +1 -0
- package/dist/session-proxy.d.ts +68 -0
- package/dist/session-proxy.d.ts.map +1 -0
- package/dist/session-proxy.js +665 -0
- package/dist/session-proxy.js.map +1 -0
- package/dist/session-store.d.ts +35 -0
- package/dist/session-store.d.ts.map +1 -0
- package/dist/session-store.js +88 -0
- package/dist/session-store.js.map +1 -0
- package/dist/skills.d.ts +31 -0
- package/dist/skills.d.ts.map +1 -0
- package/dist/skills.js +93 -0
- package/dist/skills.js.map +1 -0
- package/dist/sweeper-tools.d.ts +28 -0
- package/dist/sweeper-tools.d.ts.map +1 -0
- package/dist/sweeper-tools.js +332 -0
- package/dist/sweeper-tools.js.map +1 -0
- package/dist/types.d.ts +498 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +9 -0
- package/dist/types.js.map +1 -0
- package/dist/worker.d.ts +128 -0
- package/dist/worker.d.ts.map +1 -0
- package/dist/worker.js +562 -0
- package/dist/worker.js.map +1 -0
- package/package.json +74 -0
- package/plugins/mgmt/agents/pilotswarm.agent.md +59 -0
- package/plugins/mgmt/agents/resourcemgr.agent.md +111 -0
- package/plugins/mgmt/agents/sweeper.agent.md +67 -0
- package/plugins/mgmt/skills/resourcemgr/SKILL.md +41 -0
- package/plugins/mgmt/skills/resourcemgr/tools.json +1 -0
- package/plugins/mgmt/skills/sweeper/SKILL.md +44 -0
- package/plugins/mgmt/skills/sweeper/tools.json +1 -0
- package/plugins/system/agents/default.agent.md +58 -0
- package/plugins/system/skills/durable-timers/SKILL.md +39 -0
- package/plugins/system/skills/sub-agents/SKILL.md +75 -0
|
@@ -0,0 +1,1207 @@
|
|
|
1
|
+
import { createSessionProxy, createSessionManagerProxy } from "./session-proxy.js";
|
|
2
|
+
/**
 * Publish a JSON blob of session state as the orchestration's custom status.
 * Clients read this via waitForStatusChange() or getStatus().
 * Note: `status` is emitted first, but a `status` key inside `extra`
 * overrides it (object spread comes last).
 * @internal
 */
function setStatus(ctx, status, extra) {
    const payload = JSON.stringify({ status, ...extra });
    ctx.setCustomStatus(payload);
}
|
|
10
|
+
/**
 * The orchestration version this module implements. Passed to
 * ctx.continueAsNewVersioned() so a continued orchestration resumes on
 * this implementation rather than an older snapshot.
 * @internal
 */
export const CURRENT_ORCHESTRATION_VERSION = "1.0.9";
|
|
27
|
+
/**
|
|
28
|
+
* Long-lived durable session orchestration.
|
|
29
|
+
*
|
|
30
|
+
* One orchestration per copilot session. Uses:
|
|
31
|
+
* - SessionProxy for session-scoped operations (runTurn, dehydrate, hydrate, destroy)
|
|
32
|
+
* - SessionManagerProxy for global operations (listModels)
|
|
33
|
+
* - A single FIFO event queue ("messages") for all client→orchestration communication
|
|
34
|
+
*
|
|
35
|
+
* Main loop:
|
|
36
|
+
* 1. Dequeue message from "messages" queue
|
|
37
|
+
* 2. session.hydrate() if needed
|
|
38
|
+
* 3. session.runTurn(prompt) — returns TurnResult
|
|
39
|
+
* 4. Handle result: completed → idle wait, wait → timer, input → wait for answer
|
|
40
|
+
*
|
|
41
|
+
* @internal
|
|
42
|
+
*/
|
|
43
|
+
export function* durableSessionOrchestration_1_0_9(ctx, input) {
|
|
44
|
+
const dehydrateThreshold = input.dehydrateThreshold ?? 30;
|
|
45
|
+
const idleTimeout = input.idleTimeout ?? 30;
|
|
46
|
+
const inputGracePeriod = input.inputGracePeriod ?? 30;
|
|
47
|
+
const checkpointInterval = input.checkpointInterval ?? -1; // seconds, -1 = disabled
|
|
48
|
+
const rehydrationMessage = input.rehydrationMessage;
|
|
49
|
+
const blobEnabled = input.blobEnabled ?? false;
|
|
50
|
+
let needsHydration = input.needsHydration ?? false;
|
|
51
|
+
let affinityKey = input.affinityKey ?? input.sessionId;
|
|
52
|
+
let iteration = input.iteration ?? 0;
|
|
53
|
+
let config = { ...input.config };
|
|
54
|
+
let retryCount = input.retryCount ?? 0;
|
|
55
|
+
let taskContext = input.taskContext;
|
|
56
|
+
const baseSystemMessage = input.baseSystemMessage ?? config.systemMessage;
|
|
57
|
+
const isSystem = input.isSystem ?? false;
|
|
58
|
+
const MAX_RETRIES = 3;
|
|
59
|
+
const MAX_SUB_AGENTS = 20;
|
|
60
|
+
const MAX_NESTING_LEVEL = 2; // 0=root, 1=child, 2=grandchild — no deeper
|
|
61
|
+
// ─── Sub-agent tracking ──────────────────────────────────
|
|
62
|
+
let subAgents = input.subAgents ? [...input.subAgents] : [];
|
|
63
|
+
// parentSessionId: prefer new field, fall back to old parentOrchId for backward compat
|
|
64
|
+
const parentSessionId = input.parentSessionId
|
|
65
|
+
?? (input.parentOrchId ? input.parentOrchId.replace(/^session-/, '') : undefined);
|
|
66
|
+
const nestingLevel = input.nestingLevel ?? 0;
|
|
67
|
+
// If we have a captured task context, inject it into the system message
|
|
68
|
+
// so it survives LLM conversation truncation (BasicTruncator never drops system messages).
|
|
69
|
+
if (taskContext) {
|
|
70
|
+
const base = typeof baseSystemMessage === 'string'
|
|
71
|
+
? baseSystemMessage ?? ''
|
|
72
|
+
: baseSystemMessage?.content ?? '';
|
|
73
|
+
config.systemMessage = base + (base ? '\n\n' : '') +
|
|
74
|
+
'[RECURRING TASK]\n' +
|
|
75
|
+
'Original user request (always remember, even if conversation history is truncated):\n"' +
|
|
76
|
+
taskContext + '"';
|
|
77
|
+
}
|
|
78
|
+
// ─── Title summarization timer ───────────────────────────
|
|
79
|
+
// First summarize at iteration 0 + 60s, then every 300s.
|
|
80
|
+
// We track the target timestamp (epoch ms) across continueAsNew.
|
|
81
|
+
// 0 means "schedule on first turn completion".
|
|
82
|
+
let nextSummarizeAt = input.nextSummarizeAt ?? 0;
|
|
83
|
+
// ─── Create proxies ──────────────────────────────────────
|
|
84
|
+
const manager = createSessionManagerProxy(ctx);
|
|
85
|
+
let session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
|
|
86
|
+
// ─── Helper: wrap prompt with resume context after dehydration ──
|
|
87
|
+
/**
 * Append a bracketed "[SYSTEM: …]" resume note after the user's prompt,
 * telling the LLM the session was rehydrated on a new worker. Uses the
 * configured rehydrationMessage when present, else a default note.
 * `extra`, when provided, is added inside the bracket on its own line.
 */
function wrapWithResumeContext(userPrompt, extra) {
    const defaultNote =
        `The session was dehydrated and has been rehydrated on a new worker. ` +
        `The LLM conversation history is preserved.`;
    const note = rehydrationMessage ?? defaultNote;
    let wrapped = `${userPrompt}\n\n[SYSTEM: ${note}`;
    if (extra) {
        wrapped += `\n${extra}`;
    }
    return `${wrapped}\n]`;
}
|
|
97
|
+
// ─── Shared continueAsNew input builder ──────────────────
|
|
98
|
+
/**
 * Build the input object for the next continueAsNew incarnation,
 * carrying forward all loop state. `retryCount` resets to 0 unless the
 * caller overrides it; any key in `overrides` wins over the defaults
 * (including `isSystem` and `retryCount`).
 */
function continueInput(overrides = {}) {
    const carried = {
        sessionId: input.sessionId,
        config,
        iteration,
        affinityKey,
        needsHydration,
        blobEnabled,
        dehydrateThreshold,
        idleTimeout,
        inputGracePeriod,
        checkpointInterval,
        rehydrationMessage,
        nextSummarizeAt,
        taskContext,
        baseSystemMessage,
        subAgents,
        parentSessionId,
        nestingLevel,
    };
    if (isSystem) {
        carried.isSystem = true;
    }
    carried.retryCount = 0; // reset by default; overrides can set it
    return { ...carried, ...overrides };
}
|
|
122
|
+
/** Yield this to continueAsNew into the current (latest) orchestration version. */
function versionedContinueAsNew(nextInput) {
    // Parameter renamed from `input` to `nextInput` so it no longer
    // shadows the enclosing orchestration's `input` argument.
    return ctx.continueAsNewVersioned(nextInput, CURRENT_ORCHESTRATION_VERSION);
}
|
|
126
|
+
/**
 * Detect a "[CHILD_UPDATE from=<sessionId> type=<kind> …]" header in a
 * prompt. Returns { sessionId, updateType, content } when the prompt is
 * such an update — content is everything after the first line, trimmed —
 * or null for ordinary prompts and non-string input. When the type token
 * has the closing "]" fused onto it (no trailing segment), it is stripped.
 */
function parseChildUpdate(promptText) {
    if (typeof promptText !== "string") {
        return null;
    }
    const header = promptText.match(/^\[CHILD_UPDATE from=(\S+) type=(\S+)/);
    if (header === null) {
        return null;
    }
    const [, sessionId, rawType] = header;
    const bodyLines = promptText.split("\n").slice(1);
    return {
        sessionId,
        updateType: rawType.replace(/\]$/, ""),
        content: bodyLines.join("\n").trim(),
    };
}
|
|
138
|
+
/**
 * Fold a CHILD_UPDATE message into the tracked sub-agent entry, then
 * best-effort refresh from the child's live custom status (terminal
 * states and any non-"done" result). Updates from unknown senders are
 * ignored; status-query failures are swallowed — the update from the
 * message itself still applies.
 */
function* applyChildUpdate(update) {
    ctx.traceInfo(`[orch] child update from=${update.sessionId} type=${update.updateType}`);
    const tracked = subAgents.find((entry) => entry.sessionId === update.sessionId);
    if (!tracked) {
        return;
    }
    if (update.content) {
        tracked.result = update.content.slice(0, 2000);
    }
    if (update.updateType === "completed") {
        tracked.status = "completed";
    }
    // Advisory: also consult the child's own reported status blob.
    try {
        const raw = yield manager.getSessionStatus(tracked.sessionId);
        const reported = JSON.parse(raw);
        if (["completed", "failed", "idle"].includes(reported.status)) {
            tracked.status = reported.status === "failed" ? "failed" : "completed";
        }
        if (reported.result && reported.result !== "done") {
            tracked.result = reported.result.slice(0, 2000);
        }
    }
    catch {
        // Status fetch is best-effort — ignore failures.
    }
}
|
|
161
|
+
// ─── Helper: dehydrate + reset affinity ──────────────────
|
|
162
|
+
/**
 * Dehydrate the live session, mark it as needing hydration, and rebuild
 * the session proxy under a fresh affinity key so the next hydrate can
 * land on any worker.
 */
function* dehydrateAndReset(reason) {
    ctx.traceInfo(`[orch] dehydrating session (reason=${reason})`);
    yield session.dehydrate(reason);
    needsHydration = true;
    const freshKey = yield ctx.newGuid();
    affinityKey = freshKey;
    session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
}
|
|
169
|
+
// ─── Helper: checkpoint without releasing pin ────────────
|
|
170
|
+
/**
 * Persist a checkpoint of the live session without releasing its pin.
 * No-op unless blob storage is on and checkpointing is enabled
 * (checkpointInterval >= 0; -1 means disabled). Failures are logged and
 * swallowed — checkpointing is best-effort.
 */
function* maybeCheckpoint() {
    const checkpointingEnabled = blobEnabled && checkpointInterval >= 0;
    if (!checkpointingEnabled) {
        return;
    }
    try {
        ctx.traceInfo(`[orch] checkpoint (iteration=${iteration})`);
        yield session.checkpoint();
    }
    catch (err) {
        ctx.traceInfo(`[orch] checkpoint failed: ${err.message ?? err}`);
    }
}
|
|
181
|
+
// ─── Helper: summarize session title if due ──────────────
|
|
182
|
+
const FIRST_SUMMARIZE_DELAY = 60_000; // 1 minute after session start
const REPEAT_SUMMARIZE_DELAY = 300_000; // every 5 minutes thereafter
/**
 * Ask the manager to re-title (summarize) the session when the deadline
 * has passed. The first call only arms the timer (60s out); each actual
 * summarize reschedules 300s out. System sessions have fixed titles and
 * are never summarized. Failures are logged and swallowed (best effort).
 */
function* maybeSummarize() {
    // System sessions have fixed titles — never summarize
    if (isSystem) {
        return;
    }
    const now = yield ctx.utcNow();
    // Schedule first summarize 60s after session start
    if (nextSummarizeAt === 0) {
        nextSummarizeAt = now + FIRST_SUMMARIZE_DELAY;
        return;
    }
    if (now < nextSummarizeAt) {
        return;
    }
    // Time to summarize — fire and forget (best effort)
    try {
        ctx.traceInfo(`[orch] summarizing session title`);
        yield manager.summarizeSession(input.sessionId);
    }
    catch (err) {
        // `?? String(err)` matches the fallback style of the other helpers;
        // previously a message-less error would log "undefined".
        ctx.traceInfo(`[orch] summarize failed: ${err.message ?? String(err)}`);
    }
    nextSummarizeAt = now + REPEAT_SUMMARIZE_DELAY;
}
|
|
206
|
+
// ─── Prompt carried from continueAsNew ───────────────────
|
|
207
|
+
let pendingPrompt = input.prompt;
|
|
208
|
+
/** Set by the "completed" handler so the dequeue loop doesn't overwrite it. */
|
|
209
|
+
let lastTurnResult = undefined;
|
|
210
|
+
ctx.traceInfo(`[orch] start: iter=${iteration} pending=${pendingPrompt ? `"${pendingPrompt.slice(0, 40)}"` : 'NONE'} hydrate=${needsHydration} blob=${blobEnabled}`);
|
|
211
|
+
// ─── MAIN LOOP ──────────────────────────────────────────
|
|
212
|
+
while (true) {
|
|
213
|
+
// ① GET NEXT PROMPT
|
|
214
|
+
let prompt = "";
|
|
215
|
+
if (pendingPrompt) {
|
|
216
|
+
prompt = pendingPrompt;
|
|
217
|
+
pendingPrompt = undefined;
|
|
218
|
+
}
|
|
219
|
+
else {
|
|
220
|
+
// If we have a completed turnResult, include it in the idle status
|
|
221
|
+
// so clients can read it via waitForStatusChange. Without this,
|
|
222
|
+
// a bare setStatus("idle") between yields would overwrite it.
|
|
223
|
+
if (lastTurnResult) {
|
|
224
|
+
setStatus(ctx, "idle", { iteration, turnResult: lastTurnResult });
|
|
225
|
+
}
|
|
226
|
+
else {
|
|
227
|
+
setStatus(ctx, "idle", { iteration });
|
|
228
|
+
}
|
|
229
|
+
let gotPrompt = false;
|
|
230
|
+
while (!gotPrompt) {
|
|
231
|
+
// All messages (from users and child agents) arrive on the "messages" queue.
|
|
232
|
+
// Child agents communicate via the SDK (sendToSession), which enqueues
|
|
233
|
+
// to the same "messages" queue as user prompts.
|
|
234
|
+
let msgData;
|
|
235
|
+
const msg = yield ctx.dequeueEvent("messages");
|
|
236
|
+
msgData = typeof msg === "string" ? JSON.parse(msg) : msg;
|
|
237
|
+
// ── Command dispatch ─────────────────────────
|
|
238
|
+
if (msgData.type === "cmd") {
|
|
239
|
+
const cmdMsg = msgData;
|
|
240
|
+
ctx.traceInfo(`[orch-cmd] ${cmdMsg.cmd} id=${cmdMsg.id}`);
|
|
241
|
+
switch (cmdMsg.cmd) {
|
|
242
|
+
case "set_model": {
|
|
243
|
+
const newModel = String(cmdMsg.args?.model || "");
|
|
244
|
+
const oldModel = config.model || "(default)";
|
|
245
|
+
config = { ...config, model: newModel };
|
|
246
|
+
const resp = {
|
|
247
|
+
id: cmdMsg.id,
|
|
248
|
+
cmd: cmdMsg.cmd,
|
|
249
|
+
result: { ok: true, oldModel, newModel },
|
|
250
|
+
};
|
|
251
|
+
setStatus(ctx, "idle", { iteration, cmdResponse: resp });
|
|
252
|
+
yield versionedContinueAsNew(continueInput());
|
|
253
|
+
return "";
|
|
254
|
+
}
|
|
255
|
+
case "list_models": {
|
|
256
|
+
setStatus(ctx, "idle", { iteration, cmdProcessing: cmdMsg.id });
|
|
257
|
+
let models;
|
|
258
|
+
try {
|
|
259
|
+
const raw = yield manager.listModels();
|
|
260
|
+
models = typeof raw === "string" ? JSON.parse(raw) : raw;
|
|
261
|
+
}
|
|
262
|
+
catch (err) {
|
|
263
|
+
const resp = {
|
|
264
|
+
id: cmdMsg.id,
|
|
265
|
+
cmd: cmdMsg.cmd,
|
|
266
|
+
error: err.message || String(err),
|
|
267
|
+
};
|
|
268
|
+
setStatus(ctx, "idle", { iteration, cmdResponse: resp });
|
|
269
|
+
continue;
|
|
270
|
+
}
|
|
271
|
+
const resp = {
|
|
272
|
+
id: cmdMsg.id,
|
|
273
|
+
cmd: cmdMsg.cmd,
|
|
274
|
+
result: { models, currentModel: config.model },
|
|
275
|
+
};
|
|
276
|
+
setStatus(ctx, "idle", { iteration, cmdResponse: resp });
|
|
277
|
+
continue;
|
|
278
|
+
}
|
|
279
|
+
case "get_info": {
|
|
280
|
+
const resp = {
|
|
281
|
+
id: cmdMsg.id,
|
|
282
|
+
cmd: cmdMsg.cmd,
|
|
283
|
+
result: {
|
|
284
|
+
model: config.model || "(default)",
|
|
285
|
+
iteration,
|
|
286
|
+
sessionId: input.sessionId,
|
|
287
|
+
affinityKey: affinityKey?.slice(0, 8),
|
|
288
|
+
needsHydration,
|
|
289
|
+
blobEnabled,
|
|
290
|
+
},
|
|
291
|
+
};
|
|
292
|
+
setStatus(ctx, "idle", { iteration, cmdResponse: resp });
|
|
293
|
+
continue;
|
|
294
|
+
}
|
|
295
|
+
case "done": {
|
|
296
|
+
ctx.traceInfo(`[orch] /done command received — completing session`);
|
|
297
|
+
// Cascade: complete all sub-agents whose orchestrations may still be alive.
|
|
298
|
+
// Include "running" AND "completed" — a child that sent CHILD_UPDATE
|
|
299
|
+
// may still have a live orchestration waiting in its idle loop.
|
|
300
|
+
const liveChildren = subAgents.filter(a => a.status === "running" || a.status === "completed");
|
|
301
|
+
if (liveChildren.length > 0) {
|
|
302
|
+
ctx.traceInfo(`[orch] /done: completing ${liveChildren.length} sub-agent(s)`);
|
|
303
|
+
for (const child of liveChildren) {
|
|
304
|
+
try {
|
|
305
|
+
const childCmdId = `done-cascade-${iteration}-${child.sessionId.slice(0, 8)}`;
|
|
306
|
+
yield manager.sendCommandToSession(child.sessionId, { type: "cmd", cmd: "done", id: childCmdId, args: { reason: "Parent session completing" } });
|
|
307
|
+
child.status = "completed";
|
|
308
|
+
ctx.traceInfo(`[orch] /done: completed child ${child.sessionId}`);
|
|
309
|
+
}
|
|
310
|
+
catch (err) {
|
|
311
|
+
ctx.traceInfo(`[orch] /done: failed to complete child ${child.sessionId}: ${err.message} (non-fatal)`);
|
|
312
|
+
}
|
|
313
|
+
}
|
|
314
|
+
}
|
|
315
|
+
// If this is a child orchestration, send final result to parent
|
|
316
|
+
if (parentSessionId) {
|
|
317
|
+
try {
|
|
318
|
+
const doneReason = String(cmdMsg.args?.reason || "Session completed by user");
|
|
319
|
+
yield manager.sendToSession(parentSessionId, `[CHILD_UPDATE from=${input.sessionId} type=completed iter=${iteration}]\n${doneReason}`);
|
|
320
|
+
}
|
|
321
|
+
catch (err) {
|
|
322
|
+
ctx.traceInfo(`[orch] sendToSession(parent) on /done failed: ${err.message} (non-fatal)`);
|
|
323
|
+
}
|
|
324
|
+
}
|
|
325
|
+
// Destroy the in-memory session
|
|
326
|
+
try {
|
|
327
|
+
yield session.destroy();
|
|
328
|
+
}
|
|
329
|
+
catch { }
|
|
330
|
+
const resp = {
|
|
331
|
+
id: cmdMsg.id,
|
|
332
|
+
cmd: cmdMsg.cmd,
|
|
333
|
+
result: { ok: true, message: "Session completed" },
|
|
334
|
+
};
|
|
335
|
+
setStatus(ctx, "completed", { iteration, cmdResponse: resp });
|
|
336
|
+
return "done";
|
|
337
|
+
}
|
|
338
|
+
default: {
|
|
339
|
+
const resp = {
|
|
340
|
+
id: cmdMsg.id,
|
|
341
|
+
cmd: cmdMsg.cmd,
|
|
342
|
+
error: `Unknown command: ${cmdMsg.cmd}`,
|
|
343
|
+
};
|
|
344
|
+
setStatus(ctx, "idle", { iteration, cmdResponse: resp });
|
|
345
|
+
continue;
|
|
346
|
+
}
|
|
347
|
+
}
|
|
348
|
+
}
|
|
349
|
+
const childUpdate = parseChildUpdate(msgData.prompt);
|
|
350
|
+
if (childUpdate) {
|
|
351
|
+
yield* applyChildUpdate(childUpdate);
|
|
352
|
+
continue;
|
|
353
|
+
}
|
|
354
|
+
prompt = msgData.prompt;
|
|
355
|
+
gotPrompt = true;
|
|
356
|
+
lastTurnResult = undefined; // Clear after new prompt arrives
|
|
357
|
+
}
|
|
358
|
+
}
|
|
359
|
+
// If the session needs hydration, the LLM lost in-memory context.
|
|
360
|
+
// Wrap the user's prompt with resume instructions so the LLM picks up where it left off.
|
|
361
|
+
if (needsHydration && blobEnabled && prompt) {
|
|
362
|
+
prompt = wrapWithResumeContext(prompt);
|
|
363
|
+
}
|
|
364
|
+
ctx.traceInfo(`[turn ${iteration}] session=${input.sessionId} prompt="${prompt.slice(0, 80)}"`);
|
|
365
|
+
// ② HYDRATE if session was dehydrated (with retry)
|
|
366
|
+
if (needsHydration && blobEnabled) {
|
|
367
|
+
let hydrateAttempts = 0;
|
|
368
|
+
while (true) {
|
|
369
|
+
try {
|
|
370
|
+
affinityKey = yield ctx.newGuid();
|
|
371
|
+
session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
|
|
372
|
+
yield session.hydrate();
|
|
373
|
+
needsHydration = false;
|
|
374
|
+
break;
|
|
375
|
+
}
|
|
376
|
+
catch (hydrateErr) {
|
|
377
|
+
const hMsg = hydrateErr.message || String(hydrateErr);
|
|
378
|
+
// Blob was deleted (e.g. after a reset) — skip hydration, start fresh
|
|
379
|
+
if (hMsg.includes("blob does not exist") || hMsg.includes("BlobNotFound") || hMsg.includes("404")) {
|
|
380
|
+
ctx.traceInfo(`[orch] hydrate skipped — blob not found, starting fresh session`);
|
|
381
|
+
needsHydration = false;
|
|
382
|
+
break;
|
|
383
|
+
}
|
|
384
|
+
hydrateAttempts++;
|
|
385
|
+
ctx.traceInfo(`[orch] hydrate FAILED (attempt ${hydrateAttempts}/${MAX_RETRIES}): ${hMsg}`);
|
|
386
|
+
if (hydrateAttempts >= MAX_RETRIES) {
|
|
387
|
+
setStatus(ctx, "error", {
|
|
388
|
+
iteration,
|
|
389
|
+
error: `Hydrate failed after ${MAX_RETRIES} attempts: ${hMsg}`,
|
|
390
|
+
retriesExhausted: true,
|
|
391
|
+
});
|
|
392
|
+
// Can't proceed without hydration — wait for next user message to retry
|
|
393
|
+
break;
|
|
394
|
+
}
|
|
395
|
+
const hydrateDelay = 10 * Math.pow(2, hydrateAttempts - 1);
|
|
396
|
+
setStatus(ctx, "error", {
|
|
397
|
+
iteration,
|
|
398
|
+
error: `Hydrate failed: ${hMsg} (retry ${hydrateAttempts}/${MAX_RETRIES} in ${hydrateDelay}s)`,
|
|
399
|
+
});
|
|
400
|
+
yield ctx.scheduleTimer(hydrateDelay * 1000);
|
|
401
|
+
}
|
|
402
|
+
}
|
|
403
|
+
if (needsHydration)
|
|
404
|
+
continue; // hydrate exhausted retries — go back to dequeue
|
|
405
|
+
}
|
|
406
|
+
// ③ RUN TURN via SessionProxy (with retry on failure)
|
|
407
|
+
setStatus(ctx, "running", { iteration });
|
|
408
|
+
let turnResult;
|
|
409
|
+
try {
|
|
410
|
+
turnResult = yield session.runTurn(prompt);
|
|
411
|
+
}
|
|
412
|
+
catch (err) {
|
|
413
|
+
// Activity failed (e.g. Copilot timeout, network error).
|
|
414
|
+
const errorMsg = err.message || String(err);
|
|
415
|
+
retryCount++;
|
|
416
|
+
ctx.traceInfo(`[orch] runTurn FAILED (attempt ${retryCount}/${MAX_RETRIES}): ${errorMsg}`);
|
|
417
|
+
if (retryCount >= MAX_RETRIES) {
|
|
418
|
+
// Exhausted retries — park in error state but don't crash.
|
|
419
|
+
// The orchestration stays alive and will retry on the next user message.
|
|
420
|
+
ctx.traceInfo(`[orch] max retries exhausted, waiting for user input`);
|
|
421
|
+
setStatus(ctx, "error", {
|
|
422
|
+
iteration,
|
|
423
|
+
error: `Failed after ${MAX_RETRIES} attempts: ${errorMsg}`,
|
|
424
|
+
retriesExhausted: true,
|
|
425
|
+
});
|
|
426
|
+
// Reset retry count and wait for next user message
|
|
427
|
+
retryCount = 0;
|
|
428
|
+
continue;
|
|
429
|
+
}
|
|
430
|
+
setStatus(ctx, "error", {
|
|
431
|
+
iteration,
|
|
432
|
+
error: `${errorMsg} (retry ${retryCount}/${MAX_RETRIES} in 15s)`,
|
|
433
|
+
});
|
|
434
|
+
// Exponential backoff: 15s, 30s, 60s
|
|
435
|
+
const retryDelay = 15 * Math.pow(2, retryCount - 1);
|
|
436
|
+
ctx.traceInfo(`[orch] retrying in ${retryDelay}s`);
|
|
437
|
+
if (blobEnabled) {
|
|
438
|
+
yield* dehydrateAndReset("error");
|
|
439
|
+
}
|
|
440
|
+
yield ctx.scheduleTimer(retryDelay * 1000);
|
|
441
|
+
yield versionedContinueAsNew(continueInput({
|
|
442
|
+
prompt,
|
|
443
|
+
retryCount,
|
|
444
|
+
needsHydration: blobEnabled ? true : needsHydration,
|
|
445
|
+
}));
|
|
446
|
+
return "";
|
|
447
|
+
}
|
|
448
|
+
// Successful activity — reset retry counter
|
|
449
|
+
retryCount = 0;
|
|
450
|
+
const result = typeof turnResult === "string"
|
|
451
|
+
? JSON.parse(turnResult) : turnResult;
|
|
452
|
+
iteration++;
|
|
453
|
+
// Strip events from result before putting in customStatus (events go to CMS, not status)
|
|
454
|
+
const { events: _events, ...statusResult } = result;
|
|
455
|
+
// ── Summarize title if due ──────────────────────────
|
|
456
|
+
yield* maybeSummarize();
|
|
457
|
+
// ④ HANDLE RESULT
|
|
458
|
+
switch (result.type) {
|
|
459
|
+
case "completed":
|
|
460
|
+
ctx.traceInfo(`[response] ${result.content}`);
|
|
461
|
+
// If this is a child orchestration, notify the parent about our completion
|
|
462
|
+
// via the SDK — sends to the parent's "messages" queue like any other message.
|
|
463
|
+
if (parentSessionId) {
|
|
464
|
+
try {
|
|
465
|
+
yield manager.sendToSession(parentSessionId, `[CHILD_UPDATE from=${input.sessionId} type=completed iter=${iteration}]\n${result.content.slice(0, 2000)}`);
|
|
466
|
+
}
|
|
467
|
+
catch (err) {
|
|
468
|
+
ctx.traceInfo(`[orch] sendToSession(parent) failed: ${err.message} (non-fatal)`);
|
|
469
|
+
}
|
|
470
|
+
// System sub-agents (sweeper, resourcemgr) should keep running forever.
|
|
471
|
+
// Non-system sub-agents auto-terminate after completing their task.
|
|
472
|
+
if (input.isSystem) {
|
|
473
|
+
ctx.traceInfo(`[orch] system sub-agent completed turn, continuing loop`);
|
|
474
|
+
lastTurnResult = statusResult;
|
|
475
|
+
yield* maybeCheckpoint();
|
|
476
|
+
continue;
|
|
477
|
+
}
|
|
478
|
+
// Non-system sub-agents auto-terminate after completing their task and notifying
|
|
479
|
+
// the parent. Without this, they sit in the idle loop forever (idleTimeout=-1)
|
|
480
|
+
// and accumulate as zombie orchestrations.
|
|
481
|
+
ctx.traceInfo(`[orch] sub-agent completed task, auto-terminating`);
|
|
482
|
+
try {
|
|
483
|
+
yield session.destroy();
|
|
484
|
+
}
|
|
485
|
+
catch { }
|
|
486
|
+
setStatus(ctx, "completed", { iteration, turnResult: statusResult });
|
|
487
|
+
return "done";
|
|
488
|
+
}
|
|
489
|
+
if (!blobEnabled || idleTimeout < 0) {
|
|
490
|
+
// Store the result so the dequeue-idle setStatus includes it
|
|
491
|
+
lastTurnResult = statusResult;
|
|
492
|
+
// Checkpoint while idle (no dehydration path)
|
|
493
|
+
yield* maybeCheckpoint();
|
|
494
|
+
continue;
|
|
495
|
+
}
|
|
496
|
+
// Race: next message vs idle timeout
|
|
497
|
+
{
|
|
498
|
+
setStatus(ctx, "idle", { iteration, turnResult: statusResult });
|
|
499
|
+
yield* maybeCheckpoint();
|
|
500
|
+
const idleDeadline = (yield ctx.utcNow()) + idleTimeout * 1000;
|
|
501
|
+
while (true) {
|
|
502
|
+
const now = yield ctx.utcNow();
|
|
503
|
+
const remainingMs = Math.max(0, idleDeadline - now);
|
|
504
|
+
if (remainingMs === 0)
|
|
505
|
+
break;
|
|
506
|
+
const nextMsg = ctx.dequeueEvent("messages");
|
|
507
|
+
const idleTimer = ctx.scheduleTimer(remainingMs);
|
|
508
|
+
const raceResult = yield ctx.race(nextMsg, idleTimer);
|
|
509
|
+
if (raceResult.index === 0) {
|
|
510
|
+
const raceMsg = typeof raceResult.value === "string"
|
|
511
|
+
? JSON.parse(raceResult.value) : (raceResult.value ?? {});
|
|
512
|
+
const childUpdate = parseChildUpdate(raceMsg.prompt);
|
|
513
|
+
if (childUpdate) {
|
|
514
|
+
yield* applyChildUpdate(childUpdate);
|
|
515
|
+
continue;
|
|
516
|
+
}
|
|
517
|
+
ctx.traceInfo("[session] user responded within idle window");
|
|
518
|
+
if (raceMsg.prompt) {
|
|
519
|
+
yield versionedContinueAsNew(continueInput({ prompt: raceMsg.prompt }));
|
|
520
|
+
}
|
|
521
|
+
else {
|
|
522
|
+
yield versionedContinueAsNew(continueInput());
|
|
523
|
+
}
|
|
524
|
+
return "";
|
|
525
|
+
}
|
|
526
|
+
break;
|
|
527
|
+
}
|
|
528
|
+
// Idle timeout → dehydrate. Next message will need resume context.
|
|
529
|
+
ctx.traceInfo("[session] idle timeout, dehydrating");
|
|
530
|
+
yield* dehydrateAndReset("idle");
|
|
531
|
+
// Don't continueAsNew with a prompt — wait for the next user message,
|
|
532
|
+
// which will be wrapped with resume context because needsHydration=true.
|
|
533
|
+
yield versionedContinueAsNew(continueInput());
|
|
534
|
+
return "";
|
|
535
|
+
}
|
|
536
|
+
case "wait":
|
|
537
|
+
// Capture original user prompt as task context for recurring tasks.
|
|
538
|
+
// This ensures the LLM remembers its task even after conversation truncation.
|
|
539
|
+
if (!taskContext) {
|
|
540
|
+
taskContext = prompt.slice(0, 2000);
|
|
541
|
+
const base = typeof baseSystemMessage === 'string'
|
|
542
|
+
? baseSystemMessage ?? ''
|
|
543
|
+
: baseSystemMessage?.content ?? '';
|
|
544
|
+
config.systemMessage = base + (base ? '\n\n' : '') +
|
|
545
|
+
'[RECURRING TASK]\n' +
|
|
546
|
+
'Original user request (always remember, even if conversation history is truncated):\n"' +
|
|
547
|
+
taskContext + '"';
|
|
548
|
+
}
|
|
549
|
+
if (result.content) {
|
|
550
|
+
setStatus(ctx, "running", { iteration, intermediateContent: result.content });
|
|
551
|
+
ctx.traceInfo(`[orch] intermediate: ${result.content.slice(0, 80)}`);
|
|
552
|
+
}
|
|
553
|
+
// If this is a child orchestration, notify the parent on every wait cycle
|
|
554
|
+
// via the SDK — sends a message to the parent's "messages" queue.
|
|
555
|
+
if (parentSessionId) {
|
|
556
|
+
try {
|
|
557
|
+
const notifyContent = result.content
|
|
558
|
+
? result.content.slice(0, 2000)
|
|
559
|
+
: `[wait: ${result.reason} (${result.seconds}s)]`;
|
|
560
|
+
yield manager.sendToSession(parentSessionId, `[CHILD_UPDATE from=${input.sessionId} type=wait iter=${iteration}]\n${notifyContent}`);
|
|
561
|
+
}
|
|
562
|
+
catch (err) {
|
|
563
|
+
ctx.traceInfo(`[orch] sendToSession(parent) wait failed: ${err.message} (non-fatal)`);
|
|
564
|
+
}
|
|
565
|
+
}
|
|
566
|
+
ctx.traceInfo(`[orch] durable timer: ${result.seconds}s (${result.reason})`);
|
|
567
|
+
{
|
|
568
|
+
const shouldDehydrate = blobEnabled && result.seconds > dehydrateThreshold;
|
|
569
|
+
if (shouldDehydrate) {
|
|
570
|
+
yield* dehydrateAndReset("timer");
|
|
571
|
+
}
|
|
572
|
+
const waitStartedAt = yield ctx.utcNow();
|
|
573
|
+
setStatus(ctx, "waiting", {
|
|
574
|
+
iteration,
|
|
575
|
+
waitSeconds: result.seconds,
|
|
576
|
+
waitReason: result.reason,
|
|
577
|
+
waitStartedAt,
|
|
578
|
+
...(result.content ? { turnResult: { type: "completed", content: result.content } } : {}),
|
|
579
|
+
});
|
|
580
|
+
// Checkpoint before the blocking wait
|
|
581
|
+
if (!shouldDehydrate)
|
|
582
|
+
yield* maybeCheckpoint();
|
|
583
|
+
const timerTask = ctx.scheduleTimer(result.seconds * 1000);
|
|
584
|
+
const interruptMsg = ctx.dequeueEvent("messages");
|
|
585
|
+
const timerRace = yield ctx.race(timerTask, interruptMsg);
|
|
586
|
+
if (timerRace.index === 1) {
|
|
587
|
+
const interruptData = typeof timerRace.value === "string"
|
|
588
|
+
? JSON.parse(timerRace.value) : (timerRace.value ?? {});
|
|
589
|
+
const childUpdate = parseChildUpdate(interruptData.prompt);
|
|
590
|
+
if (childUpdate) {
|
|
591
|
+
yield* applyChildUpdate(childUpdate);
|
|
592
|
+
const interruptedAt = yield ctx.utcNow();
|
|
593
|
+
const elapsedSec = Math.round((interruptedAt - waitStartedAt) / 1000);
|
|
594
|
+
const remainingSec = Math.max(0, result.seconds - elapsedSec);
|
|
595
|
+
if (remainingSec === 0) {
|
|
596
|
+
const timerPrompt = `The ${result.seconds} second wait is now complete. Continue with your task.`;
|
|
597
|
+
yield versionedContinueAsNew(continueInput({
|
|
598
|
+
prompt: timerPrompt,
|
|
599
|
+
needsHydration: shouldDehydrate ? true : needsHydration,
|
|
600
|
+
}));
|
|
601
|
+
}
|
|
602
|
+
else {
|
|
603
|
+
yield versionedContinueAsNew(continueInput({
|
|
604
|
+
prompt: `The wait was partially completed (${elapsedSec}s elapsed, ${remainingSec}s remain). Resume the wait for the remaining ${remainingSec} seconds.`,
|
|
605
|
+
needsHydration: shouldDehydrate ? true : needsHydration,
|
|
606
|
+
}));
|
|
607
|
+
}
|
|
608
|
+
return "";
|
|
609
|
+
}
|
|
610
|
+
ctx.traceInfo(`[session] wait interrupted: "${(interruptData.prompt || "").slice(0, 60)}"`);
|
|
611
|
+
// Calculate remaining time for resume context
|
|
612
|
+
const interruptedAt = yield ctx.utcNow();
|
|
613
|
+
const elapsedSec = Math.round((interruptedAt - waitStartedAt) / 1000);
|
|
614
|
+
const remainingSec = Math.max(0, result.seconds - elapsedSec);
|
|
615
|
+
const userPrompt = interruptData.prompt || "";
|
|
616
|
+
let finalPrompt;
|
|
617
|
+
if (shouldDehydrate && userPrompt) {
|
|
618
|
+
finalPrompt = wrapWithResumeContext(userPrompt, `Your timer was interrupted by a USER MESSAGE. You MUST respond to the user's message below before doing anything else. ` +
|
|
619
|
+
`Timer context: ${result.seconds}s timer (reason: "${result.reason}"), ` +
|
|
620
|
+
`${elapsedSec}s elapsed, ${remainingSec}s remain. ` +
|
|
621
|
+
`After fully addressing the user's message, resume the wait for the remaining ${remainingSec} seconds.`);
|
|
622
|
+
}
|
|
623
|
+
else if (userPrompt) {
|
|
624
|
+
// Not dehydrated but still interrupted — give timing context
|
|
625
|
+
finalPrompt = `${userPrompt}\n\n` +
|
|
626
|
+
`[SYSTEM: IMPORTANT — The above is a USER MESSAGE that interrupted your ${result.seconds}s timer (reason: "${result.reason}"). ` +
|
|
627
|
+
`You MUST respond to the user's message FIRST. ${elapsedSec}s elapsed, ${remainingSec}s remain. ` +
|
|
628
|
+
`After fully answering the user, resume the wait for the remaining ${remainingSec} seconds.]`;
|
|
629
|
+
}
|
|
630
|
+
else {
|
|
631
|
+
finalPrompt = userPrompt;
|
|
632
|
+
}
|
|
633
|
+
yield versionedContinueAsNew(continueInput({
|
|
634
|
+
prompt: finalPrompt,
|
|
635
|
+
needsHydration: shouldDehydrate ? true : needsHydration,
|
|
636
|
+
}));
|
|
637
|
+
return "";
|
|
638
|
+
}
|
|
639
|
+
const timerPrompt = `The ${result.seconds} second wait is now complete. Continue with your task.`;
|
|
640
|
+
yield versionedContinueAsNew(continueInput({
|
|
641
|
+
prompt: timerPrompt,
|
|
642
|
+
needsHydration: shouldDehydrate ? true : needsHydration,
|
|
643
|
+
}));
|
|
644
|
+
return "";
|
|
645
|
+
}
|
|
646
|
+
case "input_required":
|
|
647
|
+
ctx.traceInfo(`[orch] waiting for user input: ${result.question}`);
|
|
648
|
+
if (!blobEnabled || inputGracePeriod < 0) {
|
|
649
|
+
setStatus(ctx, "input_required", {
|
|
650
|
+
iteration,
|
|
651
|
+
turnResult: statusResult,
|
|
652
|
+
pendingQuestion: result.question,
|
|
653
|
+
choices: result.choices,
|
|
654
|
+
allowFreeform: result.allowFreeform,
|
|
655
|
+
});
|
|
656
|
+
yield* maybeCheckpoint();
|
|
657
|
+
const answerMsg = yield ctx.dequeueEvent("messages");
|
|
658
|
+
const answerData = typeof answerMsg === "string"
|
|
659
|
+
? JSON.parse(answerMsg) : answerMsg;
|
|
660
|
+
yield versionedContinueAsNew(continueInput({
|
|
661
|
+
prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
|
|
662
|
+
needsHydration: false,
|
|
663
|
+
}));
|
|
664
|
+
return "";
|
|
665
|
+
}
|
|
666
|
+
if (inputGracePeriod === 0) {
|
|
667
|
+
setStatus(ctx, "input_required", {
|
|
668
|
+
iteration,
|
|
669
|
+
turnResult: statusResult,
|
|
670
|
+
pendingQuestion: result.question,
|
|
671
|
+
});
|
|
672
|
+
yield* dehydrateAndReset("input_required");
|
|
673
|
+
const answerMsg = yield ctx.dequeueEvent("messages");
|
|
674
|
+
const answerData = typeof answerMsg === "string"
|
|
675
|
+
? JSON.parse(answerMsg) : answerMsg;
|
|
676
|
+
yield versionedContinueAsNew(continueInput({
|
|
677
|
+
prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
|
|
678
|
+
}));
|
|
679
|
+
return "";
|
|
680
|
+
}
|
|
681
|
+
// Race: user answer vs grace period
|
|
682
|
+
{
|
|
683
|
+
setStatus(ctx, "input_required", {
|
|
684
|
+
iteration,
|
|
685
|
+
turnResult: statusResult,
|
|
686
|
+
pendingQuestion: result.question,
|
|
687
|
+
choices: result.choices,
|
|
688
|
+
allowFreeform: result.allowFreeform,
|
|
689
|
+
});
|
|
690
|
+
const answerEvt = ctx.dequeueEvent("messages");
|
|
691
|
+
const graceTimer = ctx.scheduleTimer(inputGracePeriod * 1000);
|
|
692
|
+
const raceResult = yield ctx.race(answerEvt, graceTimer);
|
|
693
|
+
if (raceResult.index === 0) {
|
|
694
|
+
const answerData = typeof raceResult.value === "string"
|
|
695
|
+
? JSON.parse(raceResult.value) : (raceResult.value ?? {});
|
|
696
|
+
yield versionedContinueAsNew(continueInput({
|
|
697
|
+
prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
|
|
698
|
+
needsHydration: false,
|
|
699
|
+
}));
|
|
700
|
+
return "";
|
|
701
|
+
}
|
|
702
|
+
yield* dehydrateAndReset("input_required");
|
|
703
|
+
const answerMsg = yield ctx.dequeueEvent("messages");
|
|
704
|
+
const answerData = typeof answerMsg === "string"
|
|
705
|
+
? JSON.parse(answerMsg) : answerMsg;
|
|
706
|
+
yield versionedContinueAsNew(continueInput({
|
|
707
|
+
prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
|
|
708
|
+
}));
|
|
709
|
+
return "";
|
|
710
|
+
}
|
|
711
|
+
case "cancelled":
|
|
712
|
+
ctx.traceInfo("[session] turn cancelled");
|
|
713
|
+
continue;
|
|
714
|
+
// ─── Sub-Agent Result Handlers ───────────────────
|
|
715
|
+
case "spawn_agent": {
|
|
716
|
+
// Enforce nesting depth limit
|
|
717
|
+
const childNestingLevel = nestingLevel + 1;
|
|
718
|
+
if (childNestingLevel > MAX_NESTING_LEVEL) {
|
|
719
|
+
ctx.traceInfo(`[orch] spawn_agent denied: nesting level ${nestingLevel} is at max (${MAX_NESTING_LEVEL})`);
|
|
720
|
+
yield versionedContinueAsNew(continueInput({
|
|
721
|
+
prompt: `[SYSTEM: spawn_agent failed — you are already at nesting level ${nestingLevel} (max ${MAX_NESTING_LEVEL}). ` +
|
|
722
|
+
`Sub-agents at this depth cannot spawn further sub-agents. Handle the task directly instead.]`,
|
|
723
|
+
}));
|
|
724
|
+
return "";
|
|
725
|
+
}
|
|
726
|
+
// Enforce max sub-agents
|
|
727
|
+
const activeCount = subAgents.filter(a => a.status === "running").length;
|
|
728
|
+
if (activeCount >= MAX_SUB_AGENTS) {
|
|
729
|
+
ctx.traceInfo(`[orch] spawn_agent denied: ${activeCount}/${MAX_SUB_AGENTS} agents running`);
|
|
730
|
+
yield versionedContinueAsNew(continueInput({
|
|
731
|
+
prompt: `[SYSTEM: spawn_agent failed — you already have ${activeCount} running sub-agents (max ${MAX_SUB_AGENTS}). ` +
|
|
732
|
+
`Wait for some to complete before spawning more.]`,
|
|
733
|
+
}));
|
|
734
|
+
return "";
|
|
735
|
+
}
|
|
736
|
+
// ─── Resolve agent config if agent_name is provided ───
|
|
737
|
+
let agentTask = result.task;
|
|
738
|
+
let agentSystemMessage = result.systemMessage;
|
|
739
|
+
let agentToolNames = result.toolNames;
|
|
740
|
+
let agentModel = result.model;
|
|
741
|
+
let agentIsSystem = false;
|
|
742
|
+
let agentTitle;
|
|
743
|
+
let agentId;
|
|
744
|
+
let agentSplash;
|
|
745
|
+
let resolvedAgentName = result.agentName;
|
|
746
|
+
const applyAgentDef = (agentDef, useDefinitionDefaults = false) => {
|
|
747
|
+
agentTask = useDefinitionDefaults
|
|
748
|
+
? (agentDef.initialPrompt || `You are the ${agentDef.name} agent. Begin your work.`)
|
|
749
|
+
: (result.task || agentDef.initialPrompt || `You are the ${agentDef.name} agent. Begin your work.`);
|
|
750
|
+
agentSystemMessage = useDefinitionDefaults
|
|
751
|
+
? ({ mode: "replace", content: agentDef.prompt })
|
|
752
|
+
: (result.systemMessage ?? { mode: "replace", content: agentDef.prompt });
|
|
753
|
+
agentToolNames = useDefinitionDefaults
|
|
754
|
+
? (agentDef.tools ?? undefined)
|
|
755
|
+
: (result.toolNames ?? agentDef.tools ?? undefined);
|
|
756
|
+
agentIsSystem = agentDef.system ?? false;
|
|
757
|
+
agentTitle = agentDef.title;
|
|
758
|
+
agentId = agentDef.id ?? resolvedAgentName;
|
|
759
|
+
agentSplash = agentDef.splash;
|
|
760
|
+
};
|
|
761
|
+
if (!resolvedAgentName && input.isSystem && agentTask) {
|
|
762
|
+
const titleMatch = agentTask.match(/You are the \*{0,2}([^*\n]+?Agent)\*{0,2}/i);
|
|
763
|
+
const inferredLookup = titleMatch?.[1]?.trim();
|
|
764
|
+
if (inferredLookup) {
|
|
765
|
+
const inferredDef = yield manager.resolveAgentConfig(inferredLookup);
|
|
766
|
+
if (inferredDef?.system && inferredDef?.parent) {
|
|
767
|
+
resolvedAgentName = inferredDef.id ?? inferredDef.name;
|
|
768
|
+
ctx.traceInfo(`[orch] normalized custom system spawn to named agent: ${resolvedAgentName}`);
|
|
769
|
+
applyAgentDef(inferredDef, true);
|
|
770
|
+
}
|
|
771
|
+
}
|
|
772
|
+
}
|
|
773
|
+
if (resolvedAgentName) {
|
|
774
|
+
ctx.traceInfo(`[orch] resolving agent config for: ${resolvedAgentName}`);
|
|
775
|
+
const agentDef = yield manager.resolveAgentConfig(resolvedAgentName);
|
|
776
|
+
if (!agentDef) {
|
|
777
|
+
yield versionedContinueAsNew(continueInput({
|
|
778
|
+
prompt: `[SYSTEM: spawn_agent failed — agent "${resolvedAgentName}" not found. Use list_agents to see available agents.]`,
|
|
779
|
+
}));
|
|
780
|
+
return "";
|
|
781
|
+
}
|
|
782
|
+
applyAgentDef(agentDef, resolvedAgentName !== result.agentName);
|
|
783
|
+
}
|
|
784
|
+
// If the parent is a system session, propagate isSystem to children
|
|
785
|
+
if (input.isSystem) {
|
|
786
|
+
agentIsSystem = true;
|
|
787
|
+
}
|
|
788
|
+
// Auto-detect title for custom spawns by system sessions:
|
|
789
|
+
// If the LLM didn't use agent_name, try to extract a reasonable title
|
|
790
|
+
// from the task or system_message rather than showing "System Agent".
|
|
791
|
+
if (!agentTitle && agentIsSystem) {
|
|
792
|
+
const text = agentTask || "";
|
|
793
|
+
// Look for "You are the **XYZ Agent**" or "You are the XYZ Agent" patterns
|
|
794
|
+
const titleMatch = text.match(/You are the \*{0,2}([^*\n]+?)\*{0,2}\s*[—–-]/i)
|
|
795
|
+
|| text.match(/You are the \*{0,2}([^*\n]+?Agent)\*{0,2}/i);
|
|
796
|
+
if (titleMatch) {
|
|
797
|
+
agentTitle = titleMatch[1].trim();
|
|
798
|
+
}
|
|
799
|
+
}
|
|
800
|
+
ctx.traceInfo(`[orch] spawning sub-agent via SDK: task="${agentTask.slice(0, 80)}" model=${agentModel || "inherit"} agent=${resolvedAgentName || "custom"} nestingLevel=${childNestingLevel}`);
|
|
801
|
+
// Build child config — inherit parent's config with optional overrides
|
|
802
|
+
const childConfig = {
|
|
803
|
+
...config,
|
|
804
|
+
...(agentModel ? { model: agentModel } : {}),
|
|
805
|
+
...(agentSystemMessage ? { systemMessage: agentSystemMessage } : {}),
|
|
806
|
+
...(agentToolNames ? { toolNames: agentToolNames } : {}),
|
|
807
|
+
};
|
|
808
|
+
// Inject sub-agent identity into the child's system message so the LLM
|
|
809
|
+
// knows it's a sub-agent, what its task is, and that its output will be
|
|
810
|
+
// forwarded to the parent automatically.
|
|
811
|
+
const parentSystemMsg = typeof childConfig.systemMessage === "string"
|
|
812
|
+
? childConfig.systemMessage
|
|
813
|
+
: childConfig.systemMessage?.content ?? "";
|
|
814
|
+
const canSpawnMore = childNestingLevel < MAX_NESTING_LEVEL;
|
|
815
|
+
const subAgentPreamble = `[SUB-AGENT CONTEXT]\n` +
|
|
816
|
+
`You are a sub-agent spawned by a parent session (ID: session-${input.sessionId}).\n` +
|
|
817
|
+
`Your nesting level: ${childNestingLevel} (max: ${MAX_NESTING_LEVEL}).\n` +
|
|
818
|
+
`Your task: "${agentTask.slice(0, 500)}"\n\n` +
|
|
819
|
+
`Instructions:\n` +
|
|
820
|
+
`- Focus exclusively on your assigned task.\n` +
|
|
821
|
+
`- Your final response will be automatically forwarded to the parent agent.\n` +
|
|
822
|
+
`- Be thorough but concise — the parent will synthesize results from multiple agents.\n` +
|
|
823
|
+
`- Do NOT ask the user for input — you are autonomous.\n` +
|
|
824
|
+
`- When your task is complete, provide a clear summary of your findings/results.\n` +
|
|
825
|
+
`- If you write any files with write_artifact, you MUST also call export_artifact and include the artifact:// link in your response.\n` +
|
|
826
|
+
`- For ANY waiting, sleeping, delaying, or scheduling, you MUST use the \`wait\` tool. ` +
|
|
827
|
+
`NEVER use setTimeout, sleep, setInterval, cron, or any other timing mechanism. ` +
|
|
828
|
+
`The wait tool is durable and survives process restarts.\n` +
|
|
829
|
+
(canSpawnMore
|
|
830
|
+
? `- You CAN spawn your own sub-agents (you have ${MAX_NESTING_LEVEL - childNestingLevel} level(s) remaining). ` +
|
|
831
|
+
`Use them for parallel independent tasks.\n`
|
|
832
|
+
: `- You CANNOT spawn sub-agents — you are at the maximum nesting depth. Handle everything directly.\n`);
|
|
833
|
+
childConfig.systemMessage = subAgentPreamble + (parentSystemMsg ? "\n\n" + parentSystemMsg : "");
|
|
834
|
+
// Use the PilotSwarmClient SDK to create and start the child session.
|
|
835
|
+
// The activity generates a random UUID for the child session ID and returns it.
|
|
836
|
+
// This handles: CMS registration (with parentSessionId), orchestration startup,
|
|
837
|
+
// and initial task prompt — all through the standard SDK path.
|
|
838
|
+
let childSessionId;
|
|
839
|
+
try {
|
|
840
|
+
childSessionId = yield manager.spawnChildSession(input.sessionId, childConfig, agentTask, childNestingLevel, agentIsSystem, agentTitle, agentId, agentSplash);
|
|
841
|
+
}
|
|
842
|
+
catch (err) {
|
|
843
|
+
ctx.traceInfo(`[orch] spawnChildSession failed: ${err.message}`);
|
|
844
|
+
yield versionedContinueAsNew(continueInput({
|
|
845
|
+
prompt: `[SYSTEM: spawn_agent failed: ${err.message}]`,
|
|
846
|
+
}));
|
|
847
|
+
return "";
|
|
848
|
+
}
|
|
849
|
+
const childOrchId = `session-${childSessionId}`;
|
|
850
|
+
// Track the sub-agent
|
|
851
|
+
subAgents.push({
|
|
852
|
+
orchId: childOrchId,
|
|
853
|
+
sessionId: childSessionId,
|
|
854
|
+
task: agentTask.slice(0, 500),
|
|
855
|
+
status: "running",
|
|
856
|
+
});
|
|
857
|
+
// Feed confirmation back to the LLM
|
|
858
|
+
const spawnMsg = `[SYSTEM: Sub-agent spawned successfully.\n` +
|
|
859
|
+
` Agent ID: ${childOrchId}\n` +
|
|
860
|
+
` ${resolvedAgentName ? `Agent: ${resolvedAgentName}\n ` : ``}Task: "${agentTask.slice(0, 200)}"\n` +
|
|
861
|
+
` The agent is now running autonomously. Use check_agents to monitor progress, ` +
|
|
862
|
+
`message_agent to send instructions. To wait for completion, use wait + check_agents ` +
|
|
863
|
+
`in a loop (choose an appropriate interval) so you can report progress to the user.]`;
|
|
864
|
+
yield versionedContinueAsNew(continueInput({ prompt: spawnMsg }));
|
|
865
|
+
return "";
|
|
866
|
+
}
|
|
867
|
+
case "message_agent": {
|
|
868
|
+
const targetOrchId = result.agentId;
|
|
869
|
+
const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
|
|
870
|
+
if (!agentEntry) {
|
|
871
|
+
ctx.traceInfo(`[orch] message_agent: unknown agent ${targetOrchId}`);
|
|
872
|
+
yield versionedContinueAsNew(continueInput({
|
|
873
|
+
prompt: `[SYSTEM: message_agent failed — agent "${targetOrchId}" not found. ` +
|
|
874
|
+
`Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
|
|
875
|
+
}));
|
|
876
|
+
return "";
|
|
877
|
+
}
|
|
878
|
+
ctx.traceInfo(`[orch] message_agent via SDK: ${agentEntry.sessionId} msg="${result.message.slice(0, 60)}"`);
|
|
879
|
+
try {
|
|
880
|
+
yield manager.sendToSession(agentEntry.sessionId, result.message);
|
|
881
|
+
}
|
|
882
|
+
catch (err) {
|
|
883
|
+
ctx.traceInfo(`[orch] message_agent failed: ${err.message}`);
|
|
884
|
+
yield versionedContinueAsNew(continueInput({
|
|
885
|
+
prompt: `[SYSTEM: message_agent failed: ${err.message}]`,
|
|
886
|
+
}));
|
|
887
|
+
return "";
|
|
888
|
+
}
|
|
889
|
+
yield versionedContinueAsNew(continueInput({
|
|
890
|
+
prompt: `[SYSTEM: Message sent to sub-agent ${targetOrchId}: "${result.message.slice(0, 200)}"]`,
|
|
891
|
+
}));
|
|
892
|
+
return "";
|
|
893
|
+
}
|
|
894
|
+
case "check_agents": {
|
|
895
|
+
ctx.traceInfo(`[orch] check_agents: ${subAgents.length} agents tracked`);
|
|
896
|
+
if (subAgents.length === 0) {
|
|
897
|
+
yield versionedContinueAsNew(continueInput({
|
|
898
|
+
prompt: `[SYSTEM: No sub-agents have been spawned yet.]`,
|
|
899
|
+
}));
|
|
900
|
+
return "";
|
|
901
|
+
}
|
|
902
|
+
// Get fresh status for each agent via the SDK
|
|
903
|
+
const statusLines = [];
|
|
904
|
+
for (const agent of subAgents) {
|
|
905
|
+
try {
|
|
906
|
+
const rawStatus = yield manager.getSessionStatus(agent.sessionId);
|
|
907
|
+
const parsed = JSON.parse(rawStatus);
|
|
908
|
+
// Update local tracking
|
|
909
|
+
// Sub-agents go "idle" when their turn completes
|
|
910
|
+
if (parsed.status === "completed" || parsed.status === "failed" || parsed.status === "idle") {
|
|
911
|
+
agent.status = parsed.status === "failed" ? "failed" : "completed";
|
|
912
|
+
if (parsed.result)
|
|
913
|
+
agent.result = parsed.result.slice(0, 1000);
|
|
914
|
+
}
|
|
915
|
+
statusLines.push(` - Agent ${agent.orchId}\n` +
|
|
916
|
+
` Task: "${agent.task.slice(0, 120)}"\n` +
|
|
917
|
+
` Status: ${parsed.status}\n` +
|
|
918
|
+
` Iterations: ${parsed.iterations ?? 0}\n` +
|
|
919
|
+
` Output: ${parsed.result ?? "(no output yet)"}`);
|
|
920
|
+
}
|
|
921
|
+
catch (err) {
|
|
922
|
+
statusLines.push(` - Agent ${agent.orchId}\n` +
|
|
923
|
+
` Task: "${agent.task.slice(0, 120)}"\n` +
|
|
924
|
+
` Status: unknown (error: ${err.message})`);
|
|
925
|
+
}
|
|
926
|
+
}
|
|
927
|
+
yield versionedContinueAsNew(continueInput({
|
|
928
|
+
prompt: `[SYSTEM: Sub-agent status report (${subAgents.length} agents):\n${statusLines.join("\n")}]`,
|
|
929
|
+
}));
|
|
930
|
+
return "";
|
|
931
|
+
}
|
|
932
|
+
case "list_sessions": {
|
|
933
|
+
ctx.traceInfo(`[orch] list_sessions`);
|
|
934
|
+
const rawSessions = yield manager.listSessions();
|
|
935
|
+
const sessions = JSON.parse(rawSessions);
|
|
936
|
+
const lines = sessions.map((s) => ` - ${s.sessionId}${s.sessionId === input.sessionId ? " (this session)" : ""}\n` +
|
|
937
|
+
` Title: ${s.title ?? "(untitled)"}\n` +
|
|
938
|
+
` Status: ${s.status}, Iterations: ${s.iterations ?? 0}\n` +
|
|
939
|
+
` Parent: ${s.parentSessionId ?? "none"}`);
|
|
940
|
+
yield versionedContinueAsNew(continueInput({
|
|
941
|
+
prompt: `[SYSTEM: Active sessions (${sessions.length}):\n${lines.join("\n")}]`,
|
|
942
|
+
}));
|
|
943
|
+
return "";
|
|
944
|
+
}
|
|
945
|
+
case "wait_for_agents": {
|
|
946
|
+
let targetIds = result.agentIds;
|
|
947
|
+
// If empty, wait for all running agents
|
|
948
|
+
if (!targetIds || targetIds.length === 0) {
|
|
949
|
+
targetIds = subAgents.filter(a => a.status === "running").map(a => a.orchId);
|
|
950
|
+
}
|
|
951
|
+
if (targetIds.length === 0) {
|
|
952
|
+
ctx.traceInfo(`[orch] wait_for_agents: no running agents to wait for`);
|
|
953
|
+
yield versionedContinueAsNew(continueInput({
|
|
954
|
+
prompt: `[SYSTEM: No running sub-agents to wait for. All agents have already completed.]`,
|
|
955
|
+
}));
|
|
956
|
+
return "";
|
|
957
|
+
}
|
|
958
|
+
ctx.traceInfo(`[orch] wait_for_agents: waiting for ${targetIds.length} agents`);
|
|
959
|
+
setStatus(ctx, "running", {
|
|
960
|
+
iteration,
|
|
961
|
+
waitingForAgents: targetIds,
|
|
962
|
+
});
|
|
963
|
+
// Event-driven wait: children send updates to the parent's "messages"
|
|
964
|
+
// queue via sendToSession. We race messages vs a fallback poll timer.
|
|
965
|
+
// Child updates arrive as "[CHILD_UPDATE from=... type=...]" messages.
|
|
966
|
+
const POLL_INTERVAL_MS = 30_000; // 30s fallback poll (event-driven, so rarely needed)
|
|
967
|
+
const MAX_WAIT_ITERATIONS = 360;
|
|
968
|
+
for (let waitIter = 0; waitIter < MAX_WAIT_ITERATIONS; waitIter++) {
|
|
969
|
+
// Check if all targets are done (from local tracking)
|
|
970
|
+
const stillRunning = targetIds.filter(id => {
|
|
971
|
+
const agent = subAgents.find(a => a.orchId === id);
|
|
972
|
+
return agent && agent.status === "running";
|
|
973
|
+
});
|
|
974
|
+
if (stillRunning.length === 0)
|
|
975
|
+
break;
|
|
976
|
+
// Race: message (child update or user) vs fallback poll timer
|
|
977
|
+
const msg = ctx.dequeueEvent("messages");
|
|
978
|
+
const pollTimer = ctx.scheduleTimer(POLL_INTERVAL_MS);
|
|
979
|
+
const waitRace = yield ctx.race(msg, pollTimer);
|
|
980
|
+
if (waitRace.index === 0) {
|
|
981
|
+
// Message arrived — could be a child update or a user message
|
|
982
|
+
const msgData = typeof waitRace.value === "string"
|
|
983
|
+
? JSON.parse(waitRace.value) : (waitRace.value ?? {});
|
|
984
|
+
// Check if it's a child update (sent by sendToSession from child orch)
|
|
985
|
+
const childUpdateMatch = typeof msgData.prompt === "string"
|
|
986
|
+
&& msgData.prompt.match(/^\[CHILD_UPDATE from=(\S+) type=(\S+)/);
|
|
987
|
+
if (childUpdateMatch) {
|
|
988
|
+
const childSessionId = childUpdateMatch[1];
|
|
989
|
+
const updateType = childUpdateMatch[2].replace(/\]$/, "");
|
|
990
|
+
const content = msgData.prompt.split("\n").slice(1).join("\n").trim();
|
|
991
|
+
ctx.traceInfo(`[orch] wait_for_agents: child update from=${childSessionId} type=${updateType}`);
|
|
992
|
+
const agent = subAgents.find(a => a.sessionId === childSessionId);
|
|
993
|
+
if (agent) {
|
|
994
|
+
if (content)
|
|
995
|
+
agent.result = content.slice(0, 2000);
|
|
996
|
+
// Check via SDK if done (the update type alone isn't authoritative
|
|
997
|
+
// since "completed" means turn completed, not necessarily finished)
|
|
998
|
+
try {
|
|
999
|
+
const rawStatus = yield manager.getSessionStatus(agent.sessionId);
|
|
1000
|
+
const parsed = JSON.parse(rawStatus);
|
|
1001
|
+
// Sub-agents go "idle" when their turn completes (they have no user to wait for)
|
|
1002
|
+
if (parsed.status === "completed" || parsed.status === "failed" || parsed.status === "idle") {
|
|
1003
|
+
agent.status = parsed.status === "failed" ? "failed" : "completed";
|
|
1004
|
+
if (parsed.result)
|
|
1005
|
+
agent.result = parsed.result.slice(0, 2000);
|
|
1006
|
+
}
|
|
1007
|
+
}
|
|
1008
|
+
catch { }
|
|
1009
|
+
}
|
|
1010
|
+
continue;
|
|
1011
|
+
}
|
|
1012
|
+
// Not a child update — it's a user message interrupting the wait
|
|
1013
|
+
if (msgData.prompt) {
|
|
1014
|
+
ctx.traceInfo(`[orch] wait_for_agents interrupted by user: "${msgData.prompt.slice(0, 60)}"`);
|
|
1015
|
+
yield versionedContinueAsNew(continueInput({
|
|
1016
|
+
prompt: msgData.prompt,
|
|
1017
|
+
}));
|
|
1018
|
+
return "";
|
|
1019
|
+
}
|
|
1020
|
+
}
|
|
1021
|
+
else {
|
|
1022
|
+
// Timer fired — fallback poll via SDK for any agents we missed
|
|
1023
|
+
ctx.traceInfo(`[orch] wait_for_agents: fallback poll, checking ${stillRunning.length} agents`);
|
|
1024
|
+
for (const targetId of stillRunning) {
|
|
1025
|
+
const agent = subAgents.find(a => a.orchId === targetId);
|
|
1026
|
+
if (!agent || agent.status !== "running")
|
|
1027
|
+
continue;
|
|
1028
|
+
try {
|
|
1029
|
+
const rawStatus = yield manager.getSessionStatus(agent.sessionId);
|
|
1030
|
+
const parsed = JSON.parse(rawStatus);
|
|
1031
|
+
// Sub-agents go "idle" when their turn completes
|
|
1032
|
+
if (parsed.status === "completed" || parsed.status === "failed" || parsed.status === "idle") {
|
|
1033
|
+
agent.status = parsed.status === "failed" ? "failed" : "completed";
|
|
1034
|
+
if (parsed.result)
|
|
1035
|
+
agent.result = parsed.result.slice(0, 2000);
|
|
1036
|
+
}
|
|
1037
|
+
}
|
|
1038
|
+
catch { }
|
|
1039
|
+
}
|
|
1040
|
+
}
|
|
1041
|
+
}
|
|
1042
|
+
// Build results summary
|
|
1043
|
+
const resultLines = [];
|
|
1044
|
+
for (const targetId of targetIds) {
|
|
1045
|
+
const agent = subAgents.find(a => a.orchId === targetId);
|
|
1046
|
+
if (!agent)
|
|
1047
|
+
continue;
|
|
1048
|
+
resultLines.push(` - Agent ${agent.orchId}\n` +
|
|
1049
|
+
` Task: "${agent.task.slice(0, 120)}"\n` +
|
|
1050
|
+
` Status: ${agent.status}\n` +
|
|
1051
|
+
` Result: ${agent.result ?? "(no result)"}`);
|
|
1052
|
+
}
|
|
1053
|
+
yield versionedContinueAsNew(continueInput({
|
|
1054
|
+
prompt: `[SYSTEM: Sub-agents completed:\n${resultLines.join("\n")}]`,
|
|
1055
|
+
}));
|
|
1056
|
+
return "";
|
|
1057
|
+
}
|
|
1058
|
+
case "complete_agent": {
|
|
1059
|
+
const targetOrchId = result.agentId;
|
|
1060
|
+
const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
|
|
1061
|
+
if (!agentEntry) {
|
|
1062
|
+
ctx.traceInfo(`[orch] complete_agent: unknown agent ${targetOrchId}`);
|
|
1063
|
+
yield versionedContinueAsNew(continueInput({
|
|
1064
|
+
prompt: `[SYSTEM: complete_agent failed — agent "${targetOrchId}" not found. ` +
|
|
1065
|
+
`Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
|
|
1066
|
+
}));
|
|
1067
|
+
return "";
|
|
1068
|
+
}
|
|
1069
|
+
ctx.traceInfo(`[orch] complete_agent: sending /done to ${agentEntry.sessionId}`);
|
|
1070
|
+
try {
|
|
1071
|
+
// Send a /done command to the child's orchestration
|
|
1072
|
+
const cmdId = `done-${iteration}`;
|
|
1073
|
+
yield manager.sendCommandToSession(agentEntry.sessionId, { type: "cmd", cmd: "done", id: cmdId, args: { reason: "Completed by parent" } });
|
|
1074
|
+
agentEntry.status = "completed";
|
|
1075
|
+
}
|
|
1076
|
+
catch (err) {
|
|
1077
|
+
ctx.traceInfo(`[orch] complete_agent failed: ${err.message}`);
|
|
1078
|
+
yield versionedContinueAsNew(continueInput({
|
|
1079
|
+
prompt: `[SYSTEM: complete_agent failed: ${err.message}]`,
|
|
1080
|
+
}));
|
|
1081
|
+
return "";
|
|
1082
|
+
}
|
|
1083
|
+
yield versionedContinueAsNew(continueInput({
|
|
1084
|
+
prompt: `[SYSTEM: Sub-agent ${targetOrchId} has been completed gracefully.]`,
|
|
1085
|
+
}));
|
|
1086
|
+
return "";
|
|
1087
|
+
}
|
|
1088
|
+
// Cancel a tracked sub-agent (and, transitively, its descendants), then
// restart this orchestration via continue-as-new with a [SYSTEM: ...] prompt
// reporting the outcome. Always ends the current workflow run (`return ""`).
case "cancel_agent": {
    const targetOrchId = result.agentId;
    const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
    if (!agentEntry) {
        // Unknown agent id: report the failure (with the list of known ids)
        // back to the model on the next run instead of throwing.
        ctx.traceInfo(`[orch] cancel_agent: unknown agent ${targetOrchId}`);
        yield versionedContinueAsNew(continueInput({
            prompt: `[SYSTEM: cancel_agent failed — agent "${targetOrchId}" not found. ` +
                `Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
        }));
        return "";
    }
    // `??` keeps an explicit empty-string reason; only null/undefined fall back.
    const cancelReason = result.reason ?? "Cancelled by parent";
    ctx.traceInfo(`[orch] cancel_agent: cancelling ${agentEntry.sessionId} reason="${cancelReason}"`);
    try {
        // Cascade: cancel all descendants of the target agent first
        const descendants = yield manager.getDescendantSessionIds(agentEntry.sessionId);
        if (descendants.length > 0) {
            ctx.traceInfo(`[orch] cancel_agent: cascading cancel to ${descendants.length} descendant(s)`);
            for (const descId of descendants) {
                try {
                    yield manager.cancelSession(descId, `Ancestor ${agentEntry.sessionId} cancelled: ${cancelReason}`);
                }
                catch (err) {
                    // A failed descendant cancel is logged but does not abort
                    // the cascade or the cancellation of the target itself.
                    ctx.traceInfo(`[orch] cancel_agent: failed to cancel descendant ${descId}: ${err.message} (non-fatal)`);
                }
            }
        }
        yield manager.cancelSession(agentEntry.sessionId, cancelReason);
        // Keep the entry in subAgents (unlike delete_agent) but mark it done.
        agentEntry.status = "cancelled";
    }
    catch (err) {
        // Cancelling the target itself failed: surface the error to the model
        // on the next run. NOTE(review): descendants cancelled above are not
        // rolled back in this path — presumably intentional best-effort.
        ctx.traceInfo(`[orch] cancel_agent failed: ${err.message}`);
        yield versionedContinueAsNew(continueInput({
            prompt: `[SYSTEM: cancel_agent failed: ${err.message}]`,
        }));
        return "";
    }
    // Success: restart with a confirmation prompt. The optional reason echoes
    // the caller-supplied `result.reason` (not the defaulted cancelReason).
    yield versionedContinueAsNew(continueInput({
        prompt: `[SYSTEM: Sub-agent ${targetOrchId} has been cancelled.${result.reason ? ` Reason: ${result.reason}` : ""}]`,
    }));
    return "";
}
|
|
1130
|
+
// Delete a tracked sub-agent (and, transitively, its descendants), drop it
// from the subAgents registry, then restart this orchestration via
// continue-as-new with a [SYSTEM: ...] outcome prompt. Mirrors cancel_agent
// but removes the entry entirely instead of marking a status.
case "delete_agent": {
    const targetOrchId = result.agentId;
    const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
    if (!agentEntry) {
        // Unknown agent id: report failure (with known ids) on the next run.
        ctx.traceInfo(`[orch] delete_agent: unknown agent ${targetOrchId}`);
        yield versionedContinueAsNew(continueInput({
            prompt: `[SYSTEM: delete_agent failed — agent "${targetOrchId}" not found. ` +
                `Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
        }));
        return "";
    }
    // `??` keeps an explicit empty-string reason; only null/undefined fall back.
    const deleteReason = result.reason ?? "Deleted by parent";
    ctx.traceInfo(`[orch] delete_agent: deleting ${agentEntry.sessionId} reason="${deleteReason}"`);
    try {
        // Cascade: delete all descendants of the target agent first
        const descendants = yield manager.getDescendantSessionIds(agentEntry.sessionId);
        if (descendants.length > 0) {
            ctx.traceInfo(`[orch] delete_agent: cascading delete to ${descendants.length} descendant(s)`);
            for (const descId of descendants) {
                try {
                    yield manager.deleteSession(descId, `Ancestor ${agentEntry.sessionId} deleted: ${deleteReason}`);
                }
                catch (err) {
                    // A failed descendant delete is logged but does not abort
                    // the cascade or the deletion of the target itself.
                    ctx.traceInfo(`[orch] delete_agent: failed to delete descendant ${descId}: ${err.message} (non-fatal)`);
                }
            }
        }
        yield manager.deleteSession(agentEntry.sessionId, deleteReason);
        // Remove from subAgents tracking entirely
        subAgents = subAgents.filter(a => a.orchId !== targetOrchId);
    }
    catch (err) {
        // Deleting the target itself failed: surface the error to the model
        // on the next run; the subAgents entry is left in place in this path.
        ctx.traceInfo(`[orch] delete_agent failed: ${err.message}`);
        yield versionedContinueAsNew(continueInput({
            prompt: `[SYSTEM: delete_agent failed: ${err.message}]`,
        }));
        return "";
    }
    // Success: restart with a confirmation prompt. The optional reason echoes
    // the caller-supplied `result.reason` (not the defaulted deleteReason).
    yield versionedContinueAsNew(continueInput({
        prompt: `[SYSTEM: Sub-agent ${targetOrchId} has been deleted.${result.reason ? ` Reason: ${result.reason}` : ""}]`,
    }));
    return "";
}
|
|
1173
|
+
// A turn ended with an error result. Retry with exponential backoff
// (15s * 2^(attempt-1)) via timer + continue-as-new; once MAX_RETRIES is
// reached, publish a terminal error status and fall back to waiting for
// user input (`continue` re-enters the enclosing loop in this same run).
case "error": {
    // Treat like an activity failure — retry with backoff.
    retryCount++;
    ctx.traceInfo(`[orch] turn returned error (attempt ${retryCount}/${MAX_RETRIES}): ${result.message}`);
    if (retryCount >= MAX_RETRIES) {
        ctx.traceInfo(`[orch] max retries exhausted for turn error, waiting for user input`);
        setStatus(ctx, "error", {
            iteration,
            error: `Failed after ${MAX_RETRIES} attempts: ${result.message}`,
            retriesExhausted: true,
        });
        // Reset the counter so a later user-triggered turn starts fresh.
        retryCount = 0;
        continue;
    }
    // Non-terminal retry: expose the attempt count in the status message.
    setStatus(ctx, "error", {
        iteration,
        error: `${result.message} (retry ${retryCount}/${MAX_RETRIES})`,
    });
    // Exponential backoff in seconds: 15, 30, 60, ... per attempt.
    const errorRetryDelay = 15 * Math.pow(2, retryCount - 1);
    ctx.traceInfo(`[orch] retrying in ${errorRetryDelay}s after turn error`);
    if (blobEnabled) {
        // Offload state to blob storage before the timer/continue-as-new;
        // the next run must rehydrate (needsHydration forced true below).
        yield* dehydrateAndReset("error");
    }
    // scheduleTimer takes milliseconds; delay was computed in seconds.
    yield ctx.scheduleTimer(errorRetryDelay * 1000);
    yield versionedContinueAsNew(continueInput({
        prompt,
        retryCount,
        needsHydration: blobEnabled ? true : needsHydration,
    }));
    return "";
}
|
|
1204
|
+
}
|
|
1205
|
+
}
|
|
1206
|
+
}
|
|
1207
|
+
//# sourceMappingURL=orchestration_1_0_9.js.map
|