pilotswarm-sdk 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent-loader.d.ts +61 -0
- package/dist/agent-loader.d.ts.map +1 -0
- package/dist/agent-loader.js +212 -0
- package/dist/agent-loader.js.map +1 -0
- package/dist/artifact-tools.d.ts +31 -0
- package/dist/artifact-tools.d.ts.map +1 -0
- package/dist/artifact-tools.js +190 -0
- package/dist/artifact-tools.js.map +1 -0
- package/dist/blob-store.d.ts +73 -0
- package/dist/blob-store.d.ts.map +1 -0
- package/dist/blob-store.js +220 -0
- package/dist/blob-store.js.map +1 -0
- package/dist/client.d.ts +159 -0
- package/dist/client.d.ts.map +1 -0
- package/dist/client.js +676 -0
- package/dist/client.js.map +1 -0
- package/dist/cms.d.ts +129 -0
- package/dist/cms.d.ts.map +1 -0
- package/dist/cms.js +313 -0
- package/dist/cms.js.map +1 -0
- package/dist/index.d.ts +44 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +42 -0
- package/dist/index.js.map +1 -0
- package/dist/managed-session.d.ts +70 -0
- package/dist/managed-session.d.ts.map +1 -0
- package/dist/managed-session.js +717 -0
- package/dist/managed-session.js.map +1 -0
- package/dist/management-client.d.ts +171 -0
- package/dist/management-client.d.ts.map +1 -0
- package/dist/management-client.js +401 -0
- package/dist/management-client.js.map +1 -0
- package/dist/mcp-loader.d.ts +50 -0
- package/dist/mcp-loader.d.ts.map +1 -0
- package/dist/mcp-loader.js +83 -0
- package/dist/mcp-loader.js.map +1 -0
- package/dist/model-providers.d.ts +143 -0
- package/dist/model-providers.d.ts.map +1 -0
- package/dist/model-providers.js +228 -0
- package/dist/model-providers.js.map +1 -0
- package/dist/orchestration-registry.d.ts +7 -0
- package/dist/orchestration-registry.d.ts.map +1 -0
- package/dist/orchestration-registry.js +49 -0
- package/dist/orchestration-registry.js.map +1 -0
- package/dist/orchestration.d.ts +36 -0
- package/dist/orchestration.d.ts.map +1 -0
- package/dist/orchestration.js +1357 -0
- package/dist/orchestration.js.map +1 -0
- package/dist/orchestration_1_0_0.d.ts +20 -0
- package/dist/orchestration_1_0_0.d.ts.map +1 -0
- package/dist/orchestration_1_0_0.js +497 -0
- package/dist/orchestration_1_0_0.js.map +1 -0
- package/dist/orchestration_1_0_1.d.ts +19 -0
- package/dist/orchestration_1_0_1.d.ts.map +1 -0
- package/dist/orchestration_1_0_1.js +546 -0
- package/dist/orchestration_1_0_1.js.map +1 -0
- package/dist/orchestration_1_0_10.d.ts +36 -0
- package/dist/orchestration_1_0_10.d.ts.map +1 -0
- package/dist/orchestration_1_0_10.js +1253 -0
- package/dist/orchestration_1_0_10.js.map +1 -0
- package/dist/orchestration_1_0_11.d.ts +36 -0
- package/dist/orchestration_1_0_11.d.ts.map +1 -0
- package/dist/orchestration_1_0_11.js +1255 -0
- package/dist/orchestration_1_0_11.js.map +1 -0
- package/dist/orchestration_1_0_12.d.ts +36 -0
- package/dist/orchestration_1_0_12.d.ts.map +1 -0
- package/dist/orchestration_1_0_12.js +1250 -0
- package/dist/orchestration_1_0_12.js.map +1 -0
- package/dist/orchestration_1_0_13.d.ts +36 -0
- package/dist/orchestration_1_0_13.d.ts.map +1 -0
- package/dist/orchestration_1_0_13.js +1260 -0
- package/dist/orchestration_1_0_13.js.map +1 -0
- package/dist/orchestration_1_0_14.d.ts +36 -0
- package/dist/orchestration_1_0_14.d.ts.map +1 -0
- package/dist/orchestration_1_0_14.js +1258 -0
- package/dist/orchestration_1_0_14.js.map +1 -0
- package/dist/orchestration_1_0_15.d.ts +36 -0
- package/dist/orchestration_1_0_15.d.ts.map +1 -0
- package/dist/orchestration_1_0_15.js +1266 -0
- package/dist/orchestration_1_0_15.js.map +1 -0
- package/dist/orchestration_1_0_16.d.ts +36 -0
- package/dist/orchestration_1_0_16.d.ts.map +1 -0
- package/dist/orchestration_1_0_16.js +1275 -0
- package/dist/orchestration_1_0_16.js.map +1 -0
- package/dist/orchestration_1_0_17.d.ts +36 -0
- package/dist/orchestration_1_0_17.d.ts.map +1 -0
- package/dist/orchestration_1_0_17.js +1314 -0
- package/dist/orchestration_1_0_17.js.map +1 -0
- package/dist/orchestration_1_0_18.d.ts +36 -0
- package/dist/orchestration_1_0_18.d.ts.map +1 -0
- package/dist/orchestration_1_0_18.js +1328 -0
- package/dist/orchestration_1_0_18.js.map +1 -0
- package/dist/orchestration_1_0_19.d.ts +36 -0
- package/dist/orchestration_1_0_19.d.ts.map +1 -0
- package/dist/orchestration_1_0_19.js +1324 -0
- package/dist/orchestration_1_0_19.js.map +1 -0
- package/dist/orchestration_1_0_2.d.ts +19 -0
- package/dist/orchestration_1_0_2.d.ts.map +1 -0
- package/dist/orchestration_1_0_2.js +749 -0
- package/dist/orchestration_1_0_2.js.map +1 -0
- package/dist/orchestration_1_0_20.d.ts +36 -0
- package/dist/orchestration_1_0_20.d.ts.map +1 -0
- package/dist/orchestration_1_0_20.js +1347 -0
- package/dist/orchestration_1_0_20.js.map +1 -0
- package/dist/orchestration_1_0_3.d.ts +19 -0
- package/dist/orchestration_1_0_3.d.ts.map +1 -0
- package/dist/orchestration_1_0_3.js +826 -0
- package/dist/orchestration_1_0_3.js.map +1 -0
- package/dist/orchestration_1_0_4.d.ts +19 -0
- package/dist/orchestration_1_0_4.d.ts.map +1 -0
- package/dist/orchestration_1_0_4.js +1020 -0
- package/dist/orchestration_1_0_4.js.map +1 -0
- package/dist/orchestration_1_0_5.d.ts +19 -0
- package/dist/orchestration_1_0_5.d.ts.map +1 -0
- package/dist/orchestration_1_0_5.js +1027 -0
- package/dist/orchestration_1_0_5.js.map +1 -0
- package/dist/orchestration_1_0_6.d.ts +19 -0
- package/dist/orchestration_1_0_6.d.ts.map +1 -0
- package/dist/orchestration_1_0_6.js +1034 -0
- package/dist/orchestration_1_0_6.js.map +1 -0
- package/dist/orchestration_1_0_7.d.ts +19 -0
- package/dist/orchestration_1_0_7.d.ts.map +1 -0
- package/dist/orchestration_1_0_7.js +1085 -0
- package/dist/orchestration_1_0_7.js.map +1 -0
- package/dist/orchestration_1_0_8.d.ts +36 -0
- package/dist/orchestration_1_0_8.d.ts.map +1 -0
- package/dist/orchestration_1_0_8.js +1106 -0
- package/dist/orchestration_1_0_8.js.map +1 -0
- package/dist/orchestration_1_0_9.d.ts +36 -0
- package/dist/orchestration_1_0_9.d.ts.map +1 -0
- package/dist/orchestration_1_0_9.js +1207 -0
- package/dist/orchestration_1_0_9.js.map +1 -0
- package/dist/prompt-layering.d.ts +16 -0
- package/dist/prompt-layering.d.ts.map +1 -0
- package/dist/prompt-layering.js +60 -0
- package/dist/prompt-layering.js.map +1 -0
- package/dist/resourcemgr-tools.d.ts +27 -0
- package/dist/resourcemgr-tools.d.ts.map +1 -0
- package/dist/resourcemgr-tools.js +638 -0
- package/dist/resourcemgr-tools.js.map +1 -0
- package/dist/session-dumper.d.ts +26 -0
- package/dist/session-dumper.d.ts.map +1 -0
- package/dist/session-dumper.js +272 -0
- package/dist/session-dumper.js.map +1 -0
- package/dist/session-manager.d.ts +152 -0
- package/dist/session-manager.d.ts.map +1 -0
- package/dist/session-manager.js +493 -0
- package/dist/session-manager.js.map +1 -0
- package/dist/session-proxy.d.ts +68 -0
- package/dist/session-proxy.d.ts.map +1 -0
- package/dist/session-proxy.js +665 -0
- package/dist/session-proxy.js.map +1 -0
- package/dist/session-store.d.ts +35 -0
- package/dist/session-store.d.ts.map +1 -0
- package/dist/session-store.js +88 -0
- package/dist/session-store.js.map +1 -0
- package/dist/skills.d.ts +31 -0
- package/dist/skills.d.ts.map +1 -0
- package/dist/skills.js +93 -0
- package/dist/skills.js.map +1 -0
- package/dist/sweeper-tools.d.ts +28 -0
- package/dist/sweeper-tools.d.ts.map +1 -0
- package/dist/sweeper-tools.js +332 -0
- package/dist/sweeper-tools.js.map +1 -0
- package/dist/types.d.ts +498 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +9 -0
- package/dist/types.js.map +1 -0
- package/dist/worker.d.ts +128 -0
- package/dist/worker.d.ts.map +1 -0
- package/dist/worker.js +562 -0
- package/dist/worker.js.map +1 -0
- package/package.json +74 -0
- package/plugins/mgmt/agents/pilotswarm.agent.md +59 -0
- package/plugins/mgmt/agents/resourcemgr.agent.md +111 -0
- package/plugins/mgmt/agents/sweeper.agent.md +67 -0
- package/plugins/mgmt/skills/resourcemgr/SKILL.md +41 -0
- package/plugins/mgmt/skills/resourcemgr/tools.json +1 -0
- package/plugins/mgmt/skills/sweeper/SKILL.md +44 -0
- package/plugins/mgmt/skills/sweeper/tools.json +1 -0
- package/plugins/system/agents/default.agent.md +58 -0
- package/plugins/system/skills/durable-timers/SKILL.md +39 -0
- package/plugins/system/skills/sub-agents/SKILL.md +75 -0
|
@@ -0,0 +1,826 @@
|
|
|
1
|
+
import { createSessionProxy, createSessionManagerProxy } from "./session-proxy.js";
|
|
2
|
+
/**
 * Publish a JSON-encoded snapshot of session state as the orchestration's
 * custom status. Clients observe it via waitForStatusChange() or getStatus().
 * Extra fields are merged over the base `status` field (an `extra.status`
 * key would therefore win).
 * @internal
 */
function setStatus(ctx, status, extra) {
    const snapshot = Object.assign({ status }, extra);
    ctx.setCustomStatus(JSON.stringify(snapshot));
}
|
|
10
|
+
/**
|
|
11
|
+
* Long-lived durable session orchestration.
|
|
12
|
+
*
|
|
13
|
+
* One orchestration per copilot session. Uses:
|
|
14
|
+
* - SessionProxy for session-scoped operations (runTurn, dehydrate, hydrate, destroy)
|
|
15
|
+
* - SessionManagerProxy for global operations (listModels)
|
|
16
|
+
* - A single FIFO event queue ("messages") for all client→orchestration communication
|
|
17
|
+
*
|
|
18
|
+
* Main loop:
|
|
19
|
+
* 1. Dequeue message from "messages" queue
|
|
20
|
+
* 2. session.hydrate() if needed
|
|
21
|
+
* 3. session.runTurn(prompt) — returns TurnResult
|
|
22
|
+
* 4. Handle result: completed → idle wait, wait → timer, input → wait for answer
|
|
23
|
+
*
|
|
24
|
+
* @internal
|
|
25
|
+
*/
|
|
26
|
+
export function* durableSessionOrchestration_1_0_3(ctx, input) {
|
|
27
|
+
const dehydrateThreshold = input.dehydrateThreshold ?? 30;
|
|
28
|
+
const idleTimeout = input.idleTimeout ?? 30;
|
|
29
|
+
const inputGracePeriod = input.inputGracePeriod ?? 30;
|
|
30
|
+
const checkpointInterval = input.checkpointInterval ?? -1; // seconds, -1 = disabled
|
|
31
|
+
const rehydrationMessage = input.rehydrationMessage;
|
|
32
|
+
const blobEnabled = input.blobEnabled ?? false;
|
|
33
|
+
let needsHydration = input.needsHydration ?? false;
|
|
34
|
+
let affinityKey = input.affinityKey ?? input.sessionId;
|
|
35
|
+
let iteration = input.iteration ?? 0;
|
|
36
|
+
let config = { ...input.config };
|
|
37
|
+
let retryCount = input.retryCount ?? 0;
|
|
38
|
+
let taskContext = input.taskContext;
|
|
39
|
+
const baseSystemMessage = input.baseSystemMessage ?? config.systemMessage;
|
|
40
|
+
const MAX_RETRIES = 3;
|
|
41
|
+
const MAX_SUB_AGENTS = 5;
|
|
42
|
+
// ─── Sub-agent tracking ──────────────────────────────────
|
|
43
|
+
let subAgents = input.subAgents ? [...input.subAgents] : [];
|
|
44
|
+
const parentOrchId = input.parentOrchId;
|
|
45
|
+
// If we have a captured task context, inject it into the system message
|
|
46
|
+
// so it survives LLM conversation truncation (BasicTruncator never drops system messages).
|
|
47
|
+
if (taskContext) {
|
|
48
|
+
const base = typeof baseSystemMessage === 'string'
|
|
49
|
+
? baseSystemMessage ?? ''
|
|
50
|
+
: baseSystemMessage?.content ?? '';
|
|
51
|
+
config.systemMessage = base + (base ? '\n\n' : '') +
|
|
52
|
+
'[RECURRING TASK]\n' +
|
|
53
|
+
'Original user request (always remember, even if conversation history is truncated):\n"' +
|
|
54
|
+
taskContext + '"';
|
|
55
|
+
}
|
|
56
|
+
// ─── Title summarization timer ───────────────────────────
|
|
57
|
+
// First summarize at iteration 0 + 60s, then every 300s.
|
|
58
|
+
// We track the target timestamp (epoch ms) across continueAsNew.
|
|
59
|
+
// 0 means "schedule on first turn completion".
|
|
60
|
+
let nextSummarizeAt = input.nextSummarizeAt ?? 0;
|
|
61
|
+
// ─── Create proxies ──────────────────────────────────────
|
|
62
|
+
const manager = createSessionManagerProxy(ctx);
|
|
63
|
+
let session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
|
|
64
|
+
// ─── Helper: wrap prompt with resume context after dehydration ──
|
|
65
|
+
/**
 * Wrap a user prompt with a "[SYSTEM: ...]" resume preamble used after the
 * session has been dehydrated/rehydrated, so the LLM acknowledges the
 * context switch and resumes its prior work.
 * @param {string} userPrompt - the raw user message to append after the preamble.
 * @param {string} [extra] - optional extra instruction line inside the SYSTEM block.
 * @returns {string} "[SYSTEM: <note>\n<extra?>]\n\n<userPrompt>"-shaped string.
 */
function wrapWithResumeContext(userPrompt, extra) {
    const defaultNote =
        `The session was dehydrated and has been rehydrated on a new worker. ` +
        `The LLM conversation history is preserved, but you should acknowledge the context switch. ` +
        `After responding to the user's message below, resume exactly what you were doing before. ` +
        `If you were in the middle of a recurring task, continue it.`;
    // A caller-supplied rehydrationMessage (closure) replaces the default note.
    const note = rehydrationMessage ?? defaultNote;
    const lines = [`[SYSTEM: ${note}`];
    if (extra) {
        lines.push(extra);
    }
    lines.push(`]`, ``, userPrompt);
    return lines.join('\n');
}
|
|
79
|
+
// ─── Shared continueAsNew input builder ──────────────────
|
|
80
|
+
/**
 * Build the input object handed to ctx.continueAsNew so all loop state
 * survives the orchestration restart. `overrides` is shallow-merged last,
 * so it can replace any carried field (including retryCount).
 * @param {object} [overrides] - fields to override on the carried snapshot.
 * @returns {object} the next-generation orchestration input.
 */
function continueInput(overrides = {}) {
    // Snapshot of every piece of loop state that must be carried across
    // continueAsNew boundaries.
    const carried = {
        sessionId: input.sessionId,
        config,
        iteration,
        affinityKey,
        needsHydration,
        blobEnabled,
        dehydrateThreshold,
        idleTimeout,
        inputGracePeriod,
        checkpointInterval,
        rehydrationMessage,
        nextSummarizeAt,
        taskContext,
        baseSystemMessage,
        subAgents,
        parentOrchId,
        retryCount: 0, // reset by default; overrides can set it
    };
    return Object.assign(carried, overrides);
}
|
|
102
|
+
// ─── Helper: dehydrate + reset affinity ──────────────────
|
|
103
|
+
/**
 * Dehydrate the live session and rotate the affinity key so the next
 * hydrate can land on a different worker.
 * Side effects: sets needsHydration=true and rebinds the `session` proxy
 * (closure variables) to the freshly generated affinity key.
 * @param {string} reason - why we are dehydrating, included in the trace line.
 */
function* dehydrateAndReset(reason) {
    ctx.traceInfo(`[orch] dehydrating session (reason=${reason})`);
    yield session.dehydrate(reason);
    needsHydration = true;
    // GUID comes from ctx so the value is stable across orchestration replay.
    affinityKey = yield ctx.newGuid();
    session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
}
|
|
110
|
+
// ─── Helper: checkpoint without releasing pin ────────────
|
|
111
|
+
/**
 * Best-effort checkpoint of session state without releasing the worker pin.
 * No-op unless blob persistence is enabled and periodic checkpointing is on
 * (checkpointInterval >= 0). Failures are logged and swallowed: a missed
 * checkpoint is non-fatal and the orchestration continues.
 */
function* maybeCheckpoint() {
    if (!blobEnabled || checkpointInterval < 0)
        return;
    try {
        ctx.traceInfo(`[orch] checkpoint (iteration=${iteration})`);
        yield session.checkpoint();
    }
    catch (err) {
        // Deliberate best-effort swallow; `?? err` covers non-Error throwables.
        ctx.traceInfo(`[orch] checkpoint failed: ${err.message ?? err}`);
    }
}
|
|
122
|
+
// ─── Helper: summarize session title if due ──────────────
const FIRST_SUMMARIZE_DELAY = 60_000; // 1 minute after session start
const REPEAT_SUMMARIZE_DELAY = 300_000; // every 5 minutes thereafter
/**
 * Refresh the session title via the manager once the summarize deadline
 * (nextSummarizeAt, epoch ms, carried across continueAsNew) has passed.
 * Uses deterministic time (ctx.utcNow) so it replays safely.
 * Best effort: a failure is logged and the next deadline is still advanced,
 * which prevents a hot retry loop against a persistently failing summarizer.
 */
function* maybeSummarize() {
    const now = yield ctx.utcNow();
    // First call (nextSummarizeAt === 0): arm the timer, don't summarize yet.
    if (nextSummarizeAt === 0) {
        nextSummarizeAt = now + FIRST_SUMMARIZE_DELAY;
        return;
    }
    if (now < nextSummarizeAt)
        return;
    // Deadline reached — summarize (fire and forget, best effort).
    try {
        ctx.traceInfo(`[orch] summarizing session title`);
        yield manager.summarizeSession(input.sessionId);
    }
    catch (err) {
        // Consistency fix: match maybeCheckpoint's `err.message ?? err` so
        // non-Error throwables don't log "undefined".
        ctx.traceInfo(`[orch] summarize failed: ${err.message ?? err}`);
    }
    nextSummarizeAt = now + REPEAT_SUMMARIZE_DELAY;
}
|
|
144
|
+
// ─── Prompt carried from continueAsNew ───────────────────
|
|
145
|
+
let pendingPrompt = input.prompt;
|
|
146
|
+
/** Set by the "completed" handler so the dequeue loop doesn't overwrite it. */
|
|
147
|
+
let lastTurnResult = undefined;
|
|
148
|
+
ctx.traceInfo(`[orch] start: iter=${iteration} pending=${pendingPrompt ? `"${pendingPrompt.slice(0, 40)}"` : 'NONE'} hydrate=${needsHydration} blob=${blobEnabled}`);
|
|
149
|
+
// ─── MAIN LOOP ──────────────────────────────────────────
|
|
150
|
+
while (true) {
|
|
151
|
+
// ① GET NEXT PROMPT
|
|
152
|
+
let prompt = "";
|
|
153
|
+
if (pendingPrompt) {
|
|
154
|
+
prompt = pendingPrompt;
|
|
155
|
+
pendingPrompt = undefined;
|
|
156
|
+
}
|
|
157
|
+
else {
|
|
158
|
+
// If we have a completed turnResult, include it in the idle status
|
|
159
|
+
// so clients can read it via waitForStatusChange. Without this,
|
|
160
|
+
// a bare setStatus("idle") between yields would overwrite it.
|
|
161
|
+
if (lastTurnResult) {
|
|
162
|
+
setStatus(ctx, "idle", { iteration, turnResult: lastTurnResult });
|
|
163
|
+
}
|
|
164
|
+
else {
|
|
165
|
+
setStatus(ctx, "idle", { iteration });
|
|
166
|
+
}
|
|
167
|
+
let gotPrompt = false;
|
|
168
|
+
while (!gotPrompt) {
|
|
169
|
+
// If sub-agents are running, race messages with child_updates
|
|
170
|
+
// so the parent wakes up when a child reports back
|
|
171
|
+
const hasRunningAgents = subAgents.some(a => a.status === "running");
|
|
172
|
+
let msgData;
|
|
173
|
+
if (hasRunningAgents) {
|
|
174
|
+
const userMsg = ctx.dequeueEvent("messages");
|
|
175
|
+
const childUpdate = ctx.dequeueEvent("child_updates");
|
|
176
|
+
const raceResult = yield ctx.race(userMsg, childUpdate);
|
|
177
|
+
if (raceResult.index === 1) {
|
|
178
|
+
// Child update arrived — process it
|
|
179
|
+
const childData = typeof raceResult.value === "string"
|
|
180
|
+
? JSON.parse(raceResult.value) : raceResult.value;
|
|
181
|
+
ctx.traceInfo(`[orch] child_update: child=${childData.childOrchId} type=${childData.type}`);
|
|
182
|
+
// Update local sub-agent tracking
|
|
183
|
+
const agent = subAgents.find((a) => a.orchId === childData.childOrchId);
|
|
184
|
+
if (agent) {
|
|
185
|
+
if (childData.type === "turn_completed") {
|
|
186
|
+
agent.result = (childData.content ?? "").slice(0, 2000);
|
|
187
|
+
// Check if the child orchestration actually finished
|
|
188
|
+
// (a "turn_completed" from a child that will keep going is intermediate)
|
|
189
|
+
}
|
|
190
|
+
if (childData.type === "finished") {
|
|
191
|
+
agent.status = "completed";
|
|
192
|
+
agent.result = (childData.content ?? "").slice(0, 2000);
|
|
193
|
+
}
|
|
194
|
+
if (childData.type === "failed") {
|
|
195
|
+
agent.status = "failed";
|
|
196
|
+
agent.result = childData.error ?? "unknown error";
|
|
197
|
+
}
|
|
198
|
+
}
|
|
199
|
+
// Feed child update to the parent LLM so it can react
|
|
200
|
+
const childSummary = agent
|
|
201
|
+
? `Agent ${agent.orchId} (task: "${agent.task.slice(0, 100)}"): ${childData.type} — ${(childData.content ?? "").slice(0, 500)}`
|
|
202
|
+
: `Unknown agent ${childData.childOrchId}: ${childData.type}`;
|
|
203
|
+
prompt = `[SYSTEM: Sub-agent update received:\n ${childSummary}]`;
|
|
204
|
+
gotPrompt = true;
|
|
205
|
+
lastTurnResult = undefined;
|
|
206
|
+
continue;
|
|
207
|
+
}
|
|
208
|
+
// User message won the race
|
|
209
|
+
msgData = typeof raceResult.value === "string"
|
|
210
|
+
? JSON.parse(raceResult.value) : (raceResult.value ?? {});
|
|
211
|
+
}
|
|
212
|
+
else {
|
|
213
|
+
const msg = yield ctx.dequeueEvent("messages");
|
|
214
|
+
msgData = typeof msg === "string" ? JSON.parse(msg) : msg;
|
|
215
|
+
}
|
|
216
|
+
// ── Command dispatch ─────────────────────────
|
|
217
|
+
if (msgData.type === "cmd") {
|
|
218
|
+
const cmdMsg = msgData;
|
|
219
|
+
ctx.traceInfo(`[orch-cmd] ${cmdMsg.cmd} id=${cmdMsg.id}`);
|
|
220
|
+
switch (cmdMsg.cmd) {
|
|
221
|
+
case "set_model": {
|
|
222
|
+
const newModel = String(cmdMsg.args?.model || "");
|
|
223
|
+
const oldModel = config.model || "(default)";
|
|
224
|
+
config = { ...config, model: newModel };
|
|
225
|
+
const resp = {
|
|
226
|
+
id: cmdMsg.id,
|
|
227
|
+
cmd: cmdMsg.cmd,
|
|
228
|
+
result: { ok: true, oldModel, newModel },
|
|
229
|
+
};
|
|
230
|
+
setStatus(ctx, "idle", { iteration, cmdResponse: resp });
|
|
231
|
+
yield ctx.continueAsNew(continueInput());
|
|
232
|
+
return "";
|
|
233
|
+
}
|
|
234
|
+
case "list_models": {
|
|
235
|
+
setStatus(ctx, "idle", { iteration, cmdProcessing: cmdMsg.id });
|
|
236
|
+
let models;
|
|
237
|
+
try {
|
|
238
|
+
const raw = yield manager.listModels();
|
|
239
|
+
models = typeof raw === "string" ? JSON.parse(raw) : raw;
|
|
240
|
+
}
|
|
241
|
+
catch (err) {
|
|
242
|
+
const resp = {
|
|
243
|
+
id: cmdMsg.id,
|
|
244
|
+
cmd: cmdMsg.cmd,
|
|
245
|
+
error: err.message || String(err),
|
|
246
|
+
};
|
|
247
|
+
setStatus(ctx, "idle", { iteration, cmdResponse: resp });
|
|
248
|
+
continue;
|
|
249
|
+
}
|
|
250
|
+
const resp = {
|
|
251
|
+
id: cmdMsg.id,
|
|
252
|
+
cmd: cmdMsg.cmd,
|
|
253
|
+
result: { models, currentModel: config.model },
|
|
254
|
+
};
|
|
255
|
+
setStatus(ctx, "idle", { iteration, cmdResponse: resp });
|
|
256
|
+
continue;
|
|
257
|
+
}
|
|
258
|
+
case "get_info": {
|
|
259
|
+
const resp = {
|
|
260
|
+
id: cmdMsg.id,
|
|
261
|
+
cmd: cmdMsg.cmd,
|
|
262
|
+
result: {
|
|
263
|
+
model: config.model || "(default)",
|
|
264
|
+
iteration,
|
|
265
|
+
sessionId: input.sessionId,
|
|
266
|
+
affinityKey: affinityKey?.slice(0, 8),
|
|
267
|
+
needsHydration,
|
|
268
|
+
blobEnabled,
|
|
269
|
+
},
|
|
270
|
+
};
|
|
271
|
+
setStatus(ctx, "idle", { iteration, cmdResponse: resp });
|
|
272
|
+
continue;
|
|
273
|
+
}
|
|
274
|
+
default: {
|
|
275
|
+
const resp = {
|
|
276
|
+
id: cmdMsg.id,
|
|
277
|
+
cmd: cmdMsg.cmd,
|
|
278
|
+
error: `Unknown command: ${cmdMsg.cmd}`,
|
|
279
|
+
};
|
|
280
|
+
setStatus(ctx, "idle", { iteration, cmdResponse: resp });
|
|
281
|
+
continue;
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
}
|
|
285
|
+
prompt = msgData.prompt;
|
|
286
|
+
gotPrompt = true;
|
|
287
|
+
lastTurnResult = undefined; // Clear after new prompt arrives
|
|
288
|
+
}
|
|
289
|
+
}
|
|
290
|
+
// If the session needs hydration, the LLM lost in-memory context.
|
|
291
|
+
// Wrap the user's prompt with resume instructions so the LLM picks up where it left off.
|
|
292
|
+
if (needsHydration && blobEnabled && prompt) {
|
|
293
|
+
prompt = wrapWithResumeContext(prompt);
|
|
294
|
+
}
|
|
295
|
+
ctx.traceInfo(`[turn ${iteration}] session=${input.sessionId} prompt="${prompt.slice(0, 80)}"`);
|
|
296
|
+
// ② HYDRATE if session was dehydrated (with retry)
|
|
297
|
+
if (needsHydration && blobEnabled) {
|
|
298
|
+
let hydrateAttempts = 0;
|
|
299
|
+
while (true) {
|
|
300
|
+
try {
|
|
301
|
+
affinityKey = yield ctx.newGuid();
|
|
302
|
+
session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
|
|
303
|
+
yield session.hydrate();
|
|
304
|
+
needsHydration = false;
|
|
305
|
+
break;
|
|
306
|
+
}
|
|
307
|
+
catch (hydrateErr) {
|
|
308
|
+
hydrateAttempts++;
|
|
309
|
+
const hMsg = hydrateErr.message || String(hydrateErr);
|
|
310
|
+
ctx.traceInfo(`[orch] hydrate FAILED (attempt ${hydrateAttempts}/${MAX_RETRIES}): ${hMsg}`);
|
|
311
|
+
if (hydrateAttempts >= MAX_RETRIES) {
|
|
312
|
+
setStatus(ctx, "error", {
|
|
313
|
+
iteration,
|
|
314
|
+
error: `Hydrate failed after ${MAX_RETRIES} attempts: ${hMsg}`,
|
|
315
|
+
retriesExhausted: true,
|
|
316
|
+
});
|
|
317
|
+
// Can't proceed without hydration — wait for next user message to retry
|
|
318
|
+
break;
|
|
319
|
+
}
|
|
320
|
+
const hydrateDelay = 10 * Math.pow(2, hydrateAttempts - 1);
|
|
321
|
+
setStatus(ctx, "error", {
|
|
322
|
+
iteration,
|
|
323
|
+
error: `Hydrate failed: ${hMsg} (retry ${hydrateAttempts}/${MAX_RETRIES} in ${hydrateDelay}s)`,
|
|
324
|
+
});
|
|
325
|
+
yield ctx.scheduleTimer(hydrateDelay * 1000);
|
|
326
|
+
}
|
|
327
|
+
}
|
|
328
|
+
if (needsHydration)
|
|
329
|
+
continue; // hydrate exhausted retries — go back to dequeue
|
|
330
|
+
}
|
|
331
|
+
// ③ RUN TURN via SessionProxy (with retry on failure)
|
|
332
|
+
setStatus(ctx, "running", { iteration });
|
|
333
|
+
let turnResult;
|
|
334
|
+
try {
|
|
335
|
+
turnResult = yield session.runTurn(prompt);
|
|
336
|
+
}
|
|
337
|
+
catch (err) {
|
|
338
|
+
// Activity failed (e.g. Copilot timeout, network error).
|
|
339
|
+
const errorMsg = err.message || String(err);
|
|
340
|
+
retryCount++;
|
|
341
|
+
ctx.traceInfo(`[orch] runTurn FAILED (attempt ${retryCount}/${MAX_RETRIES}): ${errorMsg}`);
|
|
342
|
+
if (retryCount >= MAX_RETRIES) {
|
|
343
|
+
// Exhausted retries — park in error state but don't crash.
|
|
344
|
+
// The orchestration stays alive and will retry on the next user message.
|
|
345
|
+
ctx.traceInfo(`[orch] max retries exhausted, waiting for user input`);
|
|
346
|
+
setStatus(ctx, "error", {
|
|
347
|
+
iteration,
|
|
348
|
+
error: `Failed after ${MAX_RETRIES} attempts: ${errorMsg}`,
|
|
349
|
+
retriesExhausted: true,
|
|
350
|
+
});
|
|
351
|
+
// Reset retry count and wait for next user message
|
|
352
|
+
retryCount = 0;
|
|
353
|
+
continue;
|
|
354
|
+
}
|
|
355
|
+
setStatus(ctx, "error", {
|
|
356
|
+
iteration,
|
|
357
|
+
error: `${errorMsg} (retry ${retryCount}/${MAX_RETRIES} in 15s)`,
|
|
358
|
+
});
|
|
359
|
+
// Exponential backoff: 15s, 30s, 60s
|
|
360
|
+
const retryDelay = 15 * Math.pow(2, retryCount - 1);
|
|
361
|
+
ctx.traceInfo(`[orch] retrying in ${retryDelay}s`);
|
|
362
|
+
if (blobEnabled) {
|
|
363
|
+
yield* dehydrateAndReset("error");
|
|
364
|
+
}
|
|
365
|
+
yield ctx.scheduleTimer(retryDelay * 1000);
|
|
366
|
+
yield ctx.continueAsNew(continueInput({
|
|
367
|
+
prompt,
|
|
368
|
+
retryCount,
|
|
369
|
+
needsHydration: blobEnabled ? true : needsHydration,
|
|
370
|
+
}));
|
|
371
|
+
return "";
|
|
372
|
+
}
|
|
373
|
+
// Successful activity — reset retry counter
|
|
374
|
+
retryCount = 0;
|
|
375
|
+
const result = typeof turnResult === "string"
|
|
376
|
+
? JSON.parse(turnResult) : turnResult;
|
|
377
|
+
iteration++;
|
|
378
|
+
// Strip events from result before putting in customStatus (events go to CMS, not status)
|
|
379
|
+
const { events: _events, ...statusResult } = result;
|
|
380
|
+
// ── Summarize title if due ──────────────────────────
|
|
381
|
+
yield* maybeSummarize();
|
|
382
|
+
// ④ HANDLE RESULT
|
|
383
|
+
switch (result.type) {
|
|
384
|
+
case "completed":
|
|
385
|
+
ctx.traceInfo(`[response] ${result.content}`);
|
|
386
|
+
// If this is a child orchestration, notify the parent about our completion
|
|
387
|
+
if (parentOrchId) {
|
|
388
|
+
try {
|
|
389
|
+
yield manager.notifyParent(parentOrchId, `session-${input.sessionId}`, input.sessionId, {
|
|
390
|
+
type: "turn_completed",
|
|
391
|
+
content: result.content.slice(0, 2000),
|
|
392
|
+
iteration,
|
|
393
|
+
});
|
|
394
|
+
}
|
|
395
|
+
catch (err) {
|
|
396
|
+
ctx.traceInfo(`[orch] notifyParent failed: ${err.message} (non-fatal)`);
|
|
397
|
+
}
|
|
398
|
+
}
|
|
399
|
+
if (!blobEnabled || idleTimeout < 0) {
|
|
400
|
+
// Store the result so the dequeue-idle setStatus includes it
|
|
401
|
+
lastTurnResult = statusResult;
|
|
402
|
+
// Checkpoint while idle (no dehydration path)
|
|
403
|
+
yield* maybeCheckpoint();
|
|
404
|
+
continue;
|
|
405
|
+
}
|
|
406
|
+
// Race: next message vs idle timeout
|
|
407
|
+
{
|
|
408
|
+
setStatus(ctx, "idle", { iteration, turnResult: statusResult });
|
|
409
|
+
yield* maybeCheckpoint();
|
|
410
|
+
const nextMsg = ctx.dequeueEvent("messages");
|
|
411
|
+
const idleTimer = ctx.scheduleTimer(idleTimeout * 1000);
|
|
412
|
+
const raceResult = yield ctx.race(nextMsg, idleTimer);
|
|
413
|
+
if (raceResult.index === 0) {
|
|
414
|
+
ctx.traceInfo("[session] user responded within idle window");
|
|
415
|
+
const raceMsg = typeof raceResult.value === "string"
|
|
416
|
+
? JSON.parse(raceResult.value) : (raceResult.value ?? {});
|
|
417
|
+
if (raceMsg.prompt) {
|
|
418
|
+
yield ctx.continueAsNew(continueInput({ prompt: raceMsg.prompt }));
|
|
419
|
+
}
|
|
420
|
+
else {
|
|
421
|
+
yield ctx.continueAsNew(continueInput());
|
|
422
|
+
}
|
|
423
|
+
return "";
|
|
424
|
+
}
|
|
425
|
+
// Idle timeout → dehydrate. Next message will need resume context.
|
|
426
|
+
ctx.traceInfo("[session] idle timeout, dehydrating");
|
|
427
|
+
yield* dehydrateAndReset("idle");
|
|
428
|
+
// Don't continueAsNew with a prompt — wait for the next user message,
|
|
429
|
+
// which will be wrapped with resume context because needsHydration=true.
|
|
430
|
+
yield ctx.continueAsNew(continueInput());
|
|
431
|
+
return "";
|
|
432
|
+
}
|
|
433
|
+
case "wait":
|
|
434
|
+
// Capture original user prompt as task context for recurring tasks.
|
|
435
|
+
// This ensures the LLM remembers its task even after conversation truncation.
|
|
436
|
+
if (!taskContext) {
|
|
437
|
+
taskContext = prompt.slice(0, 2000);
|
|
438
|
+
const base = typeof baseSystemMessage === 'string'
|
|
439
|
+
? baseSystemMessage ?? ''
|
|
440
|
+
: baseSystemMessage?.content ?? '';
|
|
441
|
+
config.systemMessage = base + (base ? '\n\n' : '') +
|
|
442
|
+
'[RECURRING TASK]\n' +
|
|
443
|
+
'Original user request (always remember, even if conversation history is truncated):\n"' +
|
|
444
|
+
taskContext + '"';
|
|
445
|
+
}
|
|
446
|
+
if (result.content) {
|
|
447
|
+
setStatus(ctx, "running", { iteration, intermediateContent: result.content });
|
|
448
|
+
ctx.traceInfo(`[orch] intermediate: ${result.content.slice(0, 80)}`);
|
|
449
|
+
}
|
|
450
|
+
ctx.traceInfo(`[orch] durable timer: ${result.seconds}s (${result.reason})`);
|
|
451
|
+
{
|
|
452
|
+
const shouldDehydrate = blobEnabled && result.seconds > dehydrateThreshold;
|
|
453
|
+
if (shouldDehydrate) {
|
|
454
|
+
yield* dehydrateAndReset("timer");
|
|
455
|
+
}
|
|
456
|
+
const waitStartedAt = yield ctx.utcNow();
|
|
457
|
+
setStatus(ctx, "waiting", {
|
|
458
|
+
iteration,
|
|
459
|
+
waitSeconds: result.seconds,
|
|
460
|
+
waitReason: result.reason,
|
|
461
|
+
waitStartedAt,
|
|
462
|
+
...(result.content ? { turnResult: { type: "completed", content: result.content } } : {}),
|
|
463
|
+
});
|
|
464
|
+
// Checkpoint before the blocking wait
|
|
465
|
+
if (!shouldDehydrate)
|
|
466
|
+
yield* maybeCheckpoint();
|
|
467
|
+
const timerTask = ctx.scheduleTimer(result.seconds * 1000);
|
|
468
|
+
const interruptMsg = ctx.dequeueEvent("messages");
|
|
469
|
+
const timerRace = yield ctx.race(timerTask, interruptMsg);
|
|
470
|
+
if (timerRace.index === 1) {
|
|
471
|
+
const interruptData = typeof timerRace.value === "string"
|
|
472
|
+
? JSON.parse(timerRace.value) : (timerRace.value ?? {});
|
|
473
|
+
ctx.traceInfo(`[session] wait interrupted: "${(interruptData.prompt || "").slice(0, 60)}"`);
|
|
474
|
+
// Calculate remaining time for resume context
|
|
475
|
+
const interruptedAt = yield ctx.utcNow();
|
|
476
|
+
const elapsedSec = Math.round((interruptedAt - waitStartedAt) / 1000);
|
|
477
|
+
const remainingSec = Math.max(0, result.seconds - elapsedSec);
|
|
478
|
+
const userPrompt = interruptData.prompt || "";
|
|
479
|
+
let finalPrompt;
|
|
480
|
+
if (shouldDehydrate && userPrompt) {
|
|
481
|
+
finalPrompt = wrapWithResumeContext(userPrompt, `You were waiting on a ${result.seconds}s timer (reason: "${result.reason}"). ` +
|
|
482
|
+
`${elapsedSec}s have elapsed, ${remainingSec}s remain. ` +
|
|
483
|
+
`After handling the user's message, restart the wait using the wait tool for the remaining ${remainingSec} seconds only.`);
|
|
484
|
+
}
|
|
485
|
+
else if (userPrompt) {
|
|
486
|
+
// Not dehydrated but still interrupted — give timing context
|
|
487
|
+
finalPrompt = `${userPrompt}\n\n` +
|
|
488
|
+
`[SYSTEM: You were waiting on a ${result.seconds}s timer (reason: "${result.reason}"). ` +
|
|
489
|
+
`${elapsedSec}s elapsed, ${remainingSec}s remain. ` +
|
|
490
|
+
`After handling this message, restart the wait using the wait tool for the remaining ${remainingSec} seconds only.]`;
|
|
491
|
+
}
|
|
492
|
+
else {
|
|
493
|
+
finalPrompt = userPrompt;
|
|
494
|
+
}
|
|
495
|
+
yield ctx.continueAsNew(continueInput({
|
|
496
|
+
prompt: finalPrompt,
|
|
497
|
+
needsHydration: shouldDehydrate ? true : needsHydration,
|
|
498
|
+
}));
|
|
499
|
+
return "";
|
|
500
|
+
}
|
|
501
|
+
const timerPrompt = `The ${result.seconds} second wait is now complete. Continue with your task.`;
|
|
502
|
+
yield ctx.continueAsNew(continueInput({
|
|
503
|
+
prompt: timerPrompt,
|
|
504
|
+
needsHydration: shouldDehydrate ? true : needsHydration,
|
|
505
|
+
}));
|
|
506
|
+
return "";
|
|
507
|
+
}
|
|
508
|
+
case "input_required":
|
|
509
|
+
ctx.traceInfo(`[orch] waiting for user input: ${result.question}`);
|
|
510
|
+
if (!blobEnabled || inputGracePeriod < 0) {
|
|
511
|
+
setStatus(ctx, "input_required", {
|
|
512
|
+
iteration,
|
|
513
|
+
turnResult: statusResult,
|
|
514
|
+
pendingQuestion: result.question,
|
|
515
|
+
choices: result.choices,
|
|
516
|
+
allowFreeform: result.allowFreeform,
|
|
517
|
+
});
|
|
518
|
+
yield* maybeCheckpoint();
|
|
519
|
+
const answerMsg = yield ctx.dequeueEvent("messages");
|
|
520
|
+
const answerData = typeof answerMsg === "string"
|
|
521
|
+
? JSON.parse(answerMsg) : answerMsg;
|
|
522
|
+
yield ctx.continueAsNew(continueInput({
|
|
523
|
+
prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
|
|
524
|
+
needsHydration: false,
|
|
525
|
+
}));
|
|
526
|
+
return "";
|
|
527
|
+
}
|
|
528
|
+
if (inputGracePeriod === 0) {
|
|
529
|
+
setStatus(ctx, "input_required", {
|
|
530
|
+
iteration,
|
|
531
|
+
turnResult: statusResult,
|
|
532
|
+
pendingQuestion: result.question,
|
|
533
|
+
});
|
|
534
|
+
yield* dehydrateAndReset("input_required");
|
|
535
|
+
const answerMsg = yield ctx.dequeueEvent("messages");
|
|
536
|
+
const answerData = typeof answerMsg === "string"
|
|
537
|
+
? JSON.parse(answerMsg) : answerMsg;
|
|
538
|
+
yield ctx.continueAsNew(continueInput({
|
|
539
|
+
prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
|
|
540
|
+
}));
|
|
541
|
+
return "";
|
|
542
|
+
}
|
|
543
|
+
// Race: user answer vs grace period
|
|
544
|
+
{
|
|
545
|
+
setStatus(ctx, "input_required", {
|
|
546
|
+
iteration,
|
|
547
|
+
turnResult: statusResult,
|
|
548
|
+
pendingQuestion: result.question,
|
|
549
|
+
choices: result.choices,
|
|
550
|
+
allowFreeform: result.allowFreeform,
|
|
551
|
+
});
|
|
552
|
+
const answerEvt = ctx.dequeueEvent("messages");
|
|
553
|
+
const graceTimer = ctx.scheduleTimer(inputGracePeriod * 1000);
|
|
554
|
+
const raceResult = yield ctx.race(answerEvt, graceTimer);
|
|
555
|
+
if (raceResult.index === 0) {
|
|
556
|
+
const answerData = typeof raceResult.value === "string"
|
|
557
|
+
? JSON.parse(raceResult.value) : (raceResult.value ?? {});
|
|
558
|
+
yield ctx.continueAsNew(continueInput({
|
|
559
|
+
prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
|
|
560
|
+
needsHydration: false,
|
|
561
|
+
}));
|
|
562
|
+
return "";
|
|
563
|
+
}
|
|
564
|
+
yield* dehydrateAndReset("input_required");
|
|
565
|
+
const answerMsg = yield ctx.dequeueEvent("messages");
|
|
566
|
+
const answerData = typeof answerMsg === "string"
|
|
567
|
+
? JSON.parse(answerMsg) : answerMsg;
|
|
568
|
+
yield ctx.continueAsNew(continueInput({
|
|
569
|
+
prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
|
|
570
|
+
}));
|
|
571
|
+
return "";
|
|
572
|
+
}
|
|
573
|
+
case "cancelled":
|
|
574
|
+
ctx.traceInfo("[session] turn cancelled");
|
|
575
|
+
continue;
|
|
576
|
+
// ─── Sub-Agent Result Handlers ───────────────────
|
|
577
|
+
case "spawn_agent": {
|
|
578
|
+
// Enforce max sub-agents
|
|
579
|
+
const activeCount = subAgents.filter(a => a.status === "running").length;
|
|
580
|
+
if (activeCount >= MAX_SUB_AGENTS) {
|
|
581
|
+
ctx.traceInfo(`[orch] spawn_agent denied: ${activeCount}/${MAX_SUB_AGENTS} agents running`);
|
|
582
|
+
yield ctx.continueAsNew(continueInput({
|
|
583
|
+
prompt: `[SYSTEM: spawn_agent failed — you already have ${activeCount} running sub-agents (max ${MAX_SUB_AGENTS}). ` +
|
|
584
|
+
`Wait for some to complete before spawning more.]`,
|
|
585
|
+
}));
|
|
586
|
+
return "";
|
|
587
|
+
}
|
|
588
|
+
// Child session ID is generated by the activity (random UUID)
|
|
589
|
+
let childSessionId;
|
|
590
|
+
ctx.traceInfo(`[orch] spawning sub-agent via SDK: task="${result.task.slice(0, 80)}"`);
|
|
591
|
+
// Build child config — inherit parent's config with optional overrides
|
|
592
|
+
const childConfig = {
|
|
593
|
+
...config,
|
|
594
|
+
...(result.systemMessage ? { systemMessage: result.systemMessage } : {}),
|
|
595
|
+
...(result.toolNames ? { toolNames: result.toolNames } : {}),
|
|
596
|
+
};
|
|
597
|
+
// Use the PilotSwarmClient SDK to create and start the child session.
|
|
598
|
+
// This handles: CMS registration (with parentSessionId), orchestration startup,
|
|
599
|
+
// and initial task prompt — all through the standard SDK path.
|
|
600
|
+
try {
|
|
601
|
+
childSessionId = yield manager.spawnChildSession(input.sessionId, childConfig, result.task);
|
|
602
|
+
}
|
|
603
|
+
catch (err) {
|
|
604
|
+
ctx.traceInfo(`[orch] spawnChildSession failed: ${err.message}`);
|
|
605
|
+
yield ctx.continueAsNew(continueInput({
|
|
606
|
+
prompt: `[SYSTEM: spawn_agent failed: ${err.message}]`,
|
|
607
|
+
}));
|
|
608
|
+
return "";
|
|
609
|
+
}
|
|
610
|
+
const childOrchId = `session-${childSessionId}`;
|
|
611
|
+
// Track the sub-agent
|
|
612
|
+
subAgents.push({
|
|
613
|
+
orchId: childOrchId,
|
|
614
|
+
sessionId: childSessionId,
|
|
615
|
+
task: result.task.slice(0, 500),
|
|
616
|
+
status: "running",
|
|
617
|
+
});
|
|
618
|
+
// Feed confirmation back to the LLM
|
|
619
|
+
const spawnMsg = `[SYSTEM: Sub-agent spawned successfully.\n` +
|
|
620
|
+
` Agent ID: ${childOrchId}\n` +
|
|
621
|
+
` Task: "${result.task.slice(0, 200)}"\n` +
|
|
622
|
+
` The agent is now running autonomously. Use check_agents to monitor progress, ` +
|
|
623
|
+
`message_agent to send instructions, or wait_for_agents to block until completion.]`;
|
|
624
|
+
yield ctx.continueAsNew(continueInput({ prompt: spawnMsg }));
|
|
625
|
+
return "";
|
|
626
|
+
}
|
|
627
|
+
case "message_agent": {
|
|
628
|
+
const targetOrchId = result.agentId;
|
|
629
|
+
const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
|
|
630
|
+
if (!agentEntry) {
|
|
631
|
+
ctx.traceInfo(`[orch] message_agent: unknown agent ${targetOrchId}`);
|
|
632
|
+
yield ctx.continueAsNew(continueInput({
|
|
633
|
+
prompt: `[SYSTEM: message_agent failed — agent "${targetOrchId}" not found. ` +
|
|
634
|
+
`Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
|
|
635
|
+
}));
|
|
636
|
+
return "";
|
|
637
|
+
}
|
|
638
|
+
ctx.traceInfo(`[orch] message_agent via SDK: ${agentEntry.sessionId} msg="${result.message.slice(0, 60)}"`);
|
|
639
|
+
try {
|
|
640
|
+
yield manager.sendToSession(agentEntry.sessionId, result.message);
|
|
641
|
+
}
|
|
642
|
+
catch (err) {
|
|
643
|
+
ctx.traceInfo(`[orch] message_agent failed: ${err.message}`);
|
|
644
|
+
yield ctx.continueAsNew(continueInput({
|
|
645
|
+
prompt: `[SYSTEM: message_agent failed: ${err.message}]`,
|
|
646
|
+
}));
|
|
647
|
+
return "";
|
|
648
|
+
}
|
|
649
|
+
yield ctx.continueAsNew(continueInput({
|
|
650
|
+
prompt: `[SYSTEM: Message sent to sub-agent ${targetOrchId}: "${result.message.slice(0, 200)}"]`,
|
|
651
|
+
}));
|
|
652
|
+
return "";
|
|
653
|
+
}
|
|
654
|
+
case "check_agents": {
|
|
655
|
+
ctx.traceInfo(`[orch] check_agents: ${subAgents.length} agents tracked`);
|
|
656
|
+
if (subAgents.length === 0) {
|
|
657
|
+
yield ctx.continueAsNew(continueInput({
|
|
658
|
+
prompt: `[SYSTEM: No sub-agents have been spawned yet.]`,
|
|
659
|
+
}));
|
|
660
|
+
return "";
|
|
661
|
+
}
|
|
662
|
+
// Get fresh status for each agent via the SDK
|
|
663
|
+
const statusLines = [];
|
|
664
|
+
for (const agent of subAgents) {
|
|
665
|
+
try {
|
|
666
|
+
const rawStatus = yield manager.getSessionStatus(agent.sessionId);
|
|
667
|
+
const parsed = JSON.parse(rawStatus);
|
|
668
|
+
// Update local tracking
|
|
669
|
+
if (parsed.status === "completed" || parsed.status === "failed") {
|
|
670
|
+
agent.status = parsed.status;
|
|
671
|
+
if (parsed.result)
|
|
672
|
+
agent.result = parsed.result.slice(0, 1000);
|
|
673
|
+
}
|
|
674
|
+
statusLines.push(` - Agent ${agent.orchId}\n` +
|
|
675
|
+
` Task: "${agent.task.slice(0, 120)}"\n` +
|
|
676
|
+
` Status: ${parsed.status}\n` +
|
|
677
|
+
` Iterations: ${parsed.iterations ?? 0}\n` +
|
|
678
|
+
` Output: ${parsed.result ?? "(no output yet)"}`);
|
|
679
|
+
}
|
|
680
|
+
catch (err) {
|
|
681
|
+
statusLines.push(` - Agent ${agent.orchId}\n` +
|
|
682
|
+
` Task: "${agent.task.slice(0, 120)}"\n` +
|
|
683
|
+
` Status: unknown (error: ${err.message})`);
|
|
684
|
+
}
|
|
685
|
+
}
|
|
686
|
+
yield ctx.continueAsNew(continueInput({
|
|
687
|
+
prompt: `[SYSTEM: Sub-agent status report (${subAgents.length} agents):\n${statusLines.join("\n")}]`,
|
|
688
|
+
}));
|
|
689
|
+
return "";
|
|
690
|
+
}
|
|
691
|
+
case "wait_for_agents": {
|
|
692
|
+
let targetIds = result.agentIds;
|
|
693
|
+
// If empty, wait for all running agents
|
|
694
|
+
if (!targetIds || targetIds.length === 0) {
|
|
695
|
+
targetIds = subAgents.filter(a => a.status === "running").map(a => a.orchId);
|
|
696
|
+
}
|
|
697
|
+
if (targetIds.length === 0) {
|
|
698
|
+
ctx.traceInfo(`[orch] wait_for_agents: no running agents to wait for`);
|
|
699
|
+
yield ctx.continueAsNew(continueInput({
|
|
700
|
+
prompt: `[SYSTEM: No running sub-agents to wait for. All agents have already completed.]`,
|
|
701
|
+
}));
|
|
702
|
+
return "";
|
|
703
|
+
}
|
|
704
|
+
ctx.traceInfo(`[orch] wait_for_agents: waiting for ${targetIds.length} agents`);
|
|
705
|
+
setStatus(ctx, "running", {
|
|
706
|
+
iteration,
|
|
707
|
+
waitingForAgents: targetIds,
|
|
708
|
+
});
|
|
709
|
+
// Event-driven wait: listen for child_updates until all targets are done.
|
|
710
|
+
// Also race with user messages so the parent stays responsive.
|
|
711
|
+
const MAX_WAIT_ITERATIONS = 180;
|
|
712
|
+
for (let waitIter = 0; waitIter < MAX_WAIT_ITERATIONS; waitIter++) {
|
|
713
|
+
// Check if all targets are done
|
|
714
|
+
const stillRunning = targetIds.filter(id => {
|
|
715
|
+
const agent = subAgents.find(a => a.orchId === id);
|
|
716
|
+
return agent && agent.status === "running";
|
|
717
|
+
});
|
|
718
|
+
if (stillRunning.length === 0)
|
|
719
|
+
break;
|
|
720
|
+
// Race: child_updates vs user message vs timeout
|
|
721
|
+
const childUpdate = ctx.dequeueEvent("child_updates");
|
|
722
|
+
const userMsg = ctx.dequeueEvent("messages");
|
|
723
|
+
const timeout = ctx.scheduleTimer(30_000); // 30s fallback poll
|
|
724
|
+
const waitRace = yield ctx.race(childUpdate, userMsg, timeout);
|
|
725
|
+
if (waitRace.index === 0) {
|
|
726
|
+
// Child update
|
|
727
|
+
const childData = typeof waitRace.value === "string"
|
|
728
|
+
? JSON.parse(waitRace.value) : waitRace.value;
|
|
729
|
+
ctx.traceInfo(`[orch] wait_for_agents: child_update from ${childData.childOrchId}`);
|
|
730
|
+
const agent = subAgents.find((a) => a.orchId === childData.childOrchId);
|
|
731
|
+
if (agent) {
|
|
732
|
+
if (childData.type === "turn_completed" || childData.type === "finished") {
|
|
733
|
+
agent.result = (childData.content ?? "").slice(0, 2000);
|
|
734
|
+
}
|
|
735
|
+
if (childData.type === "finished") {
|
|
736
|
+
agent.status = "completed";
|
|
737
|
+
}
|
|
738
|
+
if (childData.type === "failed") {
|
|
739
|
+
agent.status = "failed";
|
|
740
|
+
agent.result = childData.error ?? "unknown error";
|
|
741
|
+
}
|
|
742
|
+
}
|
|
743
|
+
}
|
|
744
|
+
else if (waitRace.index === 1) {
|
|
745
|
+
// User message interrupted the wait — handle it and come back
|
|
746
|
+
const interruptData = typeof waitRace.value === "string"
|
|
747
|
+
? JSON.parse(waitRace.value) : (waitRace.value ?? {});
|
|
748
|
+
if (interruptData.prompt) {
|
|
749
|
+
ctx.traceInfo(`[orch] wait_for_agents interrupted by user: "${interruptData.prompt.slice(0, 60)}"`);
|
|
750
|
+
yield ctx.continueAsNew(continueInput({
|
|
751
|
+
prompt: interruptData.prompt,
|
|
752
|
+
}));
|
|
753
|
+
return "";
|
|
754
|
+
}
|
|
755
|
+
}
|
|
756
|
+
else {
|
|
757
|
+
// Timeout — do a fallback status check via SDK
|
|
758
|
+
ctx.traceInfo(`[orch] wait_for_agents: timeout poll, checking ${stillRunning.length} agents`);
|
|
759
|
+
for (const targetId of stillRunning) {
|
|
760
|
+
const agent = subAgents.find(a => a.orchId === targetId);
|
|
761
|
+
if (!agent || agent.status !== "running")
|
|
762
|
+
continue;
|
|
763
|
+
try {
|
|
764
|
+
const rawStatus = yield manager.getSessionStatus(agent.sessionId);
|
|
765
|
+
const parsed = JSON.parse(rawStatus);
|
|
766
|
+
if (parsed.status === "completed" || parsed.status === "failed") {
|
|
767
|
+
agent.status = parsed.status;
|
|
768
|
+
if (parsed.result)
|
|
769
|
+
agent.result = parsed.result.slice(0, 2000);
|
|
770
|
+
}
|
|
771
|
+
}
|
|
772
|
+
catch { }
|
|
773
|
+
}
|
|
774
|
+
}
|
|
775
|
+
}
|
|
776
|
+
// Build results summary
|
|
777
|
+
const resultLines = [];
|
|
778
|
+
for (const targetId of targetIds) {
|
|
779
|
+
const agent = subAgents.find(a => a.orchId === targetId);
|
|
780
|
+
if (!agent)
|
|
781
|
+
continue;
|
|
782
|
+
resultLines.push(` - Agent ${agent.orchId}\n` +
|
|
783
|
+
` Task: "${agent.task.slice(0, 120)}"\n` +
|
|
784
|
+
` Status: ${agent.status}\n` +
|
|
785
|
+
` Result: ${agent.result ?? "(no result)"}`);
|
|
786
|
+
}
|
|
787
|
+
yield ctx.continueAsNew(continueInput({
|
|
788
|
+
prompt: `[SYSTEM: Sub-agents completed:\n${resultLines.join("\n")}]`,
|
|
789
|
+
}));
|
|
790
|
+
return "";
|
|
791
|
+
}
|
|
792
|
+
case "error": {
|
|
793
|
+
// Treat like an activity failure — retry with backoff.
|
|
794
|
+
retryCount++;
|
|
795
|
+
ctx.traceInfo(`[orch] turn returned error (attempt ${retryCount}/${MAX_RETRIES}): ${result.message}`);
|
|
796
|
+
if (retryCount >= MAX_RETRIES) {
|
|
797
|
+
ctx.traceInfo(`[orch] max retries exhausted for turn error, waiting for user input`);
|
|
798
|
+
setStatus(ctx, "error", {
|
|
799
|
+
iteration,
|
|
800
|
+
error: `Failed after ${MAX_RETRIES} attempts: ${result.message}`,
|
|
801
|
+
retriesExhausted: true,
|
|
802
|
+
});
|
|
803
|
+
retryCount = 0;
|
|
804
|
+
continue;
|
|
805
|
+
}
|
|
806
|
+
setStatus(ctx, "error", {
|
|
807
|
+
iteration,
|
|
808
|
+
error: `${result.message} (retry ${retryCount}/${MAX_RETRIES})`,
|
|
809
|
+
});
|
|
810
|
+
const errorRetryDelay = 15 * Math.pow(2, retryCount - 1);
|
|
811
|
+
ctx.traceInfo(`[orch] retrying in ${errorRetryDelay}s after turn error`);
|
|
812
|
+
if (blobEnabled) {
|
|
813
|
+
yield* dehydrateAndReset("error");
|
|
814
|
+
}
|
|
815
|
+
yield ctx.scheduleTimer(errorRetryDelay * 1000);
|
|
816
|
+
yield ctx.continueAsNew(continueInput({
|
|
817
|
+
prompt,
|
|
818
|
+
retryCount,
|
|
819
|
+
needsHydration: blobEnabled ? true : needsHydration,
|
|
820
|
+
}));
|
|
821
|
+
return "";
|
|
822
|
+
}
|
|
823
|
+
}
|
|
824
|
+
}
|
|
825
|
+
}
|
|
826
|
+
//# sourceMappingURL=orchestration_1_0_3.js.map
|