pilotswarm-sdk 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (183)
  1. package/dist/agent-loader.d.ts +61 -0
  2. package/dist/agent-loader.d.ts.map +1 -0
  3. package/dist/agent-loader.js +212 -0
  4. package/dist/agent-loader.js.map +1 -0
  5. package/dist/artifact-tools.d.ts +31 -0
  6. package/dist/artifact-tools.d.ts.map +1 -0
  7. package/dist/artifact-tools.js +190 -0
  8. package/dist/artifact-tools.js.map +1 -0
  9. package/dist/blob-store.d.ts +73 -0
  10. package/dist/blob-store.d.ts.map +1 -0
  11. package/dist/blob-store.js +220 -0
  12. package/dist/blob-store.js.map +1 -0
  13. package/dist/client.d.ts +159 -0
  14. package/dist/client.d.ts.map +1 -0
  15. package/dist/client.js +676 -0
  16. package/dist/client.js.map +1 -0
  17. package/dist/cms.d.ts +129 -0
  18. package/dist/cms.d.ts.map +1 -0
  19. package/dist/cms.js +313 -0
  20. package/dist/cms.js.map +1 -0
  21. package/dist/index.d.ts +44 -0
  22. package/dist/index.d.ts.map +1 -0
  23. package/dist/index.js +42 -0
  24. package/dist/index.js.map +1 -0
  25. package/dist/managed-session.d.ts +70 -0
  26. package/dist/managed-session.d.ts.map +1 -0
  27. package/dist/managed-session.js +717 -0
  28. package/dist/managed-session.js.map +1 -0
  29. package/dist/management-client.d.ts +171 -0
  30. package/dist/management-client.d.ts.map +1 -0
  31. package/dist/management-client.js +401 -0
  32. package/dist/management-client.js.map +1 -0
  33. package/dist/mcp-loader.d.ts +50 -0
  34. package/dist/mcp-loader.d.ts.map +1 -0
  35. package/dist/mcp-loader.js +83 -0
  36. package/dist/mcp-loader.js.map +1 -0
  37. package/dist/model-providers.d.ts +143 -0
  38. package/dist/model-providers.d.ts.map +1 -0
  39. package/dist/model-providers.js +228 -0
  40. package/dist/model-providers.js.map +1 -0
  41. package/dist/orchestration-registry.d.ts +7 -0
  42. package/dist/orchestration-registry.d.ts.map +1 -0
  43. package/dist/orchestration-registry.js +49 -0
  44. package/dist/orchestration-registry.js.map +1 -0
  45. package/dist/orchestration.d.ts +36 -0
  46. package/dist/orchestration.d.ts.map +1 -0
  47. package/dist/orchestration.js +1357 -0
  48. package/dist/orchestration.js.map +1 -0
  49. package/dist/orchestration_1_0_0.d.ts +20 -0
  50. package/dist/orchestration_1_0_0.d.ts.map +1 -0
  51. package/dist/orchestration_1_0_0.js +497 -0
  52. package/dist/orchestration_1_0_0.js.map +1 -0
  53. package/dist/orchestration_1_0_1.d.ts +19 -0
  54. package/dist/orchestration_1_0_1.d.ts.map +1 -0
  55. package/dist/orchestration_1_0_1.js +546 -0
  56. package/dist/orchestration_1_0_1.js.map +1 -0
  57. package/dist/orchestration_1_0_10.d.ts +36 -0
  58. package/dist/orchestration_1_0_10.d.ts.map +1 -0
  59. package/dist/orchestration_1_0_10.js +1253 -0
  60. package/dist/orchestration_1_0_10.js.map +1 -0
  61. package/dist/orchestration_1_0_11.d.ts +36 -0
  62. package/dist/orchestration_1_0_11.d.ts.map +1 -0
  63. package/dist/orchestration_1_0_11.js +1255 -0
  64. package/dist/orchestration_1_0_11.js.map +1 -0
  65. package/dist/orchestration_1_0_12.d.ts +36 -0
  66. package/dist/orchestration_1_0_12.d.ts.map +1 -0
  67. package/dist/orchestration_1_0_12.js +1250 -0
  68. package/dist/orchestration_1_0_12.js.map +1 -0
  69. package/dist/orchestration_1_0_13.d.ts +36 -0
  70. package/dist/orchestration_1_0_13.d.ts.map +1 -0
  71. package/dist/orchestration_1_0_13.js +1260 -0
  72. package/dist/orchestration_1_0_13.js.map +1 -0
  73. package/dist/orchestration_1_0_14.d.ts +36 -0
  74. package/dist/orchestration_1_0_14.d.ts.map +1 -0
  75. package/dist/orchestration_1_0_14.js +1258 -0
  76. package/dist/orchestration_1_0_14.js.map +1 -0
  77. package/dist/orchestration_1_0_15.d.ts +36 -0
  78. package/dist/orchestration_1_0_15.d.ts.map +1 -0
  79. package/dist/orchestration_1_0_15.js +1266 -0
  80. package/dist/orchestration_1_0_15.js.map +1 -0
  81. package/dist/orchestration_1_0_16.d.ts +36 -0
  82. package/dist/orchestration_1_0_16.d.ts.map +1 -0
  83. package/dist/orchestration_1_0_16.js +1275 -0
  84. package/dist/orchestration_1_0_16.js.map +1 -0
  85. package/dist/orchestration_1_0_17.d.ts +36 -0
  86. package/dist/orchestration_1_0_17.d.ts.map +1 -0
  87. package/dist/orchestration_1_0_17.js +1314 -0
  88. package/dist/orchestration_1_0_17.js.map +1 -0
  89. package/dist/orchestration_1_0_18.d.ts +36 -0
  90. package/dist/orchestration_1_0_18.d.ts.map +1 -0
  91. package/dist/orchestration_1_0_18.js +1328 -0
  92. package/dist/orchestration_1_0_18.js.map +1 -0
  93. package/dist/orchestration_1_0_19.d.ts +36 -0
  94. package/dist/orchestration_1_0_19.d.ts.map +1 -0
  95. package/dist/orchestration_1_0_19.js +1324 -0
  96. package/dist/orchestration_1_0_19.js.map +1 -0
  97. package/dist/orchestration_1_0_2.d.ts +19 -0
  98. package/dist/orchestration_1_0_2.d.ts.map +1 -0
  99. package/dist/orchestration_1_0_2.js +749 -0
  100. package/dist/orchestration_1_0_2.js.map +1 -0
  101. package/dist/orchestration_1_0_20.d.ts +36 -0
  102. package/dist/orchestration_1_0_20.d.ts.map +1 -0
  103. package/dist/orchestration_1_0_20.js +1347 -0
  104. package/dist/orchestration_1_0_20.js.map +1 -0
  105. package/dist/orchestration_1_0_3.d.ts +19 -0
  106. package/dist/orchestration_1_0_3.d.ts.map +1 -0
  107. package/dist/orchestration_1_0_3.js +826 -0
  108. package/dist/orchestration_1_0_3.js.map +1 -0
  109. package/dist/orchestration_1_0_4.d.ts +19 -0
  110. package/dist/orchestration_1_0_4.d.ts.map +1 -0
  111. package/dist/orchestration_1_0_4.js +1020 -0
  112. package/dist/orchestration_1_0_4.js.map +1 -0
  113. package/dist/orchestration_1_0_5.d.ts +19 -0
  114. package/dist/orchestration_1_0_5.d.ts.map +1 -0
  115. package/dist/orchestration_1_0_5.js +1027 -0
  116. package/dist/orchestration_1_0_5.js.map +1 -0
  117. package/dist/orchestration_1_0_6.d.ts +19 -0
  118. package/dist/orchestration_1_0_6.d.ts.map +1 -0
  119. package/dist/orchestration_1_0_6.js +1034 -0
  120. package/dist/orchestration_1_0_6.js.map +1 -0
  121. package/dist/orchestration_1_0_7.d.ts +19 -0
  122. package/dist/orchestration_1_0_7.d.ts.map +1 -0
  123. package/dist/orchestration_1_0_7.js +1085 -0
  124. package/dist/orchestration_1_0_7.js.map +1 -0
  125. package/dist/orchestration_1_0_8.d.ts +36 -0
  126. package/dist/orchestration_1_0_8.d.ts.map +1 -0
  127. package/dist/orchestration_1_0_8.js +1106 -0
  128. package/dist/orchestration_1_0_8.js.map +1 -0
  129. package/dist/orchestration_1_0_9.d.ts +36 -0
  130. package/dist/orchestration_1_0_9.d.ts.map +1 -0
  131. package/dist/orchestration_1_0_9.js +1207 -0
  132. package/dist/orchestration_1_0_9.js.map +1 -0
  133. package/dist/prompt-layering.d.ts +16 -0
  134. package/dist/prompt-layering.d.ts.map +1 -0
  135. package/dist/prompt-layering.js +60 -0
  136. package/dist/prompt-layering.js.map +1 -0
  137. package/dist/resourcemgr-tools.d.ts +27 -0
  138. package/dist/resourcemgr-tools.d.ts.map +1 -0
  139. package/dist/resourcemgr-tools.js +638 -0
  140. package/dist/resourcemgr-tools.js.map +1 -0
  141. package/dist/session-dumper.d.ts +26 -0
  142. package/dist/session-dumper.d.ts.map +1 -0
  143. package/dist/session-dumper.js +272 -0
  144. package/dist/session-dumper.js.map +1 -0
  145. package/dist/session-manager.d.ts +152 -0
  146. package/dist/session-manager.d.ts.map +1 -0
  147. package/dist/session-manager.js +493 -0
  148. package/dist/session-manager.js.map +1 -0
  149. package/dist/session-proxy.d.ts +68 -0
  150. package/dist/session-proxy.d.ts.map +1 -0
  151. package/dist/session-proxy.js +665 -0
  152. package/dist/session-proxy.js.map +1 -0
  153. package/dist/session-store.d.ts +35 -0
  154. package/dist/session-store.d.ts.map +1 -0
  155. package/dist/session-store.js +88 -0
  156. package/dist/session-store.js.map +1 -0
  157. package/dist/skills.d.ts +31 -0
  158. package/dist/skills.d.ts.map +1 -0
  159. package/dist/skills.js +93 -0
  160. package/dist/skills.js.map +1 -0
  161. package/dist/sweeper-tools.d.ts +28 -0
  162. package/dist/sweeper-tools.d.ts.map +1 -0
  163. package/dist/sweeper-tools.js +332 -0
  164. package/dist/sweeper-tools.js.map +1 -0
  165. package/dist/types.d.ts +498 -0
  166. package/dist/types.d.ts.map +1 -0
  167. package/dist/types.js +9 -0
  168. package/dist/types.js.map +1 -0
  169. package/dist/worker.d.ts +128 -0
  170. package/dist/worker.d.ts.map +1 -0
  171. package/dist/worker.js +562 -0
  172. package/dist/worker.js.map +1 -0
  173. package/package.json +74 -0
  174. package/plugins/mgmt/agents/pilotswarm.agent.md +59 -0
  175. package/plugins/mgmt/agents/resourcemgr.agent.md +111 -0
  176. package/plugins/mgmt/agents/sweeper.agent.md +67 -0
  177. package/plugins/mgmt/skills/resourcemgr/SKILL.md +41 -0
  178. package/plugins/mgmt/skills/resourcemgr/tools.json +1 -0
  179. package/plugins/mgmt/skills/sweeper/SKILL.md +44 -0
  180. package/plugins/mgmt/skills/sweeper/tools.json +1 -0
  181. package/plugins/system/agents/default.agent.md +58 -0
  182. package/plugins/system/skills/durable-timers/SKILL.md +39 -0
  183. package/plugins/system/skills/sub-agents/SKILL.md +75 -0
@@ -0,0 +1,1034 @@
1
+ import { createSessionProxy, createSessionManagerProxy } from "./session-proxy.js";
2
/**
 * Publish a JSON snapshot of session state as the orchestration's custom
 * status. Clients read this via waitForStatusChange() or getStatus().
 * Keys in `extra` win over the base `status` key if they collide.
 * @internal
 */
function setStatus(ctx, status, extra) {
    const snapshot = { status, ...extra };
    ctx.setCustomStatus(JSON.stringify(snapshot));
}
10
+ /**
11
+ * Long-lived durable session orchestration.
12
+ *
13
+ * One orchestration per copilot session. Uses:
14
+ * - SessionProxy for session-scoped operations (runTurn, dehydrate, hydrate, destroy)
15
+ * - SessionManagerProxy for global operations (listModels)
16
+ * - A single FIFO event queue ("messages") for all client→orchestration communication
17
+ *
18
+ * Main loop:
19
+ * 1. Dequeue message from "messages" queue
20
+ * 2. session.hydrate() if needed
21
+ * 3. session.runTurn(prompt) — returns TurnResult
22
+ * 4. Handle result: completed → idle wait, wait → timer, input → wait for answer
23
+ *
24
+ * @internal
25
+ */
26
+ export function* durableSessionOrchestration_1_0_6(ctx, input) {
27
+ const dehydrateThreshold = input.dehydrateThreshold ?? 30;
28
+ const idleTimeout = input.idleTimeout ?? 30;
29
+ const inputGracePeriod = input.inputGracePeriod ?? 30;
30
+ const checkpointInterval = input.checkpointInterval ?? -1; // seconds, -1 = disabled
31
+ const rehydrationMessage = input.rehydrationMessage;
32
+ const blobEnabled = input.blobEnabled ?? false;
33
+ let needsHydration = input.needsHydration ?? false;
34
+ let affinityKey = input.affinityKey ?? input.sessionId;
35
+ let iteration = input.iteration ?? 0;
36
+ let config = { ...input.config };
37
+ let retryCount = input.retryCount ?? 0;
38
+ let taskContext = input.taskContext;
39
+ const baseSystemMessage = input.baseSystemMessage ?? config.systemMessage;
40
+ const isSystem = input.isSystem ?? false;
41
+ const MAX_RETRIES = 3;
42
+ const MAX_SUB_AGENTS = 8;
43
+ const MAX_NESTING_LEVEL = 2; // 0=root, 1=child, 2=grandchild — no deeper
44
+ // ─── Sub-agent tracking ──────────────────────────────────
45
+ let subAgents = input.subAgents ? [...input.subAgents] : [];
46
+ // parentSessionId: prefer new field, fall back to old parentOrchId for backward compat
47
+ const parentSessionId = input.parentSessionId
48
+ ?? (input.parentOrchId ? input.parentOrchId.replace(/^session-/, '') : undefined);
49
+ const nestingLevel = input.nestingLevel ?? 0;
50
+ // If we have a captured task context, inject it into the system message
51
+ // so it survives LLM conversation truncation (BasicTruncator never drops system messages).
52
+ if (taskContext) {
53
+ const base = typeof baseSystemMessage === 'string'
54
+ ? baseSystemMessage ?? ''
55
+ : baseSystemMessage?.content ?? '';
56
+ config.systemMessage = base + (base ? '\n\n' : '') +
57
+ '[RECURRING TASK]\n' +
58
+ 'Original user request (always remember, even if conversation history is truncated):\n"' +
59
+ taskContext + '"';
60
+ }
61
+ // ─── Title summarization timer ───────────────────────────
62
+ // First summarize at iteration 0 + 60s, then every 300s.
63
+ // We track the target timestamp (epoch ms) across continueAsNew.
64
+ // 0 means "schedule on first turn completion".
65
+ let nextSummarizeAt = input.nextSummarizeAt ?? 0;
66
+ // ─── Create proxies ──────────────────────────────────────
67
+ const manager = createSessionManagerProxy(ctx);
68
+ let session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
69
+ // ─── Helper: wrap prompt with resume context after dehydration ──
70
+ function wrapWithResumeContext(userPrompt, extra) {
71
+ const base = rehydrationMessage ??
72
+ `The session was dehydrated and has been rehydrated on a new worker. ` +
73
+ `The LLM conversation history is preserved.`;
74
+ const parts = [userPrompt, ``, `[SYSTEM: ${base}`];
75
+ if (extra)
76
+ parts.push(extra);
77
+ parts.push(`]`);
78
+ return parts.join('\n');
79
+ }
80
+ // ─── Shared continueAsNew input builder ──────────────────
81
+ function continueInput(overrides = {}) {
82
+ return {
83
+ sessionId: input.sessionId,
84
+ config,
85
+ iteration,
86
+ affinityKey,
87
+ needsHydration,
88
+ blobEnabled,
89
+ dehydrateThreshold,
90
+ idleTimeout,
91
+ inputGracePeriod,
92
+ checkpointInterval,
93
+ rehydrationMessage,
94
+ nextSummarizeAt,
95
+ taskContext,
96
+ baseSystemMessage,
97
+ subAgents,
98
+ parentSessionId,
99
+ nestingLevel,
100
+ ...(isSystem ? { isSystem: true } : {}),
101
+ retryCount: 0, // reset by default; overrides can set it
102
+ ...overrides,
103
+ };
104
+ }
105
+ // ─── Helper: dehydrate + reset affinity ──────────────────
106
+ function* dehydrateAndReset(reason) {
107
+ ctx.traceInfo(`[orch] dehydrating session (reason=${reason})`);
108
+ yield session.dehydrate(reason);
109
+ needsHydration = true;
110
+ affinityKey = yield ctx.newGuid();
111
+ session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
112
+ }
113
+ // ─── Helper: checkpoint without releasing pin ────────────
114
+ function* maybeCheckpoint() {
115
+ if (!blobEnabled || checkpointInterval < 0)
116
+ return;
117
+ try {
118
+ ctx.traceInfo(`[orch] checkpoint (iteration=${iteration})`);
119
+ yield session.checkpoint();
120
+ }
121
+ catch (err) {
122
+ ctx.traceInfo(`[orch] checkpoint failed: ${err.message ?? err}`);
123
+ }
124
+ }
125
+ // ─── Helper: summarize session title if due ──────────────
126
+ const FIRST_SUMMARIZE_DELAY = 60_000; // 1 minute
127
+ const REPEAT_SUMMARIZE_DELAY = 300_000; // 5 minutes
128
+ function* maybeSummarize() {
129
+ // System sessions have fixed titles — never summarize
130
+ if (isSystem)
131
+ return;
132
+ const now = yield ctx.utcNow();
133
+ // Schedule first summarize 60s after session start
134
+ if (nextSummarizeAt === 0) {
135
+ nextSummarizeAt = now + FIRST_SUMMARIZE_DELAY;
136
+ return;
137
+ }
138
+ if (now < nextSummarizeAt)
139
+ return;
140
+ // Time to summarize — fire and forget (best effort)
141
+ try {
142
+ ctx.traceInfo(`[orch] summarizing session title`);
143
+ yield manager.summarizeSession(input.sessionId);
144
+ }
145
+ catch (err) {
146
+ ctx.traceInfo(`[orch] summarize failed: ${err.message}`);
147
+ }
148
+ nextSummarizeAt = now + REPEAT_SUMMARIZE_DELAY;
149
+ }
150
+ // ─── Prompt carried from continueAsNew ───────────────────
151
+ let pendingPrompt = input.prompt;
152
+ /** Set by the "completed" handler so the dequeue loop doesn't overwrite it. */
153
+ let lastTurnResult = undefined;
154
+ ctx.traceInfo(`[orch] start: iter=${iteration} pending=${pendingPrompt ? `"${pendingPrompt.slice(0, 40)}"` : 'NONE'} hydrate=${needsHydration} blob=${blobEnabled}`);
155
+ // ─── MAIN LOOP ──────────────────────────────────────────
156
+ while (true) {
157
+ // ① GET NEXT PROMPT
158
+ let prompt = "";
159
+ if (pendingPrompt) {
160
+ prompt = pendingPrompt;
161
+ pendingPrompt = undefined;
162
+ }
163
+ else {
164
+ // If we have a completed turnResult, include it in the idle status
165
+ // so clients can read it via waitForStatusChange. Without this,
166
+ // a bare setStatus("idle") between yields would overwrite it.
167
+ if (lastTurnResult) {
168
+ setStatus(ctx, "idle", { iteration, turnResult: lastTurnResult });
169
+ }
170
+ else {
171
+ setStatus(ctx, "idle", { iteration });
172
+ }
173
+ let gotPrompt = false;
174
+ while (!gotPrompt) {
175
+ // All messages (from users and child agents) arrive on the "messages" queue.
176
+ // Child agents communicate via the SDK (sendToSession), which enqueues
177
+ // to the same "messages" queue as user prompts.
178
+ let msgData;
179
+ const msg = yield ctx.dequeueEvent("messages");
180
+ msgData = typeof msg === "string" ? JSON.parse(msg) : msg;
181
+ // ── Command dispatch ─────────────────────────
182
+ if (msgData.type === "cmd") {
183
+ const cmdMsg = msgData;
184
+ ctx.traceInfo(`[orch-cmd] ${cmdMsg.cmd} id=${cmdMsg.id}`);
185
+ switch (cmdMsg.cmd) {
186
+ case "set_model": {
187
+ const newModel = String(cmdMsg.args?.model || "");
188
+ const oldModel = config.model || "(default)";
189
+ config = { ...config, model: newModel };
190
+ const resp = {
191
+ id: cmdMsg.id,
192
+ cmd: cmdMsg.cmd,
193
+ result: { ok: true, oldModel, newModel },
194
+ };
195
+ setStatus(ctx, "idle", { iteration, cmdResponse: resp });
196
+ yield ctx.continueAsNew(continueInput());
197
+ return "";
198
+ }
199
+ case "list_models": {
200
+ setStatus(ctx, "idle", { iteration, cmdProcessing: cmdMsg.id });
201
+ let models;
202
+ try {
203
+ const raw = yield manager.listModels();
204
+ models = typeof raw === "string" ? JSON.parse(raw) : raw;
205
+ }
206
+ catch (err) {
207
+ const resp = {
208
+ id: cmdMsg.id,
209
+ cmd: cmdMsg.cmd,
210
+ error: err.message || String(err),
211
+ };
212
+ setStatus(ctx, "idle", { iteration, cmdResponse: resp });
213
+ continue;
214
+ }
215
+ const resp = {
216
+ id: cmdMsg.id,
217
+ cmd: cmdMsg.cmd,
218
+ result: { models, currentModel: config.model },
219
+ };
220
+ setStatus(ctx, "idle", { iteration, cmdResponse: resp });
221
+ continue;
222
+ }
223
+ case "get_info": {
224
+ const resp = {
225
+ id: cmdMsg.id,
226
+ cmd: cmdMsg.cmd,
227
+ result: {
228
+ model: config.model || "(default)",
229
+ iteration,
230
+ sessionId: input.sessionId,
231
+ affinityKey: affinityKey?.slice(0, 8),
232
+ needsHydration,
233
+ blobEnabled,
234
+ },
235
+ };
236
+ setStatus(ctx, "idle", { iteration, cmdResponse: resp });
237
+ continue;
238
+ }
239
+ case "done": {
240
+ ctx.traceInfo(`[orch] /done command received — completing session`);
241
+ // Cascade: complete all sub-agents whose orchestrations may still be alive.
242
+ // Include "running" AND "completed" — a child that sent CHILD_UPDATE
243
+ // may still have a live orchestration waiting in its idle loop.
244
+ const liveChildren = subAgents.filter(a => a.status === "running" || a.status === "completed");
245
+ if (liveChildren.length > 0) {
246
+ ctx.traceInfo(`[orch] /done: completing ${liveChildren.length} sub-agent(s)`);
247
+ for (const child of liveChildren) {
248
+ try {
249
+ const childCmdId = `done-cascade-${iteration}-${child.sessionId.slice(0, 8)}`;
250
+ yield manager.sendCommandToSession(child.sessionId, { type: "cmd", cmd: "done", id: childCmdId, args: { reason: "Parent session completing" } });
251
+ child.status = "completed";
252
+ ctx.traceInfo(`[orch] /done: completed child ${child.sessionId}`);
253
+ }
254
+ catch (err) {
255
+ ctx.traceInfo(`[orch] /done: failed to complete child ${child.sessionId}: ${err.message} (non-fatal)`);
256
+ }
257
+ }
258
+ }
259
+ // If this is a child orchestration, send final result to parent
260
+ if (parentSessionId) {
261
+ try {
262
+ const doneReason = String(cmdMsg.args?.reason || "Session completed by user");
263
+ yield manager.sendToSession(parentSessionId, `[CHILD_UPDATE from=${input.sessionId} type=completed iter=${iteration}]\n${doneReason}`);
264
+ }
265
+ catch (err) {
266
+ ctx.traceInfo(`[orch] sendToSession(parent) on /done failed: ${err.message} (non-fatal)`);
267
+ }
268
+ }
269
+ // Destroy the in-memory session
270
+ try {
271
+ yield session.destroy();
272
+ }
273
+ catch { }
274
+ const resp = {
275
+ id: cmdMsg.id,
276
+ cmd: cmdMsg.cmd,
277
+ result: { ok: true, message: "Session completed" },
278
+ };
279
+ setStatus(ctx, "completed", { iteration, cmdResponse: resp });
280
+ return "done";
281
+ }
282
+ default: {
283
+ const resp = {
284
+ id: cmdMsg.id,
285
+ cmd: cmdMsg.cmd,
286
+ error: `Unknown command: ${cmdMsg.cmd}`,
287
+ };
288
+ setStatus(ctx, "idle", { iteration, cmdResponse: resp });
289
+ continue;
290
+ }
291
+ }
292
+ }
293
+ prompt = msgData.prompt;
294
+ gotPrompt = true;
295
+ lastTurnResult = undefined; // Clear after new prompt arrives
296
+ }
297
+ }
298
+ // If the session needs hydration, the LLM lost in-memory context.
299
+ // Wrap the user's prompt with resume instructions so the LLM picks up where it left off.
300
+ if (needsHydration && blobEnabled && prompt) {
301
+ prompt = wrapWithResumeContext(prompt);
302
+ }
303
+ ctx.traceInfo(`[turn ${iteration}] session=${input.sessionId} prompt="${prompt.slice(0, 80)}"`);
304
+ // ② HYDRATE if session was dehydrated (with retry)
305
+ if (needsHydration && blobEnabled) {
306
+ let hydrateAttempts = 0;
307
+ while (true) {
308
+ try {
309
+ affinityKey = yield ctx.newGuid();
310
+ session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
311
+ yield session.hydrate();
312
+ needsHydration = false;
313
+ break;
314
+ }
315
+ catch (hydrateErr) {
316
+ hydrateAttempts++;
317
+ const hMsg = hydrateErr.message || String(hydrateErr);
318
+ ctx.traceInfo(`[orch] hydrate FAILED (attempt ${hydrateAttempts}/${MAX_RETRIES}): ${hMsg}`);
319
+ if (hydrateAttempts >= MAX_RETRIES) {
320
+ setStatus(ctx, "error", {
321
+ iteration,
322
+ error: `Hydrate failed after ${MAX_RETRIES} attempts: ${hMsg}`,
323
+ retriesExhausted: true,
324
+ });
325
+ // Can't proceed without hydration — wait for next user message to retry
326
+ break;
327
+ }
328
+ const hydrateDelay = 10 * Math.pow(2, hydrateAttempts - 1);
329
+ setStatus(ctx, "error", {
330
+ iteration,
331
+ error: `Hydrate failed: ${hMsg} (retry ${hydrateAttempts}/${MAX_RETRIES} in ${hydrateDelay}s)`,
332
+ });
333
+ yield ctx.scheduleTimer(hydrateDelay * 1000);
334
+ }
335
+ }
336
+ if (needsHydration)
337
+ continue; // hydrate exhausted retries — go back to dequeue
338
+ }
339
+ // ③ RUN TURN via SessionProxy (with retry on failure)
340
+ setStatus(ctx, "running", { iteration });
341
+ let turnResult;
342
+ try {
343
+ turnResult = yield session.runTurn(prompt);
344
+ }
345
+ catch (err) {
346
+ // Activity failed (e.g. Copilot timeout, network error).
347
+ const errorMsg = err.message || String(err);
348
+ retryCount++;
349
+ ctx.traceInfo(`[orch] runTurn FAILED (attempt ${retryCount}/${MAX_RETRIES}): ${errorMsg}`);
350
+ if (retryCount >= MAX_RETRIES) {
351
+ // Exhausted retries — park in error state but don't crash.
352
+ // The orchestration stays alive and will retry on the next user message.
353
+ ctx.traceInfo(`[orch] max retries exhausted, waiting for user input`);
354
+ setStatus(ctx, "error", {
355
+ iteration,
356
+ error: `Failed after ${MAX_RETRIES} attempts: ${errorMsg}`,
357
+ retriesExhausted: true,
358
+ });
359
+ // Reset retry count and wait for next user message
360
+ retryCount = 0;
361
+ continue;
362
+ }
363
+ setStatus(ctx, "error", {
364
+ iteration,
365
+ error: `${errorMsg} (retry ${retryCount}/${MAX_RETRIES} in 15s)`,
366
+ });
367
+ // Exponential backoff: 15s, 30s, 60s
368
+ const retryDelay = 15 * Math.pow(2, retryCount - 1);
369
+ ctx.traceInfo(`[orch] retrying in ${retryDelay}s`);
370
+ if (blobEnabled) {
371
+ yield* dehydrateAndReset("error");
372
+ }
373
+ yield ctx.scheduleTimer(retryDelay * 1000);
374
+ yield ctx.continueAsNew(continueInput({
375
+ prompt,
376
+ retryCount,
377
+ needsHydration: blobEnabled ? true : needsHydration,
378
+ }));
379
+ return "";
380
+ }
381
+ // Successful activity — reset retry counter
382
+ retryCount = 0;
383
+ const result = typeof turnResult === "string"
384
+ ? JSON.parse(turnResult) : turnResult;
385
+ iteration++;
386
+ // Strip events from result before putting in customStatus (events go to CMS, not status)
387
+ const { events: _events, ...statusResult } = result;
388
+ // ── Summarize title if due ──────────────────────────
389
+ yield* maybeSummarize();
390
+ // ④ HANDLE RESULT
391
+ switch (result.type) {
392
+ case "completed":
393
+ ctx.traceInfo(`[response] ${result.content}`);
394
+ // If this is a child orchestration, notify the parent about our completion
395
+ // via the SDK — sends to the parent's "messages" queue like any other message.
396
+ if (parentSessionId) {
397
+ try {
398
+ yield manager.sendToSession(parentSessionId, `[CHILD_UPDATE from=${input.sessionId} type=completed iter=${iteration}]\n${result.content.slice(0, 2000)}`);
399
+ }
400
+ catch (err) {
401
+ ctx.traceInfo(`[orch] sendToSession(parent) failed: ${err.message} (non-fatal)`);
402
+ }
403
+ // Sub-agents auto-terminate after completing their task and notifying
404
+ // the parent. Without this, they sit in the idle loop forever (idleTimeout=-1)
405
+ // and accumulate as zombie orchestrations.
406
+ ctx.traceInfo(`[orch] sub-agent completed task, auto-terminating`);
407
+ try {
408
+ yield session.destroy();
409
+ }
410
+ catch { }
411
+ setStatus(ctx, "completed", { iteration, turnResult: statusResult });
412
+ return "done";
413
+ }
414
+ if (!blobEnabled || idleTimeout < 0) {
415
+ // Store the result so the dequeue-idle setStatus includes it
416
+ lastTurnResult = statusResult;
417
+ // Checkpoint while idle (no dehydration path)
418
+ yield* maybeCheckpoint();
419
+ continue;
420
+ }
421
+ // Race: next message vs idle timeout
422
+ {
423
+ setStatus(ctx, "idle", { iteration, turnResult: statusResult });
424
+ yield* maybeCheckpoint();
425
+ const nextMsg = ctx.dequeueEvent("messages");
426
+ const idleTimer = ctx.scheduleTimer(idleTimeout * 1000);
427
+ const raceResult = yield ctx.race(nextMsg, idleTimer);
428
+ if (raceResult.index === 0) {
429
+ ctx.traceInfo("[session] user responded within idle window");
430
+ const raceMsg = typeof raceResult.value === "string"
431
+ ? JSON.parse(raceResult.value) : (raceResult.value ?? {});
432
+ if (raceMsg.prompt) {
433
+ yield ctx.continueAsNew(continueInput({ prompt: raceMsg.prompt }));
434
+ }
435
+ else {
436
+ yield ctx.continueAsNew(continueInput());
437
+ }
438
+ return "";
439
+ }
440
+ // Idle timeout → dehydrate. Next message will need resume context.
441
+ ctx.traceInfo("[session] idle timeout, dehydrating");
442
+ yield* dehydrateAndReset("idle");
443
+ // Don't continueAsNew with a prompt — wait for the next user message,
444
+ // which will be wrapped with resume context because needsHydration=true.
445
+ yield ctx.continueAsNew(continueInput());
446
+ return "";
447
+ }
448
+ case "wait":
449
+ // Capture original user prompt as task context for recurring tasks.
450
+ // This ensures the LLM remembers its task even after conversation truncation.
451
+ if (!taskContext) {
452
+ taskContext = prompt.slice(0, 2000);
453
+ const base = typeof baseSystemMessage === 'string'
454
+ ? baseSystemMessage ?? ''
455
+ : baseSystemMessage?.content ?? '';
456
+ config.systemMessage = base + (base ? '\n\n' : '') +
457
+ '[RECURRING TASK]\n' +
458
+ 'Original user request (always remember, even if conversation history is truncated):\n"' +
459
+ taskContext + '"';
460
+ }
461
+ if (result.content) {
462
+ setStatus(ctx, "running", { iteration, intermediateContent: result.content });
463
+ ctx.traceInfo(`[orch] intermediate: ${result.content.slice(0, 80)}`);
464
+ }
465
+ // If this is a child orchestration, notify the parent on every wait cycle
466
+ // via the SDK — sends a message to the parent's "messages" queue.
467
+ if (parentSessionId) {
468
+ try {
469
+ const notifyContent = result.content
470
+ ? result.content.slice(0, 2000)
471
+ : `[wait: ${result.reason} (${result.seconds}s)]`;
472
+ yield manager.sendToSession(parentSessionId, `[CHILD_UPDATE from=${input.sessionId} type=wait iter=${iteration}]\n${notifyContent}`);
473
+ }
474
+ catch (err) {
475
+ ctx.traceInfo(`[orch] sendToSession(parent) wait failed: ${err.message} (non-fatal)`);
476
+ }
477
+ }
478
+ ctx.traceInfo(`[orch] durable timer: ${result.seconds}s (${result.reason})`);
479
+ {
480
+ const shouldDehydrate = blobEnabled && result.seconds > dehydrateThreshold;
481
+ if (shouldDehydrate) {
482
+ yield* dehydrateAndReset("timer");
483
+ }
484
+ const waitStartedAt = yield ctx.utcNow();
485
+ setStatus(ctx, "waiting", {
486
+ iteration,
487
+ waitSeconds: result.seconds,
488
+ waitReason: result.reason,
489
+ waitStartedAt,
490
+ ...(result.content ? { turnResult: { type: "completed", content: result.content } } : {}),
491
+ });
492
+ // Checkpoint before the blocking wait
493
+ if (!shouldDehydrate)
494
+ yield* maybeCheckpoint();
495
+ const timerTask = ctx.scheduleTimer(result.seconds * 1000);
496
+ const interruptMsg = ctx.dequeueEvent("messages");
497
+ const timerRace = yield ctx.race(timerTask, interruptMsg);
498
+ if (timerRace.index === 1) {
499
+ const interruptData = typeof timerRace.value === "string"
500
+ ? JSON.parse(timerRace.value) : (timerRace.value ?? {});
501
+ ctx.traceInfo(`[session] wait interrupted: "${(interruptData.prompt || "").slice(0, 60)}"`);
502
+ // Calculate remaining time for resume context
503
+ const interruptedAt = yield ctx.utcNow();
504
+ const elapsedSec = Math.round((interruptedAt - waitStartedAt) / 1000);
505
+ const remainingSec = Math.max(0, result.seconds - elapsedSec);
506
+ const userPrompt = interruptData.prompt || "";
507
+ let finalPrompt;
508
+ if (shouldDehydrate && userPrompt) {
509
+ finalPrompt = wrapWithResumeContext(userPrompt, `Your timer was interrupted by a USER MESSAGE. You MUST respond to the user's message below before doing anything else. ` +
510
+ `Timer context: ${result.seconds}s timer (reason: "${result.reason}"), ` +
511
+ `${elapsedSec}s elapsed, ${remainingSec}s remain. ` +
512
+ `After fully addressing the user's message, resume the wait for the remaining ${remainingSec} seconds.`);
513
+ }
514
+ else if (userPrompt) {
515
+ // Not dehydrated but still interrupted — give timing context
516
+ finalPrompt = `${userPrompt}\n\n` +
517
+ `[SYSTEM: IMPORTANT — The above is a USER MESSAGE that interrupted your ${result.seconds}s timer (reason: "${result.reason}"). ` +
518
+ `You MUST respond to the user's message FIRST. ${elapsedSec}s elapsed, ${remainingSec}s remain. ` +
519
+ `After fully answering the user, resume the wait for the remaining ${remainingSec} seconds.]`;
520
+ }
521
+ else {
522
+ finalPrompt = userPrompt;
523
+ }
524
+ yield ctx.continueAsNew(continueInput({
525
+ prompt: finalPrompt,
526
+ needsHydration: shouldDehydrate ? true : needsHydration,
527
+ }));
528
+ return "";
529
+ }
530
+ const timerPrompt = `The ${result.seconds} second wait is now complete. Continue with your task.`;
531
+ yield ctx.continueAsNew(continueInput({
532
+ prompt: timerPrompt,
533
+ needsHydration: shouldDehydrate ? true : needsHydration,
534
+ }));
535
+ return "";
536
+ }
537
+ case "input_required":
538
+ ctx.traceInfo(`[orch] waiting for user input: ${result.question}`);
539
+ if (!blobEnabled || inputGracePeriod < 0) {
540
+ setStatus(ctx, "input_required", {
541
+ iteration,
542
+ turnResult: statusResult,
543
+ pendingQuestion: result.question,
544
+ choices: result.choices,
545
+ allowFreeform: result.allowFreeform,
546
+ });
547
+ yield* maybeCheckpoint();
548
+ const answerMsg = yield ctx.dequeueEvent("messages");
549
+ const answerData = typeof answerMsg === "string"
550
+ ? JSON.parse(answerMsg) : answerMsg;
551
+ yield ctx.continueAsNew(continueInput({
552
+ prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
553
+ needsHydration: false,
554
+ }));
555
+ return "";
556
+ }
557
+ if (inputGracePeriod === 0) {
558
+ setStatus(ctx, "input_required", {
559
+ iteration,
560
+ turnResult: statusResult,
561
+ pendingQuestion: result.question,
562
+ });
563
+ yield* dehydrateAndReset("input_required");
564
+ const answerMsg = yield ctx.dequeueEvent("messages");
565
+ const answerData = typeof answerMsg === "string"
566
+ ? JSON.parse(answerMsg) : answerMsg;
567
+ yield ctx.continueAsNew(continueInput({
568
+ prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
569
+ }));
570
+ return "";
571
+ }
572
+ // Race: user answer vs grace period
573
+ {
574
+ setStatus(ctx, "input_required", {
575
+ iteration,
576
+ turnResult: statusResult,
577
+ pendingQuestion: result.question,
578
+ choices: result.choices,
579
+ allowFreeform: result.allowFreeform,
580
+ });
581
+ const answerEvt = ctx.dequeueEvent("messages");
582
+ const graceTimer = ctx.scheduleTimer(inputGracePeriod * 1000);
583
+ const raceResult = yield ctx.race(answerEvt, graceTimer);
584
+ if (raceResult.index === 0) {
585
+ const answerData = typeof raceResult.value === "string"
586
+ ? JSON.parse(raceResult.value) : (raceResult.value ?? {});
587
+ yield ctx.continueAsNew(continueInput({
588
+ prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
589
+ needsHydration: false,
590
+ }));
591
+ return "";
592
+ }
593
+ yield* dehydrateAndReset("input_required");
594
+ const answerMsg = yield ctx.dequeueEvent("messages");
595
+ const answerData = typeof answerMsg === "string"
596
+ ? JSON.parse(answerMsg) : answerMsg;
597
+ yield ctx.continueAsNew(continueInput({
598
+ prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
599
+ }));
600
+ return "";
601
+ }
602
+ case "cancelled":
603
+ ctx.traceInfo("[session] turn cancelled");
604
+ continue;
605
+ // ─── Sub-Agent Result Handlers ───────────────────
606
+ case "spawn_agent": {
607
+ // Enforce nesting depth limit
608
+ const childNestingLevel = nestingLevel + 1;
609
+ if (childNestingLevel > MAX_NESTING_LEVEL) {
610
+ ctx.traceInfo(`[orch] spawn_agent denied: nesting level ${nestingLevel} is at max (${MAX_NESTING_LEVEL})`);
611
+ yield ctx.continueAsNew(continueInput({
612
+ prompt: `[SYSTEM: spawn_agent failed — you are already at nesting level ${nestingLevel} (max ${MAX_NESTING_LEVEL}). ` +
613
+ `Sub-agents at this depth cannot spawn further sub-agents. Handle the task directly instead.]`,
614
+ }));
615
+ return "";
616
+ }
617
+ // Enforce max sub-agents
618
+ const activeCount = subAgents.filter(a => a.status === "running").length;
619
+ if (activeCount >= MAX_SUB_AGENTS) {
620
+ ctx.traceInfo(`[orch] spawn_agent denied: ${activeCount}/${MAX_SUB_AGENTS} agents running`);
621
+ yield ctx.continueAsNew(continueInput({
622
+ prompt: `[SYSTEM: spawn_agent failed — you already have ${activeCount} running sub-agents (max ${MAX_SUB_AGENTS}). ` +
623
+ `Wait for some to complete before spawning more.]`,
624
+ }));
625
+ return "";
626
+ }
627
+ ctx.traceInfo(`[orch] spawning sub-agent via SDK: task="${result.task.slice(0, 80)}" model=${result.model || "inherit"} nestingLevel=${childNestingLevel}`);
628
+ // Build child config — inherit parent's config with optional overrides
629
+ const childConfig = {
630
+ ...config,
631
+ ...(result.model ? { model: result.model } : {}),
632
+ ...(result.systemMessage ? { systemMessage: result.systemMessage } : {}),
633
+ ...(result.toolNames ? { toolNames: result.toolNames } : {}),
634
+ };
635
+ // Inject sub-agent identity into the child's system message so the LLM
636
+ // knows it's a sub-agent, what its task is, and that its output will be
637
+ // forwarded to the parent automatically.
638
+ const parentSystemMsg = typeof childConfig.systemMessage === "string"
639
+ ? childConfig.systemMessage
640
+ : childConfig.systemMessage?.content ?? "";
641
+ const canSpawnMore = childNestingLevel < MAX_NESTING_LEVEL;
642
+ const subAgentPreamble = `[SUB-AGENT CONTEXT]\n` +
643
+ `You are a sub-agent spawned by a parent session (ID: session-${input.sessionId}).\n` +
644
+ `Your nesting level: ${childNestingLevel} (max: ${MAX_NESTING_LEVEL}).\n` +
645
+ `Your task: "${result.task.slice(0, 500)}"\n\n` +
646
+ `Instructions:\n` +
647
+ `- Focus exclusively on your assigned task.\n` +
648
+ `- Your final response will be automatically forwarded to the parent agent.\n` +
649
+ `- Be thorough but concise — the parent will synthesize results from multiple agents.\n` +
650
+ `- Do NOT ask the user for input — you are autonomous.\n` +
651
+ `- When your task is complete, provide a clear summary of your findings/results.\n` +
652
+ `- If you write any files with write_artifact, you MUST also call export_artifact and include the artifact:// link in your response.\n` +
653
+ `- For ANY waiting, sleeping, delaying, or scheduling, you MUST use the \`wait\` tool. ` +
654
+ `NEVER use setTimeout, sleep, setInterval, cron, or any other timing mechanism. ` +
655
+ `The wait tool is durable and survives process restarts.\n` +
656
+ (canSpawnMore
657
+ ? `- You CAN spawn your own sub-agents (you have ${MAX_NESTING_LEVEL - childNestingLevel} level(s) remaining). ` +
658
+ `Use them for parallel independent tasks.\n`
659
+ : `- You CANNOT spawn sub-agents — you are at the maximum nesting depth. Handle everything directly.\n`);
660
+ childConfig.systemMessage = subAgentPreamble + (parentSystemMsg ? "\n\n" + parentSystemMsg : "");
661
+ // Use the PilotSwarmClient SDK to create and start the child session.
662
+ // The activity generates a random UUID for the child session ID and returns it.
663
+ // This handles: CMS registration (with parentSessionId), orchestration startup,
664
+ // and initial task prompt — all through the standard SDK path.
665
+ let childSessionId;
666
+ try {
667
+ childSessionId = yield manager.spawnChildSession(input.sessionId, childConfig, result.task, childNestingLevel);
668
+ }
669
+ catch (err) {
670
+ ctx.traceInfo(`[orch] spawnChildSession failed: ${err.message}`);
671
+ yield ctx.continueAsNew(continueInput({
672
+ prompt: `[SYSTEM: spawn_agent failed: ${err.message}]`,
673
+ }));
674
+ return "";
675
+ }
676
+ const childOrchId = `session-${childSessionId}`;
677
+ // Track the sub-agent
678
+ subAgents.push({
679
+ orchId: childOrchId,
680
+ sessionId: childSessionId,
681
+ task: result.task.slice(0, 500),
682
+ status: "running",
683
+ });
684
+ // Feed confirmation back to the LLM
685
+ const spawnMsg = `[SYSTEM: Sub-agent spawned successfully.\n` +
686
+ ` Agent ID: ${childOrchId}\n` +
687
+ ` Task: "${result.task.slice(0, 200)}"\n` +
688
+ ` The agent is now running autonomously. Use check_agents to monitor progress, ` +
689
+ `message_agent to send instructions. To wait for completion, use wait + check_agents ` +
690
+ `in a loop (choose an appropriate interval) so you can report progress to the user.]`;
691
+ yield ctx.continueAsNew(continueInput({ prompt: spawnMsg }));
692
+ return "";
693
+ }
694
+ case "message_agent": {
695
+ const targetOrchId = result.agentId;
696
+ const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
697
+ if (!agentEntry) {
698
+ ctx.traceInfo(`[orch] message_agent: unknown agent ${targetOrchId}`);
699
+ yield ctx.continueAsNew(continueInput({
700
+ prompt: `[SYSTEM: message_agent failed — agent "${targetOrchId}" not found. ` +
701
+ `Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
702
+ }));
703
+ return "";
704
+ }
705
+ ctx.traceInfo(`[orch] message_agent via SDK: ${agentEntry.sessionId} msg="${result.message.slice(0, 60)}"`);
706
+ try {
707
+ yield manager.sendToSession(agentEntry.sessionId, result.message);
708
+ }
709
+ catch (err) {
710
+ ctx.traceInfo(`[orch] message_agent failed: ${err.message}`);
711
+ yield ctx.continueAsNew(continueInput({
712
+ prompt: `[SYSTEM: message_agent failed: ${err.message}]`,
713
+ }));
714
+ return "";
715
+ }
716
+ yield ctx.continueAsNew(continueInput({
717
+ prompt: `[SYSTEM: Message sent to sub-agent ${targetOrchId}: "${result.message.slice(0, 200)}"]`,
718
+ }));
719
+ return "";
720
+ }
721
+ case "check_agents": {
722
+ ctx.traceInfo(`[orch] check_agents: ${subAgents.length} agents tracked`);
723
+ if (subAgents.length === 0) {
724
+ yield ctx.continueAsNew(continueInput({
725
+ prompt: `[SYSTEM: No sub-agents have been spawned yet.]`,
726
+ }));
727
+ return "";
728
+ }
729
+ // Get fresh status for each agent via the SDK
730
+ const statusLines = [];
731
+ for (const agent of subAgents) {
732
+ try {
733
+ const rawStatus = yield manager.getSessionStatus(agent.sessionId);
734
+ const parsed = JSON.parse(rawStatus);
735
+ // Update local tracking
736
+ // Sub-agents go "idle" when their turn completes
737
+ if (parsed.status === "completed" || parsed.status === "failed" || parsed.status === "idle") {
738
+ agent.status = parsed.status === "failed" ? "failed" : "completed";
739
+ if (parsed.result)
740
+ agent.result = parsed.result.slice(0, 1000);
741
+ }
742
+ statusLines.push(` - Agent ${agent.orchId}\n` +
743
+ ` Task: "${agent.task.slice(0, 120)}"\n` +
744
+ ` Status: ${parsed.status}\n` +
745
+ ` Iterations: ${parsed.iterations ?? 0}\n` +
746
+ ` Output: ${parsed.result ?? "(no output yet)"}`);
747
+ }
748
+ catch (err) {
749
+ statusLines.push(` - Agent ${agent.orchId}\n` +
750
+ ` Task: "${agent.task.slice(0, 120)}"\n` +
751
+ ` Status: unknown (error: ${err.message})`);
752
+ }
753
+ }
754
+ yield ctx.continueAsNew(continueInput({
755
+ prompt: `[SYSTEM: Sub-agent status report (${subAgents.length} agents):\n${statusLines.join("\n")}]`,
756
+ }));
757
+ return "";
758
+ }
759
+ case "list_sessions": {
760
+ ctx.traceInfo(`[orch] list_sessions`);
761
+ const rawSessions = yield manager.listSessions();
762
+ const sessions = JSON.parse(rawSessions);
763
+ const lines = sessions.map((s) => ` - ${s.sessionId}${s.sessionId === input.sessionId ? " (this session)" : ""}\n` +
764
+ ` Title: ${s.title ?? "(untitled)"}\n` +
765
+ ` Status: ${s.status}, Iterations: ${s.iterations ?? 0}\n` +
766
+ ` Parent: ${s.parentSessionId ?? "none"}`);
767
+ yield ctx.continueAsNew(continueInput({
768
+ prompt: `[SYSTEM: Active sessions (${sessions.length}):\n${lines.join("\n")}]`,
769
+ }));
770
+ return "";
771
+ }
772
+ case "wait_for_agents": {
773
+ let targetIds = result.agentIds;
774
+ // If empty, wait for all running agents
775
+ if (!targetIds || targetIds.length === 0) {
776
+ targetIds = subAgents.filter(a => a.status === "running").map(a => a.orchId);
777
+ }
778
+ if (targetIds.length === 0) {
779
+ ctx.traceInfo(`[orch] wait_for_agents: no running agents to wait for`);
780
+ yield ctx.continueAsNew(continueInput({
781
+ prompt: `[SYSTEM: No running sub-agents to wait for. All agents have already completed.]`,
782
+ }));
783
+ return "";
784
+ }
785
+ ctx.traceInfo(`[orch] wait_for_agents: waiting for ${targetIds.length} agents`);
786
+ setStatus(ctx, "running", {
787
+ iteration,
788
+ waitingForAgents: targetIds,
789
+ });
790
+ // Event-driven wait: children send updates to the parent's "messages"
791
+ // queue via sendToSession. We race messages vs a fallback poll timer.
792
+ // Child updates arrive as "[CHILD_UPDATE from=... type=...]" messages.
793
+ const POLL_INTERVAL_MS = 30_000; // 30s fallback poll (event-driven, so rarely needed)
794
+ const MAX_WAIT_ITERATIONS = 360;
795
+ for (let waitIter = 0; waitIter < MAX_WAIT_ITERATIONS; waitIter++) {
796
+ // Check if all targets are done (from local tracking)
797
+ const stillRunning = targetIds.filter(id => {
798
+ const agent = subAgents.find(a => a.orchId === id);
799
+ return agent && agent.status === "running";
800
+ });
801
+ if (stillRunning.length === 0)
802
+ break;
803
+ // Race: message (child update or user) vs fallback poll timer
804
+ const msg = ctx.dequeueEvent("messages");
805
+ const pollTimer = ctx.scheduleTimer(POLL_INTERVAL_MS);
806
+ const waitRace = yield ctx.race(msg, pollTimer);
807
+ if (waitRace.index === 0) {
808
+ // Message arrived — could be a child update or a user message
809
+ const msgData = typeof waitRace.value === "string"
810
+ ? JSON.parse(waitRace.value) : (waitRace.value ?? {});
811
+ // Check if it's a child update (sent by sendToSession from child orch)
812
+ const childUpdateMatch = typeof msgData.prompt === "string"
813
+ && msgData.prompt.match(/^\[CHILD_UPDATE from=(\S+) type=(\S+)/);
814
+ if (childUpdateMatch) {
815
+ const childSessionId = childUpdateMatch[1];
816
+ const updateType = childUpdateMatch[2].replace(/\]$/, "");
817
+ const content = msgData.prompt.split("\n").slice(1).join("\n").trim();
818
+ ctx.traceInfo(`[orch] wait_for_agents: child update from=${childSessionId} type=${updateType}`);
819
+ const agent = subAgents.find(a => a.sessionId === childSessionId);
820
+ if (agent) {
821
+ if (content)
822
+ agent.result = content.slice(0, 2000);
823
+ // Check via SDK if done (the update type alone isn't authoritative
824
+ // since "completed" means turn completed, not necessarily finished)
825
+ try {
826
+ const rawStatus = yield manager.getSessionStatus(agent.sessionId);
827
+ const parsed = JSON.parse(rawStatus);
828
+ // Sub-agents go "idle" when their turn completes (they have no user to wait for)
829
+ if (parsed.status === "completed" || parsed.status === "failed" || parsed.status === "idle") {
830
+ agent.status = parsed.status === "failed" ? "failed" : "completed";
831
+ if (parsed.result)
832
+ agent.result = parsed.result.slice(0, 2000);
833
+ }
834
+ }
835
+ catch { }
836
+ }
837
+ continue;
838
+ }
839
+ // Not a child update — it's a user message interrupting the wait
840
+ if (msgData.prompt) {
841
+ ctx.traceInfo(`[orch] wait_for_agents interrupted by user: "${msgData.prompt.slice(0, 60)}"`);
842
+ yield ctx.continueAsNew(continueInput({
843
+ prompt: msgData.prompt,
844
+ }));
845
+ return "";
846
+ }
847
+ }
848
+ else {
849
+ // Timer fired — fallback poll via SDK for any agents we missed
850
+ ctx.traceInfo(`[orch] wait_for_agents: fallback poll, checking ${stillRunning.length} agents`);
851
+ for (const targetId of stillRunning) {
852
+ const agent = subAgents.find(a => a.orchId === targetId);
853
+ if (!agent || agent.status !== "running")
854
+ continue;
855
+ try {
856
+ const rawStatus = yield manager.getSessionStatus(agent.sessionId);
857
+ const parsed = JSON.parse(rawStatus);
858
+ // Sub-agents go "idle" when their turn completes
859
+ if (parsed.status === "completed" || parsed.status === "failed" || parsed.status === "idle") {
860
+ agent.status = parsed.status === "failed" ? "failed" : "completed";
861
+ if (parsed.result)
862
+ agent.result = parsed.result.slice(0, 2000);
863
+ }
864
+ }
865
+ catch { }
866
+ }
867
+ }
868
+ }
869
+ // Build results summary
870
+ const resultLines = [];
871
+ for (const targetId of targetIds) {
872
+ const agent = subAgents.find(a => a.orchId === targetId);
873
+ if (!agent)
874
+ continue;
875
+ resultLines.push(` - Agent ${agent.orchId}\n` +
876
+ ` Task: "${agent.task.slice(0, 120)}"\n` +
877
+ ` Status: ${agent.status}\n` +
878
+ ` Result: ${agent.result ?? "(no result)"}`);
879
+ }
880
+ yield ctx.continueAsNew(continueInput({
881
+ prompt: `[SYSTEM: Sub-agents completed:\n${resultLines.join("\n")}]`,
882
+ }));
883
+ return "";
884
+ }
885
+ case "complete_agent": {
886
+ const targetOrchId = result.agentId;
887
+ const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
888
+ if (!agentEntry) {
889
+ ctx.traceInfo(`[orch] complete_agent: unknown agent ${targetOrchId}`);
890
+ yield ctx.continueAsNew(continueInput({
891
+ prompt: `[SYSTEM: complete_agent failed — agent "${targetOrchId}" not found. ` +
892
+ `Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
893
+ }));
894
+ return "";
895
+ }
896
+ ctx.traceInfo(`[orch] complete_agent: sending /done to ${agentEntry.sessionId}`);
897
+ try {
898
+ // Send a /done command to the child's orchestration
899
+ const cmdId = `done-${iteration}`;
900
+ yield manager.sendCommandToSession(agentEntry.sessionId, { type: "cmd", cmd: "done", id: cmdId, args: { reason: "Completed by parent" } });
901
+ agentEntry.status = "completed";
902
+ }
903
+ catch (err) {
904
+ ctx.traceInfo(`[orch] complete_agent failed: ${err.message}`);
905
+ yield ctx.continueAsNew(continueInput({
906
+ prompt: `[SYSTEM: complete_agent failed: ${err.message}]`,
907
+ }));
908
+ return "";
909
+ }
910
+ yield ctx.continueAsNew(continueInput({
911
+ prompt: `[SYSTEM: Sub-agent ${targetOrchId} has been completed gracefully.]`,
912
+ }));
913
+ return "";
914
+ }
915
+ case "cancel_agent": {
916
+ const targetOrchId = result.agentId;
917
+ const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
918
+ if (!agentEntry) {
919
+ ctx.traceInfo(`[orch] cancel_agent: unknown agent ${targetOrchId}`);
920
+ yield ctx.continueAsNew(continueInput({
921
+ prompt: `[SYSTEM: cancel_agent failed — agent "${targetOrchId}" not found. ` +
922
+ `Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
923
+ }));
924
+ return "";
925
+ }
926
+ const cancelReason = result.reason ?? "Cancelled by parent";
927
+ ctx.traceInfo(`[orch] cancel_agent: cancelling ${agentEntry.sessionId} reason="${cancelReason}"`);
928
+ try {
929
+ // Cascade: cancel all descendants of the target agent first
930
+ const descendants = yield manager.getDescendantSessionIds(agentEntry.sessionId);
931
+ if (descendants.length > 0) {
932
+ ctx.traceInfo(`[orch] cancel_agent: cascading cancel to ${descendants.length} descendant(s)`);
933
+ for (const descId of descendants) {
934
+ try {
935
+ yield manager.cancelSession(descId, `Ancestor ${agentEntry.sessionId} cancelled: ${cancelReason}`);
936
+ }
937
+ catch (err) {
938
+ ctx.traceInfo(`[orch] cancel_agent: failed to cancel descendant ${descId}: ${err.message} (non-fatal)`);
939
+ }
940
+ }
941
+ }
942
+ yield manager.cancelSession(agentEntry.sessionId, cancelReason);
943
+ agentEntry.status = "cancelled";
944
+ }
945
+ catch (err) {
946
+ ctx.traceInfo(`[orch] cancel_agent failed: ${err.message}`);
947
+ yield ctx.continueAsNew(continueInput({
948
+ prompt: `[SYSTEM: cancel_agent failed: ${err.message}]`,
949
+ }));
950
+ return "";
951
+ }
952
+ yield ctx.continueAsNew(continueInput({
953
+ prompt: `[SYSTEM: Sub-agent ${targetOrchId} has been cancelled.${result.reason ? ` Reason: ${result.reason}` : ""}]`,
954
+ }));
955
+ return "";
956
+ }
957
+ case "delete_agent": {
958
+ const targetOrchId = result.agentId;
959
+ const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
960
+ if (!agentEntry) {
961
+ ctx.traceInfo(`[orch] delete_agent: unknown agent ${targetOrchId}`);
962
+ yield ctx.continueAsNew(continueInput({
963
+ prompt: `[SYSTEM: delete_agent failed — agent "${targetOrchId}" not found. ` +
964
+ `Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
965
+ }));
966
+ return "";
967
+ }
968
+ const deleteReason = result.reason ?? "Deleted by parent";
969
+ ctx.traceInfo(`[orch] delete_agent: deleting ${agentEntry.sessionId} reason="${deleteReason}"`);
970
+ try {
971
+ // Cascade: delete all descendants of the target agent first
972
+ const descendants = yield manager.getDescendantSessionIds(agentEntry.sessionId);
973
+ if (descendants.length > 0) {
974
+ ctx.traceInfo(`[orch] delete_agent: cascading delete to ${descendants.length} descendant(s)`);
975
+ for (const descId of descendants) {
976
+ try {
977
+ yield manager.deleteSession(descId, `Ancestor ${agentEntry.sessionId} deleted: ${deleteReason}`);
978
+ }
979
+ catch (err) {
980
+ ctx.traceInfo(`[orch] delete_agent: failed to delete descendant ${descId}: ${err.message} (non-fatal)`);
981
+ }
982
+ }
983
+ }
984
+ yield manager.deleteSession(agentEntry.sessionId, deleteReason);
985
+ // Remove from subAgents tracking entirely
986
+ subAgents = subAgents.filter(a => a.orchId !== targetOrchId);
987
+ }
988
+ catch (err) {
989
+ ctx.traceInfo(`[orch] delete_agent failed: ${err.message}`);
990
+ yield ctx.continueAsNew(continueInput({
991
+ prompt: `[SYSTEM: delete_agent failed: ${err.message}]`,
992
+ }));
993
+ return "";
994
+ }
995
+ yield ctx.continueAsNew(continueInput({
996
+ prompt: `[SYSTEM: Sub-agent ${targetOrchId} has been deleted.${result.reason ? ` Reason: ${result.reason}` : ""}]`,
997
+ }));
998
+ return "";
999
+ }
1000
+ case "error": {
1001
+ // Treat like an activity failure — retry with backoff.
1002
+ retryCount++;
1003
+ ctx.traceInfo(`[orch] turn returned error (attempt ${retryCount}/${MAX_RETRIES}): ${result.message}`);
1004
+ if (retryCount >= MAX_RETRIES) {
1005
+ ctx.traceInfo(`[orch] max retries exhausted for turn error, waiting for user input`);
1006
+ setStatus(ctx, "error", {
1007
+ iteration,
1008
+ error: `Failed after ${MAX_RETRIES} attempts: ${result.message}`,
1009
+ retriesExhausted: true,
1010
+ });
1011
+ retryCount = 0;
1012
+ continue;
1013
+ }
1014
+ setStatus(ctx, "error", {
1015
+ iteration,
1016
+ error: `${result.message} (retry ${retryCount}/${MAX_RETRIES})`,
1017
+ });
1018
+ const errorRetryDelay = 15 * Math.pow(2, retryCount - 1);
1019
+ ctx.traceInfo(`[orch] retrying in ${errorRetryDelay}s after turn error`);
1020
+ if (blobEnabled) {
1021
+ yield* dehydrateAndReset("error");
1022
+ }
1023
+ yield ctx.scheduleTimer(errorRetryDelay * 1000);
1024
+ yield ctx.continueAsNew(continueInput({
1025
+ prompt,
1026
+ retryCount,
1027
+ needsHydration: blobEnabled ? true : needsHydration,
1028
+ }));
1029
+ return "";
1030
+ }
1031
+ }
1032
+ }
1033
+ }
1034
+ //# sourceMappingURL=orchestration_1_0_6.js.map