pilotswarm-sdk 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (183) hide show
  1. package/dist/agent-loader.d.ts +61 -0
  2. package/dist/agent-loader.d.ts.map +1 -0
  3. package/dist/agent-loader.js +212 -0
  4. package/dist/agent-loader.js.map +1 -0
  5. package/dist/artifact-tools.d.ts +31 -0
  6. package/dist/artifact-tools.d.ts.map +1 -0
  7. package/dist/artifact-tools.js +190 -0
  8. package/dist/artifact-tools.js.map +1 -0
  9. package/dist/blob-store.d.ts +73 -0
  10. package/dist/blob-store.d.ts.map +1 -0
  11. package/dist/blob-store.js +220 -0
  12. package/dist/blob-store.js.map +1 -0
  13. package/dist/client.d.ts +159 -0
  14. package/dist/client.d.ts.map +1 -0
  15. package/dist/client.js +676 -0
  16. package/dist/client.js.map +1 -0
  17. package/dist/cms.d.ts +129 -0
  18. package/dist/cms.d.ts.map +1 -0
  19. package/dist/cms.js +313 -0
  20. package/dist/cms.js.map +1 -0
  21. package/dist/index.d.ts +44 -0
  22. package/dist/index.d.ts.map +1 -0
  23. package/dist/index.js +42 -0
  24. package/dist/index.js.map +1 -0
  25. package/dist/managed-session.d.ts +70 -0
  26. package/dist/managed-session.d.ts.map +1 -0
  27. package/dist/managed-session.js +717 -0
  28. package/dist/managed-session.js.map +1 -0
  29. package/dist/management-client.d.ts +171 -0
  30. package/dist/management-client.d.ts.map +1 -0
  31. package/dist/management-client.js +401 -0
  32. package/dist/management-client.js.map +1 -0
  33. package/dist/mcp-loader.d.ts +50 -0
  34. package/dist/mcp-loader.d.ts.map +1 -0
  35. package/dist/mcp-loader.js +83 -0
  36. package/dist/mcp-loader.js.map +1 -0
  37. package/dist/model-providers.d.ts +143 -0
  38. package/dist/model-providers.d.ts.map +1 -0
  39. package/dist/model-providers.js +228 -0
  40. package/dist/model-providers.js.map +1 -0
  41. package/dist/orchestration-registry.d.ts +7 -0
  42. package/dist/orchestration-registry.d.ts.map +1 -0
  43. package/dist/orchestration-registry.js +49 -0
  44. package/dist/orchestration-registry.js.map +1 -0
  45. package/dist/orchestration.d.ts +36 -0
  46. package/dist/orchestration.d.ts.map +1 -0
  47. package/dist/orchestration.js +1357 -0
  48. package/dist/orchestration.js.map +1 -0
  49. package/dist/orchestration_1_0_0.d.ts +20 -0
  50. package/dist/orchestration_1_0_0.d.ts.map +1 -0
  51. package/dist/orchestration_1_0_0.js +497 -0
  52. package/dist/orchestration_1_0_0.js.map +1 -0
  53. package/dist/orchestration_1_0_1.d.ts +19 -0
  54. package/dist/orchestration_1_0_1.d.ts.map +1 -0
  55. package/dist/orchestration_1_0_1.js +546 -0
  56. package/dist/orchestration_1_0_1.js.map +1 -0
  57. package/dist/orchestration_1_0_10.d.ts +36 -0
  58. package/dist/orchestration_1_0_10.d.ts.map +1 -0
  59. package/dist/orchestration_1_0_10.js +1253 -0
  60. package/dist/orchestration_1_0_10.js.map +1 -0
  61. package/dist/orchestration_1_0_11.d.ts +36 -0
  62. package/dist/orchestration_1_0_11.d.ts.map +1 -0
  63. package/dist/orchestration_1_0_11.js +1255 -0
  64. package/dist/orchestration_1_0_11.js.map +1 -0
  65. package/dist/orchestration_1_0_12.d.ts +36 -0
  66. package/dist/orchestration_1_0_12.d.ts.map +1 -0
  67. package/dist/orchestration_1_0_12.js +1250 -0
  68. package/dist/orchestration_1_0_12.js.map +1 -0
  69. package/dist/orchestration_1_0_13.d.ts +36 -0
  70. package/dist/orchestration_1_0_13.d.ts.map +1 -0
  71. package/dist/orchestration_1_0_13.js +1260 -0
  72. package/dist/orchestration_1_0_13.js.map +1 -0
  73. package/dist/orchestration_1_0_14.d.ts +36 -0
  74. package/dist/orchestration_1_0_14.d.ts.map +1 -0
  75. package/dist/orchestration_1_0_14.js +1258 -0
  76. package/dist/orchestration_1_0_14.js.map +1 -0
  77. package/dist/orchestration_1_0_15.d.ts +36 -0
  78. package/dist/orchestration_1_0_15.d.ts.map +1 -0
  79. package/dist/orchestration_1_0_15.js +1266 -0
  80. package/dist/orchestration_1_0_15.js.map +1 -0
  81. package/dist/orchestration_1_0_16.d.ts +36 -0
  82. package/dist/orchestration_1_0_16.d.ts.map +1 -0
  83. package/dist/orchestration_1_0_16.js +1275 -0
  84. package/dist/orchestration_1_0_16.js.map +1 -0
  85. package/dist/orchestration_1_0_17.d.ts +36 -0
  86. package/dist/orchestration_1_0_17.d.ts.map +1 -0
  87. package/dist/orchestration_1_0_17.js +1314 -0
  88. package/dist/orchestration_1_0_17.js.map +1 -0
  89. package/dist/orchestration_1_0_18.d.ts +36 -0
  90. package/dist/orchestration_1_0_18.d.ts.map +1 -0
  91. package/dist/orchestration_1_0_18.js +1328 -0
  92. package/dist/orchestration_1_0_18.js.map +1 -0
  93. package/dist/orchestration_1_0_19.d.ts +36 -0
  94. package/dist/orchestration_1_0_19.d.ts.map +1 -0
  95. package/dist/orchestration_1_0_19.js +1324 -0
  96. package/dist/orchestration_1_0_19.js.map +1 -0
  97. package/dist/orchestration_1_0_2.d.ts +19 -0
  98. package/dist/orchestration_1_0_2.d.ts.map +1 -0
  99. package/dist/orchestration_1_0_2.js +749 -0
  100. package/dist/orchestration_1_0_2.js.map +1 -0
  101. package/dist/orchestration_1_0_20.d.ts +36 -0
  102. package/dist/orchestration_1_0_20.d.ts.map +1 -0
  103. package/dist/orchestration_1_0_20.js +1347 -0
  104. package/dist/orchestration_1_0_20.js.map +1 -0
  105. package/dist/orchestration_1_0_3.d.ts +19 -0
  106. package/dist/orchestration_1_0_3.d.ts.map +1 -0
  107. package/dist/orchestration_1_0_3.js +826 -0
  108. package/dist/orchestration_1_0_3.js.map +1 -0
  109. package/dist/orchestration_1_0_4.d.ts +19 -0
  110. package/dist/orchestration_1_0_4.d.ts.map +1 -0
  111. package/dist/orchestration_1_0_4.js +1020 -0
  112. package/dist/orchestration_1_0_4.js.map +1 -0
  113. package/dist/orchestration_1_0_5.d.ts +19 -0
  114. package/dist/orchestration_1_0_5.d.ts.map +1 -0
  115. package/dist/orchestration_1_0_5.js +1027 -0
  116. package/dist/orchestration_1_0_5.js.map +1 -0
  117. package/dist/orchestration_1_0_6.d.ts +19 -0
  118. package/dist/orchestration_1_0_6.d.ts.map +1 -0
  119. package/dist/orchestration_1_0_6.js +1034 -0
  120. package/dist/orchestration_1_0_6.js.map +1 -0
  121. package/dist/orchestration_1_0_7.d.ts +19 -0
  122. package/dist/orchestration_1_0_7.d.ts.map +1 -0
  123. package/dist/orchestration_1_0_7.js +1085 -0
  124. package/dist/orchestration_1_0_7.js.map +1 -0
  125. package/dist/orchestration_1_0_8.d.ts +36 -0
  126. package/dist/orchestration_1_0_8.d.ts.map +1 -0
  127. package/dist/orchestration_1_0_8.js +1106 -0
  128. package/dist/orchestration_1_0_8.js.map +1 -0
  129. package/dist/orchestration_1_0_9.d.ts +36 -0
  130. package/dist/orchestration_1_0_9.d.ts.map +1 -0
  131. package/dist/orchestration_1_0_9.js +1207 -0
  132. package/dist/orchestration_1_0_9.js.map +1 -0
  133. package/dist/prompt-layering.d.ts +16 -0
  134. package/dist/prompt-layering.d.ts.map +1 -0
  135. package/dist/prompt-layering.js +60 -0
  136. package/dist/prompt-layering.js.map +1 -0
  137. package/dist/resourcemgr-tools.d.ts +27 -0
  138. package/dist/resourcemgr-tools.d.ts.map +1 -0
  139. package/dist/resourcemgr-tools.js +638 -0
  140. package/dist/resourcemgr-tools.js.map +1 -0
  141. package/dist/session-dumper.d.ts +26 -0
  142. package/dist/session-dumper.d.ts.map +1 -0
  143. package/dist/session-dumper.js +272 -0
  144. package/dist/session-dumper.js.map +1 -0
  145. package/dist/session-manager.d.ts +152 -0
  146. package/dist/session-manager.d.ts.map +1 -0
  147. package/dist/session-manager.js +493 -0
  148. package/dist/session-manager.js.map +1 -0
  149. package/dist/session-proxy.d.ts +68 -0
  150. package/dist/session-proxy.d.ts.map +1 -0
  151. package/dist/session-proxy.js +665 -0
  152. package/dist/session-proxy.js.map +1 -0
  153. package/dist/session-store.d.ts +35 -0
  154. package/dist/session-store.d.ts.map +1 -0
  155. package/dist/session-store.js +88 -0
  156. package/dist/session-store.js.map +1 -0
  157. package/dist/skills.d.ts +31 -0
  158. package/dist/skills.d.ts.map +1 -0
  159. package/dist/skills.js +93 -0
  160. package/dist/skills.js.map +1 -0
  161. package/dist/sweeper-tools.d.ts +28 -0
  162. package/dist/sweeper-tools.d.ts.map +1 -0
  163. package/dist/sweeper-tools.js +332 -0
  164. package/dist/sweeper-tools.js.map +1 -0
  165. package/dist/types.d.ts +498 -0
  166. package/dist/types.d.ts.map +1 -0
  167. package/dist/types.js +9 -0
  168. package/dist/types.js.map +1 -0
  169. package/dist/worker.d.ts +128 -0
  170. package/dist/worker.d.ts.map +1 -0
  171. package/dist/worker.js +562 -0
  172. package/dist/worker.js.map +1 -0
  173. package/package.json +74 -0
  174. package/plugins/mgmt/agents/pilotswarm.agent.md +59 -0
  175. package/plugins/mgmt/agents/resourcemgr.agent.md +111 -0
  176. package/plugins/mgmt/agents/sweeper.agent.md +67 -0
  177. package/plugins/mgmt/skills/resourcemgr/SKILL.md +41 -0
  178. package/plugins/mgmt/skills/resourcemgr/tools.json +1 -0
  179. package/plugins/mgmt/skills/sweeper/SKILL.md +44 -0
  180. package/plugins/mgmt/skills/sweeper/tools.json +1 -0
  181. package/plugins/system/agents/default.agent.md +58 -0
  182. package/plugins/system/skills/durable-timers/SKILL.md +39 -0
  183. package/plugins/system/skills/sub-agents/SKILL.md +75 -0
@@ -0,0 +1,1253 @@
1
+ import { RESPONSE_VERSION_KEY, COMMAND_VERSION_KEY, RESPONSE_LATEST_KEY, commandResponseKey, } from "./types.js";
2
+ import { createSessionProxy, createSessionManagerProxy } from "./session-proxy.js";
3
/**
 * Publish a session-state snapshot as the orchestration's custom status.
 * The snapshot is a JSON blob clients read via waitForStatusChange() or
 * getStatus(). Keys in `extra` are merged after `status`, so an `extra.status`
 * would override the positional one.
 * @internal
 */
function setStatus(ctx, status, extra) {
    const payload = JSON.stringify({ status, ...(extra ?? {}) });
    ctx.setCustomStatus(payload);
}
12
/**
 * Version tag of the orchestration implementation in this module.
 * Passed to continueAsNewVersioned() so that a continue-as-new always
 * resumes into the matching code version of the durable session loop.
 *
 * (The previous JSDoc here was a copy-paste duplicate of the orchestration
 * function's own doc block and did not describe this constant.)
 * @internal
 */
export const CURRENT_ORCHESTRATION_VERSION = "1.0.10";
29
+ /**
30
+ * Long-lived durable session orchestration.
31
+ *
32
+ * One orchestration per copilot session. Uses:
33
+ * - SessionProxy for session-scoped operations (runTurn, dehydrate, hydrate, destroy)
34
+ * - SessionManagerProxy for global operations (listModels)
35
+ * - A single FIFO event queue ("messages") for all client→orchestration communication
36
+ *
37
+ * Main loop:
38
+ * 1. Dequeue message from "messages" queue
39
+ * 2. session.hydrate() if needed
40
+ * 3. session.runTurn(prompt) — returns TurnResult
41
+ * 4. Handle result: completed → idle wait, wait → timer, input → wait for answer
42
+ *
43
+ * @internal
44
+ */
45
+ export function* durableSessionOrchestration_1_0_10(ctx, input) {
46
+ const rawTraceInfo = typeof ctx.traceInfo === "function" ? ctx.traceInfo.bind(ctx) : null;
47
+ if (rawTraceInfo) {
48
+ ctx.traceInfo = (message) => rawTraceInfo(`[v1.0.10] ${message}`);
49
+ }
50
+ const dehydrateThreshold = input.dehydrateThreshold ?? 30;
51
+ const idleTimeout = input.idleTimeout ?? 30;
52
+ const inputGracePeriod = input.inputGracePeriod ?? 30;
53
+ const checkpointInterval = input.checkpointInterval ?? -1; // seconds, -1 = disabled
54
+ const rehydrationMessage = input.rehydrationMessage;
55
+ const blobEnabled = input.blobEnabled ?? false;
56
+ let needsHydration = input.needsHydration ?? false;
57
+ let affinityKey = input.affinityKey ?? input.sessionId;
58
+ let iteration = input.iteration ?? 0;
59
+ let config = { ...input.config };
60
+ let retryCount = input.retryCount ?? 0;
61
+ let taskContext = input.taskContext;
62
+ const baseSystemMessage = input.baseSystemMessage ?? config.systemMessage;
63
+ const isSystem = input.isSystem ?? false;
64
+ const MAX_RETRIES = 3;
65
+ const MAX_SUB_AGENTS = 20;
66
+ const MAX_NESTING_LEVEL = 2; // 0=root, 1=child, 2=grandchild — no deeper
67
+ // ─── Sub-agent tracking ──────────────────────────────────
68
+ let subAgents = input.subAgents ? [...input.subAgents] : [];
69
+ // parentSessionId: prefer new field, fall back to old parentOrchId for backward compat
70
+ const parentSessionId = input.parentSessionId
71
+ ?? (input.parentOrchId ? input.parentOrchId.replace(/^session-/, '') : undefined);
72
+ const nestingLevel = input.nestingLevel ?? 0;
73
+ // If we have a captured task context, inject it into the system message
74
+ // so it survives LLM conversation truncation (BasicTruncator never drops system messages).
75
+ if (taskContext) {
76
+ const base = typeof baseSystemMessage === 'string'
77
+ ? baseSystemMessage ?? ''
78
+ : baseSystemMessage?.content ?? '';
79
+ config.systemMessage = base + (base ? '\n\n' : '') +
80
+ '[RECURRING TASK]\n' +
81
+ 'Original user request (always remember, even if conversation history is truncated):\n"' +
82
+ taskContext + '"';
83
+ }
84
+ // ─── Title summarization timer ───────────────────────────
85
+ // First summarize at iteration 0 + 60s, then every 300s.
86
+ // We track the target timestamp (epoch ms) across continueAsNew.
87
+ // 0 means "schedule on first turn completion".
88
+ let nextSummarizeAt = input.nextSummarizeAt ?? 0;
89
+ // ─── Create proxies ──────────────────────────────────────
90
+ const manager = createSessionManagerProxy(ctx);
91
+ let session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
92
// Read a persisted integer counter from orchestration state.
// Missing or non-numeric values are treated as 0.
function readCounter(key) {
    const stored = ctx.getValue(key);
    if (stored == null) {
        return 0;
    }
    const asNumber = Number(stored);
    return Number.isFinite(asNumber) ? asNumber : 0;
}
99
// Persist a JSON-serializable value under the given orchestration state key.
function writeJsonValue(key, value) {
    ctx.setValue(key, JSON.stringify(value));
}
// Increment a persisted integer counter and return its new value.
function bumpCounter(key) {
    const bumped = readCounter(key) + 1;
    ctx.setValue(key, String(bumped));
    return bumped;
}
107
+ let lastResponseVersion = readCounter(RESPONSE_VERSION_KEY);
108
+ let lastCommandVersion = readCounter(COMMAND_VERSION_KEY);
109
+ let lastCommandId;
110
// Emit a status signal carrying the current iteration plus any known
// response/command version markers, with caller-supplied extras merged last.
function publishStatus(status, extra = {}) {
    const signal = { iteration };
    if (lastResponseVersion > 0) {
        signal.responseVersion = lastResponseVersion;
    }
    if (lastCommandVersion > 0) {
        signal.commandVersion = lastCommandVersion;
    }
    if (lastCommandId) {
        signal.commandId = lastCommandId;
    }
    Object.assign(signal, extra);
    setStatus(ctx, status, signal);
}
120
// Bump the response version counter, stamp the payload with schema/version/
// timestamp, and persist it as the "latest response" record.
// Returns the stamped payload.
function* writeLatestResponse(payload) {
    const nextVersion = bumpCounter(RESPONSE_VERSION_KEY);
    const stampedAt = yield ctx.utcNow();
    const record = {
        schemaVersion: 1,
        version: nextVersion,
        emittedAt: stampedAt,
        ...payload,
    };
    writeJsonValue(RESPONSE_LATEST_KEY, record);
    lastResponseVersion = nextVersion;
    return record;
}
133
// Bump the command version counter and persist the stamped response under
// the per-command state key. Note the stamp fields are spread AFTER the
// response, so they override any same-named caller fields.
// Returns the stamped payload.
function* writeCommandResponse(response) {
    const nextVersion = bumpCounter(COMMAND_VERSION_KEY);
    const stampedAt = yield ctx.utcNow();
    const record = {
        ...response,
        schemaVersion: 1,
        version: nextVersion,
        emittedAt: stampedAt,
    };
    writeJsonValue(commandResponseKey(response.id), record);
    lastCommandVersion = nextVersion;
    lastCommandId = response.id;
    return record;
}
147
// ─── Helper: wrap prompt with resume context after dehydration ──
// Append a "[SYSTEM: …]" trailer telling the LLM the session was rehydrated
// on a new worker; an optional extra line can be included inside the trailer.
function wrapWithResumeContext(userPrompt, extra) {
    const notice = rehydrationMessage ??
        `The session was dehydrated and has been rehydrated on a new worker. ` +
            `The LLM conversation history is preserved.`;
    const lines = [userPrompt, ``, `[SYSTEM: ${notice}`];
    if (extra) {
        lines.push(extra);
    }
    lines.push(`]`);
    return lines.join('\n');
}
158
// ─── Shared continueAsNew input builder ──────────────────
// Snapshot all loop state for the next orchestration generation.
// retryCount resets to 0 unless an override re-supplies it, and any
// override key wins over the snapshot value.
function continueInput(overrides = {}) {
    const snapshot = {
        sessionId: input.sessionId,
        config,
        iteration,
        affinityKey,
        needsHydration,
        blobEnabled,
        dehydrateThreshold,
        idleTimeout,
        inputGracePeriod,
        checkpointInterval,
        rehydrationMessage,
        nextSummarizeAt,
        taskContext,
        baseSystemMessage,
        subAgents,
        parentSessionId,
        nestingLevel,
    };
    if (isSystem) {
        snapshot.isSystem = true;
    }
    snapshot.retryCount = 0; // reset by default; overrides can set it
    return { ...snapshot, ...overrides };
}
183
/**
 * Yield this to continueAsNew into the current (latest) orchestration version.
 * (Parameter renamed locally so it no longer shadows the outer `input`.)
 */
function versionedContinueAsNew(nextInput) {
    return ctx.continueAsNewVersioned(nextInput, CURRENT_ORCHESTRATION_VERSION);
}
187
// Parse a "[CHILD_UPDATE from=<sessionId> type=<kind> …]" header line sent
// by a sub-agent. Content is everything after the first line, trimmed.
// Returns { sessionId, updateType, content }, or null when the prompt is
// not a string or lacks the header.
function parseChildUpdate(promptText) {
    if (typeof promptText !== "string") {
        return null;
    }
    const header = promptText.match(/^\[CHILD_UPDATE from=(\S+) type=(\S+)/);
    if (!header) {
        return null;
    }
    const [, sessionId, rawType] = header;
    const body = promptText.split("\n").slice(1).join("\n").trim();
    return {
        sessionId,
        // \S+ can swallow a trailing "]" when type is the last token — strip it.
        updateType: rawType.replace(/\]$/, ""),
        content: body,
    };
}
199
// Fold a child update into the tracked subAgents roster, then best-effort
// cross-check the child's live status via the session manager. Unknown
// children are ignored; results are truncated to 2000 chars.
function* applyChildUpdate(update) {
    ctx.traceInfo(`[orch] child update from=${update.sessionId} type=${update.updateType}`);
    const tracked = subAgents.find(a => a.sessionId === update.sessionId);
    if (!tracked) {
        return;
    }
    if (update.content) {
        tracked.result = update.content.slice(0, 2000);
    }
    if (update.updateType === "completed") {
        tracked.status = "completed";
    }
    try {
        const rawStatus = yield manager.getSessionStatus(tracked.sessionId);
        const reported = JSON.parse(rawStatus);
        if (reported.status === "completed" || reported.status === "failed" || reported.status === "idle") {
            tracked.status = reported.status === "failed" ? "failed" : "completed";
        }
        if (reported.result && reported.result !== "done") {
            tracked.result = reported.result.slice(0, 2000);
        }
    }
    catch {
        // Status refresh is advisory only — ignore lookup/parse failures.
    }
}
222
// ─── Helper: dehydrate + reset affinity ──────────────────
// Persist session state to blob storage, mark it as needing hydration, and
// rotate the affinity key so the next hydrate may land on a different worker.
function* dehydrateAndReset(reason) {
    ctx.traceInfo(`[orch] dehydrating session (reason=${reason})`);
    yield session.dehydrate(reason);
    needsHydration = true;
    affinityKey = yield ctx.newGuid();
    session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
}
230
// ─── Helper: checkpoint without releasing pin ────────────
// Best-effort checkpoint of the live session; a no-op unless blob storage
// is on and checkpointing is enabled (checkpointInterval >= 0).
function* maybeCheckpoint() {
    if (!blobEnabled || checkpointInterval < 0) {
        return;
    }
    try {
        ctx.traceInfo(`[orch] checkpoint (iteration=${iteration})`);
        yield session.checkpoint();
    }
    catch (err) {
        // Checkpointing is best-effort — log and keep the loop alive.
        ctx.traceInfo(`[orch] checkpoint failed: ${err.message ?? err}`);
    }
}
242
// ─── Helper: summarize session title if due ──────────────
const FIRST_SUMMARIZE_DELAY = 60_000; // 1 minute
const REPEAT_SUMMARIZE_DELAY = 300_000; // 5 minutes
// Trigger a best-effort title summarization when the deadline has passed.
// The first run is scheduled 60s after session start; later runs every 5
// minutes. The deadline (nextSummarizeAt, epoch ms) survives continueAsNew.
function* maybeSummarize() {
    // System sessions have fixed titles — never summarize
    if (isSystem) {
        return;
    }
    const now = yield ctx.utcNow();
    if (nextSummarizeAt === 0) {
        // First call: arm the initial deadline and bail.
        nextSummarizeAt = now + FIRST_SUMMARIZE_DELAY;
        return;
    }
    if (now < nextSummarizeAt) {
        return;
    }
    // Deadline passed — summarize, best effort.
    try {
        ctx.traceInfo(`[orch] summarizing session title`);
        yield manager.summarizeSession(input.sessionId);
    }
    catch (err) {
        // A failed summarize must never block the turn loop.
        ctx.traceInfo(`[orch] summarize failed: ${err.message}`);
    }
    nextSummarizeAt = now + REPEAT_SUMMARIZE_DELAY;
}
267
+ // ─── Prompt carried from continueAsNew ───────────────────
268
+ let pendingPrompt = input.prompt;
269
+ ctx.traceInfo(`[orch] start: iter=${iteration} pending=${pendingPrompt ? `"${pendingPrompt.slice(0, 40)}"` : 'NONE'} hydrate=${needsHydration} blob=${blobEnabled}`);
270
+ // ─── MAIN LOOP ──────────────────────────────────────────
271
+ while (true) {
272
+ // ① GET NEXT PROMPT
273
+ let prompt = "";
274
+ if (pendingPrompt) {
275
+ prompt = pendingPrompt;
276
+ pendingPrompt = undefined;
277
+ }
278
+ else {
279
+ publishStatus("idle");
280
+ let gotPrompt = false;
281
+ while (!gotPrompt) {
282
+ // All messages (from users and child agents) arrive on the "messages" queue.
283
+ // Child agents communicate via the SDK (sendToSession), which enqueues
284
+ // to the same "messages" queue as user prompts.
285
+ let msgData;
286
+ const msg = yield ctx.dequeueEvent("messages");
287
+ msgData = typeof msg === "string" ? JSON.parse(msg) : msg;
288
+ // ── Command dispatch ─────────────────────────
289
+ if (msgData.type === "cmd") {
290
+ const cmdMsg = msgData;
291
+ ctx.traceInfo(`[orch-cmd] ${cmdMsg.cmd} id=${cmdMsg.id}`);
292
+ switch (cmdMsg.cmd) {
293
+ case "set_model": {
294
+ const newModel = String(cmdMsg.args?.model || "");
295
+ const oldModel = config.model || "(default)";
296
+ config = { ...config, model: newModel };
297
+ const resp = {
298
+ id: cmdMsg.id,
299
+ cmd: cmdMsg.cmd,
300
+ result: { ok: true, oldModel, newModel },
301
+ };
302
+ yield* writeCommandResponse(resp);
303
+ publishStatus("idle");
304
+ yield versionedContinueAsNew(continueInput());
305
+ return "";
306
+ }
307
+ case "list_models": {
308
+ publishStatus("idle", { cmdProcessing: cmdMsg.id });
309
+ let models;
310
+ try {
311
+ const raw = yield manager.listModels();
312
+ models = typeof raw === "string" ? JSON.parse(raw) : raw;
313
+ }
314
+ catch (err) {
315
+ const resp = {
316
+ id: cmdMsg.id,
317
+ cmd: cmdMsg.cmd,
318
+ error: err.message || String(err),
319
+ };
320
+ yield* writeCommandResponse(resp);
321
+ publishStatus("idle");
322
+ continue;
323
+ }
324
+ const resp = {
325
+ id: cmdMsg.id,
326
+ cmd: cmdMsg.cmd,
327
+ result: { models, currentModel: config.model },
328
+ };
329
+ yield* writeCommandResponse(resp);
330
+ publishStatus("idle");
331
+ continue;
332
+ }
333
+ case "get_info": {
334
+ const resp = {
335
+ id: cmdMsg.id,
336
+ cmd: cmdMsg.cmd,
337
+ result: {
338
+ model: config.model || "(default)",
339
+ iteration,
340
+ sessionId: input.sessionId,
341
+ affinityKey: affinityKey?.slice(0, 8),
342
+ needsHydration,
343
+ blobEnabled,
344
+ },
345
+ };
346
+ yield* writeCommandResponse(resp);
347
+ publishStatus("idle");
348
+ continue;
349
+ }
350
+ case "done": {
351
+ ctx.traceInfo(`[orch] /done command received — completing session`);
352
+ // Cascade: complete all sub-agents whose orchestrations may still be alive.
353
+ // Include "running" AND "completed" — a child that sent CHILD_UPDATE
354
+ // may still have a live orchestration waiting in its idle loop.
355
+ const liveChildren = subAgents.filter(a => a.status === "running" || a.status === "completed");
356
+ if (liveChildren.length > 0) {
357
+ ctx.traceInfo(`[orch] /done: completing ${liveChildren.length} sub-agent(s)`);
358
+ for (const child of liveChildren) {
359
+ try {
360
+ const childCmdId = `done-cascade-${iteration}-${child.sessionId.slice(0, 8)}`;
361
+ yield manager.sendCommandToSession(child.sessionId, { type: "cmd", cmd: "done", id: childCmdId, args: { reason: "Parent session completing" } });
362
+ child.status = "completed";
363
+ ctx.traceInfo(`[orch] /done: completed child ${child.sessionId}`);
364
+ }
365
+ catch (err) {
366
+ ctx.traceInfo(`[orch] /done: failed to complete child ${child.sessionId}: ${err.message} (non-fatal)`);
367
+ }
368
+ }
369
+ }
370
+ // If this is a child orchestration, send final result to parent
371
+ if (parentSessionId) {
372
+ try {
373
+ const doneReason = String(cmdMsg.args?.reason || "Session completed by user");
374
+ yield manager.sendToSession(parentSessionId, `[CHILD_UPDATE from=${input.sessionId} type=completed iter=${iteration}]\n${doneReason}`);
375
+ }
376
+ catch (err) {
377
+ ctx.traceInfo(`[orch] sendToSession(parent) on /done failed: ${err.message} (non-fatal)`);
378
+ }
379
+ }
380
+ // Destroy the in-memory session
381
+ try {
382
+ yield session.destroy();
383
+ }
384
+ catch { }
385
+ const resp = {
386
+ id: cmdMsg.id,
387
+ cmd: cmdMsg.cmd,
388
+ result: { ok: true, message: "Session completed" },
389
+ };
390
+ yield* writeCommandResponse(resp);
391
+ publishStatus("completed");
392
+ return "done";
393
+ }
394
+ default: {
395
+ const resp = {
396
+ id: cmdMsg.id,
397
+ cmd: cmdMsg.cmd,
398
+ error: `Unknown command: ${cmdMsg.cmd}`,
399
+ };
400
+ yield* writeCommandResponse(resp);
401
+ publishStatus("idle");
402
+ continue;
403
+ }
404
+ }
405
+ }
406
+ const childUpdate = parseChildUpdate(msgData.prompt);
407
+ if (childUpdate) {
408
+ yield* applyChildUpdate(childUpdate);
409
+ continue;
410
+ }
411
+ prompt = msgData.prompt;
412
+ gotPrompt = true;
413
+ }
414
+ }
415
+ // If the session needs hydration, the LLM lost in-memory context.
416
+ // Wrap the user's prompt with resume instructions so the LLM picks up where it left off.
417
+ if (needsHydration && blobEnabled && prompt) {
418
+ prompt = wrapWithResumeContext(prompt);
419
+ }
420
+ ctx.traceInfo(`[turn ${iteration}] session=${input.sessionId} prompt="${prompt.slice(0, 80)}"`);
421
+ // ② HYDRATE if session was dehydrated (with retry)
422
+ if (needsHydration && blobEnabled) {
423
+ let hydrateAttempts = 0;
424
+ while (true) {
425
+ try {
426
+ affinityKey = yield ctx.newGuid();
427
+ session = createSessionProxy(ctx, input.sessionId, affinityKey, config);
428
+ yield session.hydrate();
429
+ needsHydration = false;
430
+ break;
431
+ }
432
+ catch (hydrateErr) {
433
+ const hMsg = hydrateErr.message || String(hydrateErr);
434
+ // Blob was deleted (e.g. after a reset) — skip hydration, start fresh
435
+ if (hMsg.includes("blob does not exist") || hMsg.includes("BlobNotFound") || hMsg.includes("404")) {
436
+ ctx.traceInfo(`[orch] hydrate skipped — blob not found, starting fresh session`);
437
+ needsHydration = false;
438
+ break;
439
+ }
440
+ hydrateAttempts++;
441
+ ctx.traceInfo(`[orch] hydrate FAILED (attempt ${hydrateAttempts}/${MAX_RETRIES}): ${hMsg}`);
442
+ if (hydrateAttempts >= MAX_RETRIES) {
443
+ publishStatus("error", {
444
+ error: `Hydrate failed after ${MAX_RETRIES} attempts: ${hMsg}`,
445
+ retriesExhausted: true,
446
+ });
447
+ // Can't proceed without hydration — wait for next user message to retry
448
+ break;
449
+ }
450
+ const hydrateDelay = 10 * Math.pow(2, hydrateAttempts - 1);
451
+ publishStatus("error", {
452
+ error: `Hydrate failed: ${hMsg} (retry ${hydrateAttempts}/${MAX_RETRIES} in ${hydrateDelay}s)`,
453
+ });
454
+ yield ctx.scheduleTimer(hydrateDelay * 1000);
455
+ }
456
+ }
457
+ if (needsHydration)
458
+ continue; // hydrate exhausted retries — go back to dequeue
459
+ }
460
+ // ③ RUN TURN via SessionProxy (with retry on failure)
461
+ publishStatus("running");
462
+ let turnResult;
463
+ try {
464
+ turnResult = yield session.runTurn(prompt);
465
+ }
466
+ catch (err) {
467
+ // Activity failed (e.g. Copilot timeout, network error).
468
+ const errorMsg = err.message || String(err);
469
+ retryCount++;
470
+ ctx.traceInfo(`[orch] runTurn FAILED (attempt ${retryCount}/${MAX_RETRIES}): ${errorMsg}`);
471
+ if (retryCount >= MAX_RETRIES) {
472
+ // Exhausted retries — park in error state but don't crash.
473
+ // The orchestration stays alive and will retry on the next user message.
474
+ ctx.traceInfo(`[orch] max retries exhausted, waiting for user input`);
475
+ publishStatus("error", {
476
+ error: `Failed after ${MAX_RETRIES} attempts: ${errorMsg}`,
477
+ retriesExhausted: true,
478
+ });
479
+ // Reset retry count and wait for next user message
480
+ retryCount = 0;
481
+ continue;
482
+ }
483
+ publishStatus("error", {
484
+ error: `${errorMsg} (retry ${retryCount}/${MAX_RETRIES} in 15s)`,
485
+ });
486
+ // Exponential backoff: 15s, 30s, 60s
487
+ const retryDelay = 15 * Math.pow(2, retryCount - 1);
488
+ ctx.traceInfo(`[orch] retrying in ${retryDelay}s`);
489
+ if (blobEnabled) {
490
+ yield* dehydrateAndReset("error");
491
+ }
492
+ yield ctx.scheduleTimer(retryDelay * 1000);
493
+ yield versionedContinueAsNew(continueInput({
494
+ prompt,
495
+ retryCount,
496
+ needsHydration: blobEnabled ? true : needsHydration,
497
+ }));
498
+ return "";
499
+ }
500
+ // Successful activity — reset retry counter
501
+ retryCount = 0;
502
+ const result = typeof turnResult === "string"
503
+ ? JSON.parse(turnResult) : turnResult;
504
+ iteration++;
505
+ // ── Summarize title if due ──────────────────────────
506
+ yield* maybeSummarize();
507
+ // ④ HANDLE RESULT
508
+ switch (result.type) {
509
+ case "completed":
510
+ ctx.traceInfo(`[response] ${result.content}`);
511
+ yield* writeLatestResponse({
512
+ iteration,
513
+ type: "completed",
514
+ content: result.content,
515
+ model: result.model,
516
+ });
517
+ // If this is a child orchestration, notify the parent about our completion
518
+ // via the SDK — sends to the parent's "messages" queue like any other message.
519
+ if (parentSessionId) {
520
+ try {
521
+ yield manager.sendToSession(parentSessionId, `[CHILD_UPDATE from=${input.sessionId} type=completed iter=${iteration}]\n${result.content.slice(0, 2000)}`);
522
+ }
523
+ catch (err) {
524
+ ctx.traceInfo(`[orch] sendToSession(parent) failed: ${err.message} (non-fatal)`);
525
+ }
526
+ // System sub-agents (sweeper, resourcemgr) should keep running forever.
527
+ // Non-system sub-agents auto-terminate after completing their task.
528
+ if (input.isSystem) {
529
+ ctx.traceInfo(`[orch] system sub-agent completed turn, continuing loop`);
530
+ yield* maybeCheckpoint();
531
+ continue;
532
+ }
533
+ // Non-system sub-agents auto-terminate after completing their task and notifying
534
+ // the parent. Without this, they sit in the idle loop forever (idleTimeout=-1)
535
+ // and accumulate as zombie orchestrations.
536
+ ctx.traceInfo(`[orch] sub-agent completed task, auto-terminating`);
537
+ try {
538
+ yield session.destroy();
539
+ }
540
+ catch { }
541
+ publishStatus("completed");
542
+ return "done";
543
+ }
544
+ if (!blobEnabled || idleTimeout < 0) {
545
+ // Checkpoint while idle (no dehydration path)
546
+ yield* maybeCheckpoint();
547
+ continue;
548
+ }
549
+ // Race: next message vs idle timeout
550
+ {
551
+ publishStatus("idle");
552
+ yield* maybeCheckpoint();
553
+ const idleDeadline = (yield ctx.utcNow()) + idleTimeout * 1000;
554
+ while (true) {
555
+ const now = yield ctx.utcNow();
556
+ const remainingMs = Math.max(0, idleDeadline - now);
557
+ if (remainingMs === 0)
558
+ break;
559
+ const nextMsg = ctx.dequeueEvent("messages");
560
+ const idleTimer = ctx.scheduleTimer(remainingMs);
561
+ const raceResult = yield ctx.race(nextMsg, idleTimer);
562
+ if (raceResult.index === 0) {
563
+ const raceMsg = typeof raceResult.value === "string"
564
+ ? JSON.parse(raceResult.value) : (raceResult.value ?? {});
565
+ const childUpdate = parseChildUpdate(raceMsg.prompt);
566
+ if (childUpdate) {
567
+ yield* applyChildUpdate(childUpdate);
568
+ continue;
569
+ }
570
+ ctx.traceInfo("[session] user responded within idle window");
571
+ if (raceMsg.prompt) {
572
+ yield versionedContinueAsNew(continueInput({ prompt: raceMsg.prompt }));
573
+ }
574
+ else {
575
+ yield versionedContinueAsNew(continueInput());
576
+ }
577
+ return "";
578
+ }
579
+ break;
580
+ }
581
+ // Idle timeout → dehydrate. Next message will need resume context.
582
+ ctx.traceInfo("[session] idle timeout, dehydrating");
583
+ yield* dehydrateAndReset("idle");
584
+ // Don't continueAsNew with a prompt — wait for the next user message,
585
+ // which will be wrapped with resume context because needsHydration=true.
586
+ yield versionedContinueAsNew(continueInput());
587
+ return "";
588
+ }
589
+ case "wait":
590
+ // Capture original user prompt as task context for recurring tasks.
591
+ // This ensures the LLM remembers its task even after conversation truncation.
592
+ if (!taskContext) {
593
+ taskContext = prompt.slice(0, 2000);
594
+ const base = typeof baseSystemMessage === 'string'
595
+ ? baseSystemMessage ?? ''
596
+ : baseSystemMessage?.content ?? '';
597
+ config.systemMessage = base + (base ? '\n\n' : '') +
598
+ '[RECURRING TASK]\n' +
599
+ 'Original user request (always remember, even if conversation history is truncated):\n"' +
600
+ taskContext + '"';
601
+ }
602
+ // If this is a child orchestration, notify the parent on every wait cycle
603
+ // via the SDK — sends a message to the parent's "messages" queue.
604
+ if (parentSessionId) {
605
+ try {
606
+ const notifyContent = result.content
607
+ ? result.content.slice(0, 2000)
608
+ : `[wait: ${result.reason} (${result.seconds}s)]`;
609
+ yield manager.sendToSession(parentSessionId, `[CHILD_UPDATE from=${input.sessionId} type=wait iter=${iteration}]\n${notifyContent}`);
610
+ }
611
+ catch (err) {
612
+ ctx.traceInfo(`[orch] sendToSession(parent) wait failed: ${err.message} (non-fatal)`);
613
+ }
614
+ }
615
+ ctx.traceInfo(`[orch] durable timer: ${result.seconds}s (${result.reason})`);
616
+ {
617
+ const shouldDehydrate = blobEnabled && result.seconds > dehydrateThreshold;
618
+ if (shouldDehydrate) {
619
+ yield* dehydrateAndReset("timer");
620
+ }
621
+ const waitStartedAt = yield ctx.utcNow();
622
+ if (result.content) {
623
+ yield* writeLatestResponse({
624
+ iteration,
625
+ type: "wait",
626
+ content: result.content,
627
+ waitReason: result.reason,
628
+ waitSeconds: result.seconds,
629
+ waitStartedAt,
630
+ model: result.model,
631
+ });
632
+ ctx.traceInfo(`[orch] intermediate: ${result.content.slice(0, 80)}`);
633
+ }
634
+ publishStatus("waiting", {
635
+ waitSeconds: result.seconds,
636
+ waitReason: result.reason,
637
+ waitStartedAt,
638
+ });
639
+ // Checkpoint before the blocking wait
640
+ if (!shouldDehydrate)
641
+ yield* maybeCheckpoint();
642
+ const timerTask = ctx.scheduleTimer(result.seconds * 1000);
643
+ const interruptMsg = ctx.dequeueEvent("messages");
644
+ const timerRace = yield ctx.race(timerTask, interruptMsg);
645
+ if (timerRace.index === 1) {
646
+ const interruptData = typeof timerRace.value === "string"
647
+ ? JSON.parse(timerRace.value) : (timerRace.value ?? {});
648
+ const childUpdate = parseChildUpdate(interruptData.prompt);
649
+ if (childUpdate) {
650
+ yield* applyChildUpdate(childUpdate);
651
+ const interruptedAt = yield ctx.utcNow();
652
+ const elapsedSec = Math.round((interruptedAt - waitStartedAt) / 1000);
653
+ const remainingSec = Math.max(0, result.seconds - elapsedSec);
654
+ if (remainingSec === 0) {
655
+ const timerPrompt = `The ${result.seconds} second wait is now complete. Continue with your task.`;
656
+ yield versionedContinueAsNew(continueInput({
657
+ prompt: timerPrompt,
658
+ needsHydration: shouldDehydrate ? true : needsHydration,
659
+ }));
660
+ }
661
+ else {
662
+ yield versionedContinueAsNew(continueInput({
663
+ prompt: `The wait was partially completed (${elapsedSec}s elapsed, ${remainingSec}s remain). Resume the wait for the remaining ${remainingSec} seconds.`,
664
+ needsHydration: shouldDehydrate ? true : needsHydration,
665
+ }));
666
+ }
667
+ return "";
668
+ }
669
+ ctx.traceInfo(`[session] wait interrupted: "${(interruptData.prompt || "").slice(0, 60)}"`);
670
+ // Calculate remaining time for resume context
671
+ const interruptedAt = yield ctx.utcNow();
672
+ const elapsedSec = Math.round((interruptedAt - waitStartedAt) / 1000);
673
+ const remainingSec = Math.max(0, result.seconds - elapsedSec);
674
+ const userPrompt = interruptData.prompt || "";
675
+ let finalPrompt;
676
+ if (shouldDehydrate && userPrompt) {
677
+ finalPrompt = wrapWithResumeContext(userPrompt, `Your timer was interrupted by a USER MESSAGE. You MUST respond to the user's message below before doing anything else. ` +
678
+ `Timer context: ${result.seconds}s timer (reason: "${result.reason}"), ` +
679
+ `${elapsedSec}s elapsed, ${remainingSec}s remain. ` +
680
+ `After fully addressing the user's message, resume the wait for the remaining ${remainingSec} seconds.`);
681
+ }
682
+ else if (userPrompt) {
683
+ // Not dehydrated but still interrupted — give timing context
684
+ finalPrompt = `${userPrompt}\n\n` +
685
+ `[SYSTEM: IMPORTANT — The above is a USER MESSAGE that interrupted your ${result.seconds}s timer (reason: "${result.reason}"). ` +
686
+ `You MUST respond to the user's message FIRST. ${elapsedSec}s elapsed, ${remainingSec}s remain. ` +
687
+ `After fully answering the user, resume the wait for the remaining ${remainingSec} seconds.]`;
688
+ }
689
+ else {
690
+ finalPrompt = userPrompt;
691
+ }
692
+ yield versionedContinueAsNew(continueInput({
693
+ prompt: finalPrompt,
694
+ needsHydration: shouldDehydrate ? true : needsHydration,
695
+ }));
696
+ return "";
697
+ }
698
+ const timerPrompt = `The ${result.seconds} second wait is now complete. Continue with your task.`;
699
+ yield versionedContinueAsNew(continueInput({
700
+ prompt: timerPrompt,
701
+ needsHydration: shouldDehydrate ? true : needsHydration,
702
+ }));
703
+ return "";
704
+ }
705
+ case "input_required":
706
+ ctx.traceInfo(`[orch] waiting for user input: ${result.question}`);
707
+ yield* writeLatestResponse({
708
+ iteration,
709
+ type: "input_required",
710
+ question: result.question,
711
+ choices: result.choices,
712
+ allowFreeform: result.allowFreeform,
713
+ model: result.model,
714
+ });
715
+ if (!blobEnabled || inputGracePeriod < 0) {
716
+ publishStatus("input_required");
717
+ yield* maybeCheckpoint();
718
+ const answerMsg = yield ctx.dequeueEvent("messages");
719
+ const answerData = typeof answerMsg === "string"
720
+ ? JSON.parse(answerMsg) : answerMsg;
721
+ yield versionedContinueAsNew(continueInput({
722
+ prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
723
+ needsHydration: false,
724
+ }));
725
+ return "";
726
+ }
727
+ if (inputGracePeriod === 0) {
728
+ publishStatus("input_required");
729
+ yield* dehydrateAndReset("input_required");
730
+ const answerMsg = yield ctx.dequeueEvent("messages");
731
+ const answerData = typeof answerMsg === "string"
732
+ ? JSON.parse(answerMsg) : answerMsg;
733
+ yield versionedContinueAsNew(continueInput({
734
+ prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
735
+ }));
736
+ return "";
737
+ }
738
+ // Race: user answer vs grace period
739
+ {
740
+ publishStatus("input_required");
741
+ const answerEvt = ctx.dequeueEvent("messages");
742
+ const graceTimer = ctx.scheduleTimer(inputGracePeriod * 1000);
743
+ const raceResult = yield ctx.race(answerEvt, graceTimer);
744
+ if (raceResult.index === 0) {
745
+ const answerData = typeof raceResult.value === "string"
746
+ ? JSON.parse(raceResult.value) : (raceResult.value ?? {});
747
+ yield versionedContinueAsNew(continueInput({
748
+ prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
749
+ needsHydration: false,
750
+ }));
751
+ return "";
752
+ }
753
+ yield* dehydrateAndReset("input_required");
754
+ const answerMsg = yield ctx.dequeueEvent("messages");
755
+ const answerData = typeof answerMsg === "string"
756
+ ? JSON.parse(answerMsg) : answerMsg;
757
+ yield versionedContinueAsNew(continueInput({
758
+ prompt: `The user was asked: "${result.question}"\nThe user responded: "${answerData.answer}"`,
759
+ }));
760
+ return "";
761
+ }
762
+ case "cancelled":
763
+ ctx.traceInfo("[session] turn cancelled");
764
+ continue;
765
+ // ─── Sub-Agent Result Handlers ───────────────────
766
+ case "spawn_agent": {
767
+ // Enforce nesting depth limit
768
+ const childNestingLevel = nestingLevel + 1;
769
+ if (childNestingLevel > MAX_NESTING_LEVEL) {
770
+ ctx.traceInfo(`[orch] spawn_agent denied: nesting level ${nestingLevel} is at max (${MAX_NESTING_LEVEL})`);
771
+ yield versionedContinueAsNew(continueInput({
772
+ prompt: `[SYSTEM: spawn_agent failed — you are already at nesting level ${nestingLevel} (max ${MAX_NESTING_LEVEL}). ` +
773
+ `Sub-agents at this depth cannot spawn further sub-agents. Handle the task directly instead.]`,
774
+ }));
775
+ return "";
776
+ }
777
+ // Enforce max sub-agents
778
+ const activeCount = subAgents.filter(a => a.status === "running").length;
779
+ if (activeCount >= MAX_SUB_AGENTS) {
780
+ ctx.traceInfo(`[orch] spawn_agent denied: ${activeCount}/${MAX_SUB_AGENTS} agents running`);
781
+ yield versionedContinueAsNew(continueInput({
782
+ prompt: `[SYSTEM: spawn_agent failed — you already have ${activeCount} running sub-agents (max ${MAX_SUB_AGENTS}). ` +
783
+ `Wait for some to complete before spawning more.]`,
784
+ }));
785
+ return "";
786
+ }
787
+ // ─── Resolve agent config if agent_name is provided ───
788
+ let agentTask = result.task;
789
+ let agentSystemMessage = result.systemMessage;
790
+ let agentToolNames = result.toolNames;
791
+ let agentModel = result.model;
792
+ let agentIsSystem = false;
793
+ let agentTitle;
794
+ let agentId;
795
+ let agentSplash;
796
+ let resolvedAgentName = result.agentName;
797
+ const applyAgentDef = (agentDef, useDefinitionDefaults = false) => {
798
+ agentTask = useDefinitionDefaults
799
+ ? (agentDef.initialPrompt || `You are the ${agentDef.name} agent. Begin your work.`)
800
+ : (result.task || agentDef.initialPrompt || `You are the ${agentDef.name} agent. Begin your work.`);
801
+ agentSystemMessage = useDefinitionDefaults
802
+ ? ({ mode: "replace", content: agentDef.prompt })
803
+ : (result.systemMessage ?? { mode: "replace", content: agentDef.prompt });
804
+ agentToolNames = useDefinitionDefaults
805
+ ? (agentDef.tools ?? undefined)
806
+ : (result.toolNames ?? agentDef.tools ?? undefined);
807
+ agentIsSystem = agentDef.system ?? false;
808
+ agentTitle = agentDef.title;
809
+ agentId = agentDef.id ?? resolvedAgentName;
810
+ agentSplash = agentDef.splash;
811
+ };
812
+ if (!resolvedAgentName && input.isSystem && agentTask) {
813
+ const titleMatch = agentTask.match(/You are the \*{0,2}([^*\n]+?Agent)\*{0,2}/i);
814
+ const inferredLookup = titleMatch?.[1]?.trim();
815
+ if (inferredLookup) {
816
+ const inferredDef = yield manager.resolveAgentConfig(inferredLookup);
817
+ if (inferredDef?.system && inferredDef?.parent) {
818
+ resolvedAgentName = inferredDef.id ?? inferredDef.name;
819
+ ctx.traceInfo(`[orch] normalized custom system spawn to named agent: ${resolvedAgentName}`);
820
+ applyAgentDef(inferredDef, true);
821
+ }
822
+ }
823
+ }
824
+ if (resolvedAgentName) {
825
+ ctx.traceInfo(`[orch] resolving agent config for: ${resolvedAgentName}`);
826
+ const agentDef = yield manager.resolveAgentConfig(resolvedAgentName);
827
+ if (!agentDef) {
828
+ yield versionedContinueAsNew(continueInput({
829
+ prompt: `[SYSTEM: spawn_agent failed — agent "${resolvedAgentName}" not found. Use list_agents to see available agents.]`,
830
+ }));
831
+ return "";
832
+ }
833
+ applyAgentDef(agentDef, resolvedAgentName !== result.agentName);
834
+ }
835
+ // If the parent is a system session, propagate isSystem to children
836
+ if (input.isSystem) {
837
+ agentIsSystem = true;
838
+ }
839
+ // Auto-detect title for custom spawns by system sessions:
840
+ // If the LLM didn't use agent_name, try to extract a reasonable title
841
+ // from the task or system_message rather than showing "System Agent".
842
+ if (!agentTitle && agentIsSystem) {
843
+ const text = agentTask || "";
844
+ // Look for "You are the **XYZ Agent**" or "You are the XYZ Agent" patterns
845
+ const titleMatch = text.match(/You are the \*{0,2}([^*\n]+?)\*{0,2}\s*[—–-]/i)
846
+ || text.match(/You are the \*{0,2}([^*\n]+?Agent)\*{0,2}/i);
847
+ if (titleMatch) {
848
+ agentTitle = titleMatch[1].trim();
849
+ }
850
+ }
851
+ ctx.traceInfo(`[orch] spawning sub-agent via SDK: task="${agentTask.slice(0, 80)}" model=${agentModel || "inherit"} agent=${resolvedAgentName || "custom"} nestingLevel=${childNestingLevel}`);
852
+ // Build child config — inherit parent's config with optional overrides
853
+ const childConfig = {
854
+ ...config,
855
+ ...(agentModel ? { model: agentModel } : {}),
856
+ ...(agentSystemMessage ? { systemMessage: agentSystemMessage } : {}),
857
+ ...(agentToolNames ? { toolNames: agentToolNames } : {}),
858
+ };
859
+ // Inject sub-agent identity into the child's system message so the LLM
860
+ // knows it's a sub-agent, what its task is, and that its output will be
861
+ // forwarded to the parent automatically.
862
+ const parentSystemMsg = typeof childConfig.systemMessage === "string"
863
+ ? childConfig.systemMessage
864
+ : childConfig.systemMessage?.content ?? "";
865
+ const canSpawnMore = childNestingLevel < MAX_NESTING_LEVEL;
866
+ const subAgentPreamble = `[SUB-AGENT CONTEXT]\n` +
867
+ `You are a sub-agent spawned by a parent session (ID: session-${input.sessionId}).\n` +
868
+ `Your nesting level: ${childNestingLevel} (max: ${MAX_NESTING_LEVEL}).\n` +
869
+ `Your task: "${agentTask.slice(0, 500)}"\n\n` +
870
+ `Instructions:\n` +
871
+ `- Focus exclusively on your assigned task.\n` +
872
+ `- Your final response will be automatically forwarded to the parent agent.\n` +
873
+ `- Be thorough but concise — the parent will synthesize results from multiple agents.\n` +
874
+ `- Do NOT ask the user for input — you are autonomous.\n` +
875
+ `- When your task is complete, provide a clear summary of your findings/results.\n` +
876
+ `- If you write any files with write_artifact, you MUST also call export_artifact and include the artifact:// link in your response.\n` +
877
+ `- For ANY waiting, sleeping, delaying, or scheduling, you MUST use the \`wait\` tool. ` +
878
+ `NEVER use setTimeout, sleep, setInterval, cron, or any other timing mechanism. ` +
879
+ `The wait tool is durable and survives process restarts.\n` +
880
+ (canSpawnMore
881
+ ? `- You CAN spawn your own sub-agents (you have ${MAX_NESTING_LEVEL - childNestingLevel} level(s) remaining). ` +
882
+ `Use them for parallel independent tasks.\n`
883
+ : `- You CANNOT spawn sub-agents — you are at the maximum nesting depth. Handle everything directly.\n`);
884
+ childConfig.systemMessage = subAgentPreamble + (parentSystemMsg ? "\n\n" + parentSystemMsg : "");
885
+ // Use the PilotSwarmClient SDK to create and start the child session.
886
+ // The activity generates a random UUID for the child session ID and returns it.
887
+ // This handles: CMS registration (with parentSessionId), orchestration startup,
888
+ // and initial task prompt — all through the standard SDK path.
889
+ let childSessionId;
890
+ try {
891
+ childSessionId = yield manager.spawnChildSession(input.sessionId, childConfig, agentTask, childNestingLevel, agentIsSystem, agentTitle, agentId, agentSplash);
892
+ }
893
+ catch (err) {
894
+ ctx.traceInfo(`[orch] spawnChildSession failed: ${err.message}`);
895
+ yield versionedContinueAsNew(continueInput({
896
+ prompt: `[SYSTEM: spawn_agent failed: ${err.message}]`,
897
+ }));
898
+ return "";
899
+ }
900
+ const childOrchId = `session-${childSessionId}`;
901
+ // Track the sub-agent
902
+ subAgents.push({
903
+ orchId: childOrchId,
904
+ sessionId: childSessionId,
905
+ task: agentTask.slice(0, 500),
906
+ status: "running",
907
+ });
908
+ // Feed confirmation back to the LLM
909
+ const spawnMsg = `[SYSTEM: Sub-agent spawned successfully.\n` +
910
+ ` Agent ID: ${childOrchId}\n` +
911
+ ` ${resolvedAgentName ? `Agent: ${resolvedAgentName}\n ` : ``}Task: "${agentTask.slice(0, 200)}"\n` +
912
+ ` The agent is now running autonomously. Use check_agents to monitor progress, ` +
913
+ `message_agent to send instructions. To wait for completion, use wait + check_agents ` +
914
+ `in a loop (choose an appropriate interval) so you can report progress to the user.]`;
915
+ yield versionedContinueAsNew(continueInput({ prompt: spawnMsg }));
916
+ return "";
917
+ }
918
+ case "message_agent": {
919
+ const targetOrchId = result.agentId;
920
+ const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
921
+ if (!agentEntry) {
922
+ ctx.traceInfo(`[orch] message_agent: unknown agent ${targetOrchId}`);
923
+ yield versionedContinueAsNew(continueInput({
924
+ prompt: `[SYSTEM: message_agent failed — agent "${targetOrchId}" not found. ` +
925
+ `Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
926
+ }));
927
+ return "";
928
+ }
929
+ ctx.traceInfo(`[orch] message_agent via SDK: ${agentEntry.sessionId} msg="${result.message.slice(0, 60)}"`);
930
+ try {
931
+ yield manager.sendToSession(agentEntry.sessionId, result.message);
932
+ }
933
+ catch (err) {
934
+ ctx.traceInfo(`[orch] message_agent failed: ${err.message}`);
935
+ yield versionedContinueAsNew(continueInput({
936
+ prompt: `[SYSTEM: message_agent failed: ${err.message}]`,
937
+ }));
938
+ return "";
939
+ }
940
+ yield versionedContinueAsNew(continueInput({
941
+ prompt: `[SYSTEM: Message sent to sub-agent ${targetOrchId}: "${result.message.slice(0, 200)}"]`,
942
+ }));
943
+ return "";
944
+ }
945
+ case "check_agents": {
946
+ ctx.traceInfo(`[orch] check_agents: ${subAgents.length} agents tracked`);
947
+ if (subAgents.length === 0) {
948
+ yield versionedContinueAsNew(continueInput({
949
+ prompt: `[SYSTEM: No sub-agents have been spawned yet.]`,
950
+ }));
951
+ return "";
952
+ }
953
+ // Get fresh status for each agent via the SDK
954
+ const statusLines = [];
955
+ for (const agent of subAgents) {
956
+ try {
957
+ const rawStatus = yield manager.getSessionStatus(agent.sessionId);
958
+ const parsed = JSON.parse(rawStatus);
959
+ // Update local tracking
960
+ // Sub-agents go "idle" when their turn completes
961
+ if (parsed.status === "completed" || parsed.status === "failed" || parsed.status === "idle") {
962
+ agent.status = parsed.status === "failed" ? "failed" : "completed";
963
+ if (parsed.result)
964
+ agent.result = parsed.result.slice(0, 1000);
965
+ }
966
+ statusLines.push(` - Agent ${agent.orchId}\n` +
967
+ ` Task: "${agent.task.slice(0, 120)}"\n` +
968
+ ` Status: ${parsed.status}\n` +
969
+ ` Iterations: ${parsed.iterations ?? 0}\n` +
970
+ ` Output: ${parsed.result ?? "(no output yet)"}`);
971
+ }
972
+ catch (err) {
973
+ statusLines.push(` - Agent ${agent.orchId}\n` +
974
+ ` Task: "${agent.task.slice(0, 120)}"\n` +
975
+ ` Status: unknown (error: ${err.message})`);
976
+ }
977
+ }
978
+ yield versionedContinueAsNew(continueInput({
979
+ prompt: `[SYSTEM: Sub-agent status report (${subAgents.length} agents):\n${statusLines.join("\n")}]`,
980
+ }));
981
+ return "";
982
+ }
983
+ case "list_sessions": {
984
+ ctx.traceInfo(`[orch] list_sessions`);
985
+ const rawSessions = yield manager.listSessions();
986
+ const sessions = JSON.parse(rawSessions);
987
+ const lines = sessions.map((s) => ` - ${s.sessionId}${s.sessionId === input.sessionId ? " (this session)" : ""}\n` +
988
+ ` Title: ${s.title ?? "(untitled)"}\n` +
989
+ ` Status: ${s.status}, Iterations: ${s.iterations ?? 0}\n` +
990
+ ` Parent: ${s.parentSessionId ?? "none"}`);
991
+ yield versionedContinueAsNew(continueInput({
992
+ prompt: `[SYSTEM: Active sessions (${sessions.length}):\n${lines.join("\n")}]`,
993
+ }));
994
+ return "";
995
+ }
996
+ case "wait_for_agents": {
997
+ let targetIds = result.agentIds;
998
+ // If empty, wait for all running agents
999
+ if (!targetIds || targetIds.length === 0) {
1000
+ targetIds = subAgents.filter(a => a.status === "running").map(a => a.orchId);
1001
+ }
1002
+ if (targetIds.length === 0) {
1003
+ ctx.traceInfo(`[orch] wait_for_agents: no running agents to wait for`);
1004
+ yield versionedContinueAsNew(continueInput({
1005
+ prompt: `[SYSTEM: No running sub-agents to wait for. All agents have already completed.]`,
1006
+ }));
1007
+ return "";
1008
+ }
1009
+ ctx.traceInfo(`[orch] wait_for_agents: waiting for ${targetIds.length} agents`);
1010
+ publishStatus("running");
1011
+ // Event-driven wait: children send updates to the parent's "messages"
1012
+ // queue via sendToSession. We race messages vs a fallback poll timer.
1013
+ // Child updates arrive as "[CHILD_UPDATE from=... type=...]" messages.
1014
+ const POLL_INTERVAL_MS = 30_000; // 30s fallback poll (event-driven, so rarely needed)
1015
+ const MAX_WAIT_ITERATIONS = 360;
1016
+ for (let waitIter = 0; waitIter < MAX_WAIT_ITERATIONS; waitIter++) {
1017
+ // Check if all targets are done (from local tracking)
1018
+ const stillRunning = targetIds.filter(id => {
1019
+ const agent = subAgents.find(a => a.orchId === id);
1020
+ return agent && agent.status === "running";
1021
+ });
1022
+ if (stillRunning.length === 0)
1023
+ break;
1024
+ // Race: message (child update or user) vs fallback poll timer
1025
+ const msg = ctx.dequeueEvent("messages");
1026
+ const pollTimer = ctx.scheduleTimer(POLL_INTERVAL_MS);
1027
+ const waitRace = yield ctx.race(msg, pollTimer);
1028
+ if (waitRace.index === 0) {
1029
+ // Message arrived — could be a child update or a user message
1030
+ const msgData = typeof waitRace.value === "string"
1031
+ ? JSON.parse(waitRace.value) : (waitRace.value ?? {});
1032
+ // Check if it's a child update (sent by sendToSession from child orch)
1033
+ const childUpdateMatch = typeof msgData.prompt === "string"
1034
+ && msgData.prompt.match(/^\[CHILD_UPDATE from=(\S+) type=(\S+)/);
1035
+ if (childUpdateMatch) {
1036
+ const childSessionId = childUpdateMatch[1];
1037
+ const updateType = childUpdateMatch[2].replace(/\]$/, "");
1038
+ const content = msgData.prompt.split("\n").slice(1).join("\n").trim();
1039
+ ctx.traceInfo(`[orch] wait_for_agents: child update from=${childSessionId} type=${updateType}`);
1040
+ const agent = subAgents.find(a => a.sessionId === childSessionId);
1041
+ if (agent) {
1042
+ if (content)
1043
+ agent.result = content.slice(0, 2000);
1044
+ // Check via SDK if done (the update type alone isn't authoritative
1045
+ // since "completed" means turn completed, not necessarily finished)
1046
+ try {
1047
+ const rawStatus = yield manager.getSessionStatus(agent.sessionId);
1048
+ const parsed = JSON.parse(rawStatus);
1049
+ // Sub-agents go "idle" when their turn completes (they have no user to wait for)
1050
+ if (parsed.status === "completed" || parsed.status === "failed" || parsed.status === "idle") {
1051
+ agent.status = parsed.status === "failed" ? "failed" : "completed";
1052
+ if (parsed.result)
1053
+ agent.result = parsed.result.slice(0, 2000);
1054
+ }
1055
+ }
1056
+ catch { }
1057
+ }
1058
+ continue;
1059
+ }
1060
+ // Not a child update — it's a user message interrupting the wait
1061
+ if (msgData.prompt) {
1062
+ ctx.traceInfo(`[orch] wait_for_agents interrupted by user: "${msgData.prompt.slice(0, 60)}"`);
1063
+ yield versionedContinueAsNew(continueInput({
1064
+ prompt: msgData.prompt,
1065
+ }));
1066
+ return "";
1067
+ }
1068
+ }
1069
+ else {
1070
+ // Timer fired — fallback poll via SDK for any agents we missed
1071
+ ctx.traceInfo(`[orch] wait_for_agents: fallback poll, checking ${stillRunning.length} agents`);
1072
+ for (const targetId of stillRunning) {
1073
+ const agent = subAgents.find(a => a.orchId === targetId);
1074
+ if (!agent || agent.status !== "running")
1075
+ continue;
1076
+ try {
1077
+ const rawStatus = yield manager.getSessionStatus(agent.sessionId);
1078
+ const parsed = JSON.parse(rawStatus);
1079
+ // Sub-agents go "idle" when their turn completes
1080
+ if (parsed.status === "completed" || parsed.status === "failed" || parsed.status === "idle") {
1081
+ agent.status = parsed.status === "failed" ? "failed" : "completed";
1082
+ if (parsed.result)
1083
+ agent.result = parsed.result.slice(0, 2000);
1084
+ }
1085
+ }
1086
+ catch { }
1087
+ }
1088
+ }
1089
+ }
1090
+ // Build results summary
1091
+ const resultLines = [];
1092
+ for (const targetId of targetIds) {
1093
+ const agent = subAgents.find(a => a.orchId === targetId);
1094
+ if (!agent)
1095
+ continue;
1096
+ resultLines.push(` - Agent ${agent.orchId}\n` +
1097
+ ` Task: "${agent.task.slice(0, 120)}"\n` +
1098
+ ` Status: ${agent.status}\n` +
1099
+ ` Result: ${agent.result ?? "(no result)"}`);
1100
+ }
1101
+ yield versionedContinueAsNew(continueInput({
1102
+ prompt: `[SYSTEM: Sub-agents completed:\n${resultLines.join("\n")}]`,
1103
+ }));
1104
+ return "";
1105
+ }
1106
+ case "complete_agent": {
1107
+ const targetOrchId = result.agentId;
1108
+ const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
1109
+ if (!agentEntry) {
1110
+ ctx.traceInfo(`[orch] complete_agent: unknown agent ${targetOrchId}`);
1111
+ yield versionedContinueAsNew(continueInput({
1112
+ prompt: `[SYSTEM: complete_agent failed — agent "${targetOrchId}" not found. ` +
1113
+ `Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
1114
+ }));
1115
+ return "";
1116
+ }
1117
+ ctx.traceInfo(`[orch] complete_agent: sending /done to ${agentEntry.sessionId}`);
1118
+ try {
1119
+ // Send a /done command to the child's orchestration
1120
+ const cmdId = `done-${iteration}`;
1121
+ yield manager.sendCommandToSession(agentEntry.sessionId, { type: "cmd", cmd: "done", id: cmdId, args: { reason: "Completed by parent" } });
1122
+ agentEntry.status = "completed";
1123
+ }
1124
+ catch (err) {
1125
+ ctx.traceInfo(`[orch] complete_agent failed: ${err.message}`);
1126
+ yield versionedContinueAsNew(continueInput({
1127
+ prompt: `[SYSTEM: complete_agent failed: ${err.message}]`,
1128
+ }));
1129
+ return "";
1130
+ }
1131
+ yield versionedContinueAsNew(continueInput({
1132
+ prompt: `[SYSTEM: Sub-agent ${targetOrchId} has been completed gracefully.]`,
1133
+ }));
1134
+ return "";
1135
+ }
1136
+ case "cancel_agent": {
1137
+ const targetOrchId = result.agentId;
1138
+ const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
1139
+ if (!agentEntry) {
1140
+ ctx.traceInfo(`[orch] cancel_agent: unknown agent ${targetOrchId}`);
1141
+ yield versionedContinueAsNew(continueInput({
1142
+ prompt: `[SYSTEM: cancel_agent failed — agent "${targetOrchId}" not found. ` +
1143
+ `Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
1144
+ }));
1145
+ return "";
1146
+ }
1147
+ const cancelReason = result.reason ?? "Cancelled by parent";
1148
+ ctx.traceInfo(`[orch] cancel_agent: cancelling ${agentEntry.sessionId} reason="${cancelReason}"`);
1149
+ try {
1150
+ // Cascade: cancel all descendants of the target agent first
1151
+ const descendants = yield manager.getDescendantSessionIds(agentEntry.sessionId);
1152
+ if (descendants.length > 0) {
1153
+ ctx.traceInfo(`[orch] cancel_agent: cascading cancel to ${descendants.length} descendant(s)`);
1154
+ for (const descId of descendants) {
1155
+ try {
1156
+ yield manager.cancelSession(descId, `Ancestor ${agentEntry.sessionId} cancelled: ${cancelReason}`);
1157
+ }
1158
+ catch (err) {
1159
+ ctx.traceInfo(`[orch] cancel_agent: failed to cancel descendant ${descId}: ${err.message} (non-fatal)`);
1160
+ }
1161
+ }
1162
+ }
1163
+ yield manager.cancelSession(agentEntry.sessionId, cancelReason);
1164
+ agentEntry.status = "cancelled";
1165
+ }
1166
+ catch (err) {
1167
+ ctx.traceInfo(`[orch] cancel_agent failed: ${err.message}`);
1168
+ yield versionedContinueAsNew(continueInput({
1169
+ prompt: `[SYSTEM: cancel_agent failed: ${err.message}]`,
1170
+ }));
1171
+ return "";
1172
+ }
1173
+ yield versionedContinueAsNew(continueInput({
1174
+ prompt: `[SYSTEM: Sub-agent ${targetOrchId} has been cancelled.${result.reason ? ` Reason: ${result.reason}` : ""}]`,
1175
+ }));
1176
+ return "";
1177
+ }
1178
+ case "delete_agent": {
1179
+ const targetOrchId = result.agentId;
1180
+ const agentEntry = subAgents.find(a => a.orchId === targetOrchId);
1181
+ if (!agentEntry) {
1182
+ ctx.traceInfo(`[orch] delete_agent: unknown agent ${targetOrchId}`);
1183
+ yield versionedContinueAsNew(continueInput({
1184
+ prompt: `[SYSTEM: delete_agent failed — agent "${targetOrchId}" not found. ` +
1185
+ `Known agents: ${subAgents.map(a => a.orchId).join(", ") || "none"}]`,
1186
+ }));
1187
+ return "";
1188
+ }
1189
+ const deleteReason = result.reason ?? "Deleted by parent";
1190
+ ctx.traceInfo(`[orch] delete_agent: deleting ${agentEntry.sessionId} reason="${deleteReason}"`);
1191
+ try {
1192
+ // Cascade: delete all descendants of the target agent first
1193
+ const descendants = yield manager.getDescendantSessionIds(agentEntry.sessionId);
1194
+ if (descendants.length > 0) {
1195
+ ctx.traceInfo(`[orch] delete_agent: cascading delete to ${descendants.length} descendant(s)`);
1196
+ for (const descId of descendants) {
1197
+ try {
1198
+ yield manager.deleteSession(descId, `Ancestor ${agentEntry.sessionId} deleted: ${deleteReason}`);
1199
+ }
1200
+ catch (err) {
1201
+ ctx.traceInfo(`[orch] delete_agent: failed to delete descendant ${descId}: ${err.message} (non-fatal)`);
1202
+ }
1203
+ }
1204
+ }
1205
+ yield manager.deleteSession(agentEntry.sessionId, deleteReason);
1206
+ // Remove from subAgents tracking entirely
1207
+ subAgents = subAgents.filter(a => a.orchId !== targetOrchId);
1208
+ }
1209
+ catch (err) {
1210
+ ctx.traceInfo(`[orch] delete_agent failed: ${err.message}`);
1211
+ yield versionedContinueAsNew(continueInput({
1212
+ prompt: `[SYSTEM: delete_agent failed: ${err.message}]`,
1213
+ }));
1214
+ return "";
1215
+ }
1216
+ yield versionedContinueAsNew(continueInput({
1217
+ prompt: `[SYSTEM: Sub-agent ${targetOrchId} has been deleted.${result.reason ? ` Reason: ${result.reason}` : ""}]`,
1218
+ }));
1219
+ return "";
1220
+ }
1221
+ case "error": {
1222
+ // Treat like an activity failure — retry with backoff.
1223
+ retryCount++;
1224
+ ctx.traceInfo(`[orch] turn returned error (attempt ${retryCount}/${MAX_RETRIES}): ${result.message}`);
1225
+ if (retryCount >= MAX_RETRIES) {
1226
+ ctx.traceInfo(`[orch] max retries exhausted for turn error, waiting for user input`);
1227
+ publishStatus("error", {
1228
+ error: `Failed after ${MAX_RETRIES} attempts: ${result.message}`,
1229
+ retriesExhausted: true,
1230
+ });
1231
+ retryCount = 0;
1232
+ continue;
1233
+ }
1234
+ publishStatus("error", {
1235
+ error: `${result.message} (retry ${retryCount}/${MAX_RETRIES})`,
1236
+ });
1237
+ const errorRetryDelay = 15 * Math.pow(2, retryCount - 1);
1238
+ ctx.traceInfo(`[orch] retrying in ${errorRetryDelay}s after turn error`);
1239
+ if (blobEnabled) {
1240
+ yield* dehydrateAndReset("error");
1241
+ }
1242
+ yield ctx.scheduleTimer(errorRetryDelay * 1000);
1243
+ yield versionedContinueAsNew(continueInput({
1244
+ prompt,
1245
+ retryCount,
1246
+ needsHydration: blobEnabled ? true : needsHydration,
1247
+ }));
1248
+ return "";
1249
+ }
1250
+ }
1251
+ }
1252
+ }
1253
+ //# sourceMappingURL=orchestration_1_0_10.js.map