@oni.bot/core 1.0.3 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. package/dist/checkpointers/postgres.d.ts.map +1 -1
  2. package/dist/checkpointers/postgres.js +2 -1
  3. package/dist/checkpointers/postgres.js.map +1 -1
  4. package/dist/cli/inspect.d.ts.map +1 -1
  5. package/dist/cli/inspect.js +4 -2
  6. package/dist/cli/inspect.js.map +1 -1
  7. package/dist/coordination/request-reply.d.ts +11 -2
  8. package/dist/coordination/request-reply.d.ts.map +1 -1
  9. package/dist/coordination/request-reply.js.map +1 -1
  10. package/dist/events/bus.d.ts.map +1 -1
  11. package/dist/events/bus.js +1 -0
  12. package/dist/events/bus.js.map +1 -1
  13. package/dist/graph.d.ts +11 -1
  14. package/dist/graph.d.ts.map +1 -1
  15. package/dist/graph.js +4 -2
  16. package/dist/graph.js.map +1 -1
  17. package/dist/harness/agent-loop.d.ts +1 -7
  18. package/dist/harness/agent-loop.d.ts.map +1 -1
  19. package/dist/harness/agent-loop.js +2 -642
  20. package/dist/harness/agent-loop.js.map +1 -1
  21. package/dist/harness/loop/hooks.d.ts +7 -0
  22. package/dist/harness/loop/hooks.d.ts.map +1 -0
  23. package/dist/harness/loop/hooks.js +46 -0
  24. package/dist/harness/loop/hooks.js.map +1 -0
  25. package/dist/harness/loop/index.d.ts +8 -0
  26. package/dist/harness/loop/index.d.ts.map +1 -0
  27. package/dist/harness/loop/index.js +257 -0
  28. package/dist/harness/loop/index.js.map +1 -0
  29. package/dist/harness/loop/inference.d.ts +19 -0
  30. package/dist/harness/loop/inference.d.ts.map +1 -0
  31. package/dist/harness/loop/inference.js +121 -0
  32. package/dist/harness/loop/inference.js.map +1 -0
  33. package/dist/harness/loop/memory.d.ts +22 -0
  34. package/dist/harness/loop/memory.d.ts.map +1 -0
  35. package/dist/harness/loop/memory.js +73 -0
  36. package/dist/harness/loop/memory.js.map +1 -0
  37. package/dist/harness/loop/safety.d.ts +8 -0
  38. package/dist/harness/loop/safety.d.ts.map +1 -0
  39. package/dist/harness/loop/safety.js +21 -0
  40. package/dist/harness/loop/safety.js.map +1 -0
  41. package/dist/harness/loop/tools.d.ts +24 -0
  42. package/dist/harness/loop/tools.d.ts.map +1 -0
  43. package/dist/harness/loop/tools.js +184 -0
  44. package/dist/harness/loop/tools.js.map +1 -0
  45. package/dist/harness/loop/types.d.ts +7 -0
  46. package/dist/harness/loop/types.d.ts.map +1 -0
  47. package/dist/harness/loop/types.js +9 -0
  48. package/dist/harness/loop/types.js.map +1 -0
  49. package/dist/harness/memory/fs-compat.d.ts +3 -0
  50. package/dist/harness/memory/fs-compat.d.ts.map +1 -0
  51. package/dist/harness/memory/fs-compat.js +26 -0
  52. package/dist/harness/memory/fs-compat.js.map +1 -0
  53. package/dist/harness/memory/index.d.ts +105 -0
  54. package/dist/harness/memory/index.d.ts.map +1 -0
  55. package/dist/harness/memory/index.js +491 -0
  56. package/dist/harness/memory/index.js.map +1 -0
  57. package/dist/harness/memory/prompter.d.ts +7 -0
  58. package/dist/harness/memory/prompter.d.ts.map +1 -0
  59. package/dist/harness/memory/prompter.js +24 -0
  60. package/dist/harness/memory/prompter.js.map +1 -0
  61. package/dist/harness/memory/ranker.d.ts +15 -0
  62. package/dist/harness/memory/ranker.d.ts.map +1 -0
  63. package/dist/harness/memory/ranker.js +72 -0
  64. package/dist/harness/memory/ranker.js.map +1 -0
  65. package/dist/harness/memory/scanner.d.ts +26 -0
  66. package/dist/harness/memory/scanner.d.ts.map +1 -0
  67. package/dist/harness/memory/scanner.js +187 -0
  68. package/dist/harness/memory/scanner.js.map +1 -0
  69. package/dist/harness/memory/types.d.ts +50 -0
  70. package/dist/harness/memory/types.d.ts.map +1 -0
  71. package/dist/harness/memory/types.js +7 -0
  72. package/dist/harness/memory/types.js.map +1 -0
  73. package/dist/harness/memory-loader.d.ts +2 -149
  74. package/dist/harness/memory-loader.d.ts.map +1 -1
  75. package/dist/harness/memory-loader.js +1 -713
  76. package/dist/harness/memory-loader.js.map +1 -1
  77. package/dist/hitl/interrupt.d.ts.map +1 -1
  78. package/dist/hitl/interrupt.js +2 -1
  79. package/dist/hitl/interrupt.js.map +1 -1
  80. package/dist/prebuilt/react-agent.d.ts.map +1 -1
  81. package/dist/prebuilt/react-agent.js +6 -2
  82. package/dist/prebuilt/react-agent.js.map +1 -1
  83. package/dist/pregel/checkpointing.d.ts +12 -0
  84. package/dist/pregel/checkpointing.d.ts.map +1 -0
  85. package/dist/pregel/checkpointing.js +60 -0
  86. package/dist/pregel/checkpointing.js.map +1 -0
  87. package/dist/pregel/execution.d.ts +7 -0
  88. package/dist/pregel/execution.d.ts.map +1 -0
  89. package/dist/pregel/execution.js +178 -0
  90. package/dist/pregel/execution.js.map +1 -0
  91. package/dist/pregel/index.d.ts +61 -0
  92. package/dist/pregel/index.d.ts.map +1 -0
  93. package/dist/pregel/index.js +154 -0
  94. package/dist/pregel/index.js.map +1 -0
  95. package/dist/pregel/interrupts.d.ts +3 -0
  96. package/dist/pregel/interrupts.d.ts.map +1 -0
  97. package/dist/pregel/interrupts.js +7 -0
  98. package/dist/pregel/interrupts.js.map +1 -0
  99. package/dist/pregel/state-helpers.d.ts +12 -0
  100. package/dist/pregel/state-helpers.d.ts.map +1 -0
  101. package/dist/pregel/state-helpers.js +71 -0
  102. package/dist/pregel/state-helpers.js.map +1 -0
  103. package/dist/pregel/streaming.d.ts +5 -0
  104. package/dist/pregel/streaming.d.ts.map +1 -0
  105. package/dist/pregel/streaming.js +462 -0
  106. package/dist/pregel/streaming.js.map +1 -0
  107. package/dist/pregel/types.d.ts +48 -0
  108. package/dist/pregel/types.d.ts.map +1 -0
  109. package/dist/pregel/types.js +5 -0
  110. package/dist/pregel/types.js.map +1 -0
  111. package/dist/pregel.d.ts +1 -66
  112. package/dist/pregel.d.ts.map +1 -1
  113. package/dist/pregel.js +2 -854
  114. package/dist/pregel.js.map +1 -1
  115. package/dist/swarm/agent-node.d.ts +11 -0
  116. package/dist/swarm/agent-node.d.ts.map +1 -0
  117. package/dist/swarm/agent-node.js +156 -0
  118. package/dist/swarm/agent-node.js.map +1 -0
  119. package/dist/swarm/compile-ext.d.ts +5 -0
  120. package/dist/swarm/compile-ext.d.ts.map +1 -0
  121. package/dist/swarm/compile-ext.js +126 -0
  122. package/dist/swarm/compile-ext.js.map +1 -0
  123. package/dist/swarm/config.d.ts +147 -0
  124. package/dist/swarm/config.d.ts.map +1 -0
  125. package/dist/swarm/config.js +17 -0
  126. package/dist/swarm/config.js.map +1 -0
  127. package/dist/swarm/factories.d.ts +37 -0
  128. package/dist/swarm/factories.d.ts.map +1 -0
  129. package/dist/swarm/factories.js +703 -0
  130. package/dist/swarm/factories.js.map +1 -0
  131. package/dist/swarm/graph.d.ts +14 -147
  132. package/dist/swarm/graph.d.ts.map +1 -1
  133. package/dist/swarm/graph.js +30 -917
  134. package/dist/swarm/graph.js.map +1 -1
  135. package/dist/swarm/pool.js.map +1 -1
  136. package/dist/swarm/supervisor.js.map +1 -1
  137. package/dist/testing/index.d.ts +2 -2
  138. package/dist/testing/index.d.ts.map +1 -1
  139. package/dist/testing/index.js +3 -2
  140. package/dist/testing/index.js.map +1 -1
  141. package/dist/tools/define.d.ts +2 -1
  142. package/dist/tools/define.d.ts.map +1 -1
  143. package/dist/tools/define.js +3 -1
  144. package/dist/tools/define.js.map +1 -1
  145. package/dist/tools/types.d.ts.map +1 -1
  146. package/package.json +1 -1
@@ -0,0 +1,703 @@
1
+ // ============================================================
2
+ // src/swarm/factories.ts — SwarmGraph topology factory functions
3
+ // Each function receives a pre-constructed SwarmGraph<S> instance,
4
+ // wires it for the requested topology, and returns it.
5
+ //
6
+ // `import type { SwarmGraph }` is used to avoid a runtime circular dep:
7
+ // graph.ts → factories.ts → (type-only) graph.ts
8
+ // ============================================================
9
+ import { START, END, Command, Send } from "../types.js";
10
+ import { runWithTimeout } from "../internal/timeout.js";
11
+ // ----------------------------------------------------------------
12
+ // buildHierarchical
13
+ // ----------------------------------------------------------------
14
/**
 * Wires a SwarmGraph for the hierarchical (supervisor-over-agents) topology.
 *
 * Registers every agent from `config.agents`, installs a supervisor built
 * from `config.supervisor` (always routing on the "task"/"context" state
 * fields), and sets the swarm's error policy (defaulting to "fallback").
 *
 * @param config  Hierarchical topology config: `agents`, `supervisor`, `onError`.
 * @param swarm   Pre-constructed SwarmGraph instance to wire.
 * @returns The same swarm instance, wired for this topology.
 */
export function buildHierarchical(config, swarm) {
    config.agents.forEach((agentDef) => swarm.addAgent(agentDef));
    const { model, strategy, rules, systemPrompt, maxRounds, deadlineMs, autoRecover } = config.supervisor;
    swarm.addSupervisor({
        model,
        strategy,
        taskField: "task",
        contextField: "context",
        rules,
        systemPrompt,
        maxRounds,
        deadlineMs,
        autoRecover,
    });
    // "fallback" is the default policy when the caller specifies none.
    swarm.onErrorPolicy = config.onError ?? "fallback";
    return swarm;
}
32
+ // ----------------------------------------------------------------
33
+ // buildFanOut
34
+ // ----------------------------------------------------------------
35
/**
 * Wires a SwarmGraph for the fan-out topology: every agent runs against the
 * same input state (optionally in bounded batches of `maxConcurrency`, each
 * invocation guarded by `timeoutMs`), and `config.reducer` merges the
 * per-agent results (with optional `weights`).
 *
 * Agents are registered directly in the registry — `addAgent` is deliberately
 * not used because all wiring goes through a single orchestrator node.
 *
 * @param config  Fan-out config: `agents`, `maxConcurrency`, `timeoutMs`, `weights`, `reducer`.
 * @param swarm   Pre-constructed SwarmGraph instance to wire.
 * @returns The same swarm instance, wired for this topology.
 */
export function buildFanOut(config, swarm) {
    const { maxConcurrency, timeoutMs, weights } = config;
    const agentIds = config.agents.map((a) => a.id);
    for (const agentDef of config.agents) {
        swarm.registry.register(agentDef);
        swarm.agentIds.add(agentDef.id);
    }
    // Single orchestrator node that runs agents with concurrency/timeout control.
    swarm.inner.addNode("__fanout_runner__", async (state, cfg) => {
        const byId = new Map(config.agents.map((a) => [a.id, a]));
        // Runs one agent (start/complete/error hooks included) and never
        // throws — failures are returned as { error } records instead.
        const runOne = async (id) => {
            const agent = byId.get(id);
            try {
                await agent.hooks?.onStart?.(id, state);
                const result = await runWithTimeout(
                    () => agent.skeleton.invoke({ ...state }, { ...cfg, agentId: id }),
                    timeoutMs,
                    () => new Error(`Agent "${id}" timed out after ${timeoutMs}ms`),
                );
                await agent.hooks?.onComplete?.(id, result);
                return { id, result, error: null };
            }
            catch (err) {
                await agent.hooks?.onError?.(id, err);
                return { id, result: null, error: err };
            }
        };
        let outcomes;
        if (maxConcurrency != null && maxConcurrency > 0) {
            // Batched execution with concurrency limit.
            outcomes = [];
            for (let i = 0; i < agentIds.length; i += maxConcurrency) {
                const slice = agentIds.slice(i, i + maxConcurrency);
                outcomes.push(...(await Promise.all(slice.map(runOne))));
            }
        }
        else {
            // No limit configured — everything runs in parallel.
            outcomes = await Promise.all(agentIds.map(runOne));
        }
        // Fold outcomes into agentResults; errors become tagged records.
        const agentResults = { ...(state.agentResults ?? {}) };
        for (const { id, result, error } of outcomes) {
            agentResults[id] = error
                ? {
                    _error: true,
                    agent: id,
                    error: String(error instanceof Error ? error.message : error),
                }
                : result;
        }
        const reduced = config.reducer(agentResults, weights);
        return { ...reduced, agentResults };
    });
    swarm.inner.addEdge(START, "__fanout_runner__");
    swarm.inner.addEdge("__fanout_runner__", END);
    return swarm;
}
98
+ // ----------------------------------------------------------------
99
+ // buildPipeline
100
+ // ----------------------------------------------------------------
101
/**
 * Wires a SwarmGraph for the pipeline topology: stages run sequentially in
 * declaration order. Each stage either flows to its successor via a plain
 * edge or, when `config.transitions[id]` exists, routes through a
 * conditional handoff; the final stage's successor is END.
 *
 * @param config  Pipeline config: `stages` (ordered agents), optional `transitions`.
 * @param swarm   Pre-constructed SwarmGraph instance to wire.
 * @returns The same swarm instance, wired for this topology.
 * @throws Error when `stages` is empty.
 */
export function buildPipeline(config, swarm) {
    const { stages, transitions } = config;
    if (stages.length === 0) {
        throw new Error("SwarmGraph.pipeline: stages must contain at least one agent.");
    }
    const ids = stages.map((a) => a.id);
    stages.forEach((agentDef) => swarm.addAgent(agentDef));
    // Entry edge into the first stage.
    swarm.inner.addEdge(START, ids[0]);
    // Each stage flows to its successor (the last stage's successor is END),
    // unless a conditional handoff overrides the plain edge.
    ids.forEach((id, i) => {
        const successor = i < ids.length - 1 ? ids[i + 1] : END;
        if (transitions?.[id]) {
            swarm.addConditionalHandoff(id, transitions[id]);
        }
        else {
            swarm.inner.addEdge(id, successor);
        }
    });
    return swarm;
}
131
+ // ----------------------------------------------------------------
132
+ // buildPeerNetwork
133
+ // ----------------------------------------------------------------
134
/**
 * Wires a SwarmGraph for the peer-network topology: all agents are peers,
 * execution starts at `config.entrypoint`, and each agent's next hop is
 * decided by its conditional handoff function from `config.handoffs`.
 *
 * @param config  Peer-network config: `agents`, `entrypoint`, `handoffs`.
 * @param swarm   Pre-constructed SwarmGraph instance to wire.
 * @returns The same swarm instance, wired for this topology.
 */
export function buildPeerNetwork(config, swarm) {
    config.agents.forEach((agentDef) => swarm.addAgent(agentDef));
    // Entry edge into the designated entrypoint agent.
    swarm.inner.addEdge(START, config.entrypoint);
    // Every listed agent routes via its own handoff function.
    Object.entries(config.handoffs).forEach(([agentId, handoffFn]) => {
        swarm.addConditionalHandoff(agentId, handoffFn);
    });
    return swarm;
}
146
+ // ----------------------------------------------------------------
147
+ // buildMapReduce
148
+ // ----------------------------------------------------------------
149
/**
 * Wires a SwarmGraph for the map-reduce topology: a splitter fans the items
 * of `state[config.inputField]` out (via Send) to `poolSize` copies of the
 * mapper agent, then a reducer node applies `config.reducer` to the
 * collected `agentResults`.
 *
 * Item→mapper assignment follows `config.poolStrategy`:
 * "random", "least-busy" (fewest assignments within the current batch),
 * or the default "round-robin".
 *
 * @param config  Map-reduce config: `mapper`, `poolSize`, `inputField`, `poolStrategy`, `reducer`.
 * @param swarm   Pre-constructed SwarmGraph instance to wire.
 * @returns The same swarm instance, wired for this topology.
 * @throws Error when `poolSize` is less than 1.
 */
export function buildMapReduce(config, swarm) {
    const poolSize = config.poolSize ?? 1;
    if (poolSize < 1) {
        throw new Error("SwarmGraph.mapReduce: poolSize must be at least 1.");
    }
    // A pool of one keeps the mapper's own id; larger pools get suffixed copies.
    const mapperIds = Array.from({ length: poolSize }, (_, i) =>
        poolSize === 1 ? config.mapper.id : `${config.mapper.id}_${i}`);
    for (const id of mapperIds) {
        swarm.addAgent({ ...config.mapper, id });
    }
    // Splitter is a passthrough — the conditional edge below does the Send fan-out.
    swarm.inner.addNode("__splitter__", (_state) => ({}));
    // Reducer folds the collected per-mapper results through the user reducer.
    swarm.inner.addNode("__reducer__", (state) => config.reducer(state.agentResults ?? {}));
    swarm.inner.addEdge(START, "__splitter__");
    const { inputField } = config;
    const strategy = config.poolStrategy ?? "round-robin";
    swarm.inner.addConditionalEdges("__splitter__", ((state) => {
        const items = state[inputField];
        // No work — skip straight to the reducer.
        if (!Array.isArray(items) || items.length === 0) {
            return "__reducer__";
        }
        // Per-batch assignment counts, consumed by the least-busy strategy.
        const counts = new Map(mapperIds.map((id) => [id, 0]));
        return items.map((item, idx) => {
            let target;
            switch (strategy) {
                case "random":
                    target = mapperIds[Math.floor(Math.random() * mapperIds.length)];
                    break;
                case "least-busy": {
                    // Mapper with the fewest assignments in this batch so far.
                    let best = Infinity;
                    target = mapperIds[0];
                    for (const id of mapperIds) {
                        const c = counts.get(id) ?? 0;
                        if (c < best) {
                            best = c;
                            target = id;
                        }
                    }
                    break;
                }
                default:
                    // round-robin (default)
                    target = mapperIds[idx % mapperIds.length];
            }
            counts.set(target, (counts.get(target) ?? 0) + 1);
            return new Send(target, { ...state, task: String(item) });
        });
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    })); // SAFE: external boundary — Send[] return is valid but not in addConditionalEdges overload signature
    // Every mapper converges on the reducer.
    for (const id of mapperIds) {
        swarm.inner.addEdge(id, "__reducer__");
    }
    swarm.inner.addEdge("__reducer__", END);
    return swarm;
}
219
+ // ----------------------------------------------------------------
220
+ // buildDebate
221
+ // ----------------------------------------------------------------
222
/**
 * Wires a SwarmGraph for the debate topology: every debater argues in
 * parallel each round; a judge model then evaluates the arguments and
 * decides whether consensus was reached (by keyword, parsed verdict, or
 * score spread <= `consensusThreshold`) or the debate continues, up to
 * `config.judge.maxRounds` rounds.
 *
 * BUGFIX: `debateScores` was read as `state.context.debateScores`, which
 * throws when `state.context` is still undefined — e.g. in round 1, since
 * the judge's own first-round update only sets `supervisorRound` and the
 * rest of this function consistently guards with `state.context ?? {}`.
 * It is now read with optional chaining.
 *
 * @param config  Debate config: `debaters`, `judge` (model, maxRounds, consensusKeyword, scoreDebaters, consensusThreshold, systemPrompt).
 * @param swarm   Pre-constructed SwarmGraph instance to wire.
 * @returns The same swarm instance, wired for this topology.
 * @throws Error when `debaters` is empty.
 */
export function buildDebate(config, swarm) {
    if (config.debaters.length === 0) {
        throw new Error("SwarmGraph.debate: debaters must contain at least one agent.");
    }
    const debaterIds = config.debaters.map((d) => d.id);
    const consensusKeyword = config.judge.consensusKeyword ?? "CONSENSUS";
    for (const debater of config.debaters) {
        swarm.addAgent(debater);
    }
    const scoreDebaters = config.judge.scoreDebaters ?? false;
    const consensusThreshold = config.judge.consensusThreshold;
    // Judge node: evaluates arguments, decides continue or consensus
    swarm.inner.addNode("__judge__", async (state) => {
        const round = state.supervisorRound ?? 0;
        // If no agent results yet (first round), just kick off debaters
        const results = state.agentResults ?? {};
        if (round === 0 && Object.keys(results).length === 0) {
            return {
                supervisorRound: round + 1,
            };
        }
        // Evaluate arguments via judge model
        const argsText = Object.entries(results)
            .map(([id, r]) => `${id}: ${JSON.stringify(r)}`)
            .join("\n\n");
        const scoreInstruction = scoreDebaters
            ? `\nAlso provide a JSON object with scores for each debater and a verdict. Format: {"scores": {"debater_id": score}, "verdict": "CONTINUE" or "${consensusKeyword}"}`
            : "";
        const response = await config.judge.model.chat({
            messages: [{
                    role: "user",
                    content: `Round ${round}. Evaluate these arguments:\n\n${argsText}\n\nRespond "${consensusKeyword}" if consensus reached, otherwise "CONTINUE".${scoreInstruction}`,
                }],
            systemPrompt: config.judge.systemPrompt ?? "You are a debate judge.",
        });
        let isConsensus = false;
        let roundScores;
        // FIX: optional chaining — `state.context` may be nullish here.
        const existingScores = (state.context?.debateScores ?? []);
        // Try to parse structured response (JSON with scores + verdict)
        if (scoreDebaters) {
            try {
                const parsed = JSON.parse(response.content);
                if (parsed.scores) {
                    roundScores = parsed.scores;
                }
                if (parsed.verdict) {
                    isConsensus = parsed.verdict.includes(consensusKeyword);
                }
            }
            catch {
                // Fallback to keyword detection
                isConsensus = response.content.includes(consensusKeyword);
            }
            // Check consensus threshold if scores available
            if (!isConsensus && roundScores && consensusThreshold != null) {
                const scoreValues = Object.values(roundScores);
                if (scoreValues.length >= 2) {
                    const spread = Math.max(...scoreValues) - Math.min(...scoreValues);
                    if (spread <= consensusThreshold) {
                        isConsensus = true;
                    }
                }
            }
        }
        else {
            isConsensus = response.content.includes(consensusKeyword);
        }
        const nextRound = round + 1;
        // Force done if consensus or max rounds exhausted
        const isDone = isConsensus || nextRound > config.judge.maxRounds;
        const updatedScores = roundScores
            ? [...existingScores, { round, scores: roundScores }]
            : existingScores;
        return {
            done: isDone,
            supervisorRound: nextRound,
            context: {
                ...(state.context ?? {}),
                ...(scoreDebaters ? { debateScores: updatedScores } : {}),
            },
            messages: [{ role: "system", content: `Judge round ${round}: ${isDone ? "Consensus" : "Continue"}` }],
        };
    });
    // Fan-out node: passthrough — conditional edges handle the Send dispatch
    swarm.inner.addNode("__fanout__", (_state) => {
        return {};
    });
    // Wiring: START → __judge__
    swarm.inner.addEdge(START, "__judge__");
    // __judge__ → conditional (done → END, else → __fanout__)
    swarm.inner.addConditionalEdges("__judge__", (state) => {
        if (state.done)
            return END;
        return "__fanout__";
    });
    // __fanout__ → Send to all debaters in parallel
    swarm.inner.addConditionalEdges("__fanout__", ((state) => debaterIds.map((id) => new Send(id, { ...state }))
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    )); // SAFE: external boundary — Send[] return is valid but not in addConditionalEdges overload signature
    // Each debater → __judge__
    for (const id of debaterIds) {
        swarm.inner.addEdge(id, "__judge__");
    }
    return swarm;
}
327
+ // ----------------------------------------------------------------
328
+ // buildHierarchicalMesh
329
+ // SPECIAL CASE: makeSwarm callback used instead of SwarmGraph static methods
330
+ // to avoid runtime circular dependency (SwarmGraph is import type only).
331
+ // ----------------------------------------------------------------
332
/**
 * Wires a SwarmGraph for the hierarchical-mesh topology: each entry in
 * `config.teams` is compiled into its own sub-swarm (pipeline or
 * peer-network) and mounted as a single node; a `__coordinator__` node
 * routes between teams using an "llm", "round-robin", or "rule" strategy
 * until `done` is set or `maxRounds` is reached.
 *
 * `makeSwarm` is a factory callback used instead of SwarmGraph static
 * methods to avoid a runtime circular dependency (SwarmGraph is imported
 * type-only in this module).
 */
export function buildHierarchicalMesh(config, swarm, makeSwarm) {
    const teamIds = Object.keys(config.teams);
    const maxRounds = config.coordinator.maxRounds ?? 10;
    // Build each team as a compiled sub-skeleton and mount as a node
    for (const [teamId, teamConfig] of Object.entries(config.teams)) {
        let teamSwarm;
        if (teamConfig.topology === "pipeline") {
            teamSwarm = buildPipeline({ stages: teamConfig.agents }, makeSwarm());
        }
        else {
            // Any non-pipeline team is wired as a peer network whose
            // entrypoint is the team's first listed agent.
            teamSwarm = buildPeerNetwork({
                agents: teamConfig.agents,
                entrypoint: teamConfig.agents[0].id,
                handoffs: teamConfig.handoffs ?? {},
            }, makeSwarm());
        }
        const teamSkeleton = teamSwarm.compile();
        // Mount team as a single node in the outer graph.
        // Spread all top-level fields from teamResult (including done, context)
        // so they propagate to the outer coordinator state.
        swarm.inner.addNode(teamId, async (state, cfg) => {
            const teamResult = await teamSkeleton.invoke(state, cfg);
            return {
                ...teamResult,
                agentResults: {
                    ...(state.agentResults ?? {}),
                    [teamId]: teamResult,
                },
            };
        });
    }
    // Coordinator node: routes to teams
    swarm.inner.addNode("__coordinator__", async (state) => {
        const round = state.supervisorRound ?? 0;
        // Hard stop: round budget exhausted or a team already flagged done.
        if (round >= maxRounds || state.done) {
            return { done: true };
        }
        if (config.coordinator.strategy === "llm" && config.coordinator.model) {
            // Ask the coordinator model to pick the next team by name.
            const teamList = teamIds.map((id) => `- ${id}`).join("\n");
            const response = await config.coordinator.model.chat({
                messages: [{
                        role: "user",
                        content: `Task: ${state.task}\n\nAvailable teams:\n${teamList}\n\nRespond with the team name to route to, or "DONE" if complete.`,
                    }],
                systemPrompt: config.coordinator.systemPrompt ?? "You coordinate teams.",
            });
            const picked = response.content.trim();
            // An explicit DONE — or any reply that is not a known team id —
            // ends the run rather than routing blindly.
            if (picked === "DONE" || !teamIds.includes(picked)) {
                return { done: true, supervisorRound: round + 1 };
            }
            return new Command({
                update: { supervisorRound: round + 1, currentAgent: picked },
                goto: picked,
            });
        }
        // Round-robin strategy: route to teams in order, mark done after all visited
        if (config.coordinator.strategy === "round-robin") {
            const target = teamIds[round % teamIds.length];
            if (round >= teamIds.length) {
                return { done: true };
            }
            return new Command({
                update: { supervisorRound: round + 1 },
                goto: target,
            });
        }
        // Rule-based strategy: evaluate rules in order, route to the first match
        if (config.coordinator.strategy === "rule") {
            const rules = config.coordinator.rules ?? [];
            const task = String(state.task ?? "");
            const ctx = (state.context ?? {});
            for (const rule of rules) {
                if (rule.condition(task, ctx)) {
                    return new Command({
                        update: { supervisorRound: round + 1, currentAgent: rule.agentId },
                        goto: rule.agentId,
                    });
                }
            }
            // No matching rule — done
            return { done: true, supervisorRound: round + 1 };
        }
        // Unknown strategy — terminate rather than loop.
        return { done: true };
    });
    // Wiring: START → coordinator
    swarm.inner.addEdge(START, "__coordinator__");
    // Each team → coordinator (loop back unless done)
    for (const teamId of teamIds) {
        swarm.inner.addConditionalEdges(teamId, (state) => {
            if (state.done)
                return END;
            return "__coordinator__";
        });
    }
    // Coordinator → conditional (done → END, else handled by Command.goto)
    swarm.inner.addConditionalEdges("__coordinator__", (state) => {
        if (state.done)
            return END;
        // Command.goto handles routing — this is the fallback
        return END;
    });
    return swarm;
}
435
+ // ----------------------------------------------------------------
436
+ // buildRace
437
+ // ----------------------------------------------------------------
438
/**
 * Wires a SwarmGraph for the race topology: all agents run in parallel
 * against the same state and the first result that passes `config.accept`
 * wins; the winner's output is stored under `agentResults[winner.id]` and
 * `context.raceWinner`. If no result is accepted, `context.raceError` is
 * set instead. Each agent may also be bounded by `config.timeoutMs`.
 *
 * BUGFIX: the per-agent timeout previously used
 * `Promise.race([p, new Promise(... setTimeout ...)])`, leaving the timer
 * armed even after the agent settled — keeping the Node event loop alive
 * (and firing a useless callback) for up to `timeoutMs` per agent. The
 * timer handle is now captured and cleared as soon as the agent settles.
 *
 * @param config  Race config: `agents`, optional `accept` predicate, optional `timeoutMs`.
 * @param swarm   Pre-constructed SwarmGraph instance to wire.
 * @returns The same swarm instance, wired for this topology.
 */
export function buildRace(config, swarm) {
    const accept = config.accept ?? (() => true);
    const timeoutMs = config.timeoutMs;
    // Register agents so they appear in the registry
    for (const agentDef of config.agents) {
        swarm.registry.register(agentDef);
        swarm.agentIds.add(agentDef.id);
    }
    // Single node that races all agents — resolves as soon as one produces
    // an acceptable result (true Promise.race semantics, not Promise.all).
    swarm.inner.addNode("__race__", async (state, cfg) => {
        const agentPromises = config.agents.map((agent) => {
            // Each agent promise always fulfills — errors become { error } records.
            const p = agent.skeleton
                .invoke({ ...state }, { ...cfg, agentId: agent.id })
                .then((result) => ({ id: agent.id, result, error: null }), (err) => ({ id: agent.id, result: null, error: err }));
            if (timeoutMs != null) {
                // Race the agent against a timeout, clearing the timer once
                // the agent settles so no orphaned setTimeout lingers.
                return new Promise((resolve) => {
                    const timer = setTimeout(() => resolve({ id: agent.id, result: null, error: new Error("timeout") }), timeoutMs);
                    p.then((r) => {
                        clearTimeout(timer);
                        resolve(r);
                    });
                });
            }
            return p;
        });
        // Resolve as soon as the first acceptable result arrives.
        const winner = await new Promise((resolve) => {
            let remaining = agentPromises.length;
            if (remaining === 0) {
                resolve(null);
                return;
            }
            for (const p of agentPromises) {
                p.then((r) => {
                    let accepted = false;
                    try {
                        accepted = !r.error && accept(r.result);
                    }
                    catch {
                        // accept() threw — treat as not accepted to keep remaining decrement working
                    }
                    if (accepted) {
                        resolve(r);
                    }
                    else {
                        remaining--;
                        if (remaining === 0)
                            resolve(null);
                    }
                });
            }
        });
        if (winner) {
            return {
                agentResults: { [winner.id]: winner.result },
                context: { ...(state.context ?? {}), raceWinner: winner.id },
            };
        }
        // No acceptable result
        return {
            context: {
                ...(state.context ?? {}),
                raceError: "No agent produced an acceptable result",
            },
        };
    });
    swarm.inner.addEdge(START, "__race__");
    swarm.inner.addEdge("__race__", END);
    return swarm;
}
506
+ // ----------------------------------------------------------------
507
+ // buildDag
508
+ // ----------------------------------------------------------------
509
/**
 * Wires a SwarmGraph for the DAG topology: agents declare dependencies in
 * `config.dependencies` (agentId → prerequisite agentIds); a single
 * orchestrator node executes them in dependency order, running every
 * currently-unblocked agent in parallel per wave, and merges all outputs
 * into `agentResults`.
 *
 * Validates that every dependency names a known agent and rejects cyclic
 * graphs up front via DFS topological sort.
 */
export function buildDag(config, swarm) {
    const agentMap = new Map(config.agents.map((a) => [a.id, a]));
    // Validate dependencies
    for (const [_node, deps] of Object.entries(config.dependencies)) {
        for (const dep of deps) {
            if (!agentMap.has(dep)) {
                throw new Error(`Dependency "${dep}" not found in agents list.`);
            }
        }
    }
    // Cycle detection via topological sort
    const visited = new Set();     // nodes fully processed
    const visiting = new Set();    // nodes on the current DFS path (cycle sentinel)
    const sorted = [];             // topological order: dependencies before dependents
    function visit(id) {
        if (visited.has(id))
            return;
        if (visiting.has(id))
            throw new Error(`Cycle detected involving "${id}".`);
        visiting.add(id);
        for (const dep of config.dependencies[id] ?? []) {
            visit(dep);
        }
        visiting.delete(id);
        visited.add(id);
        sorted.push(id);
    }
    for (const agent of config.agents) {
        visit(agent.id);
    }
    for (const agent of config.agents) {
        swarm.addAgent(agent);
    }
    // Group agents into layers based on dependencies
    const deps = config.dependencies;
    // For DAG execution: use a single orchestrator node
    swarm.inner.addNode("__dag_runner__", async (state, cfg) => {
        const results = {};            // per-agent outputs accumulated across waves
        const completed = new Set();   // agent ids whose results are available
        // Process in topological order
        // Group into parallel batches
        const remaining = new Set(sorted);
        while (remaining.size > 0) {
            // Find all nodes whose deps are satisfied
            const ready = [];
            for (const id of remaining) {
                const idDeps = deps[id] ?? [];
                if (idDeps.every((d) => completed.has(d))) {
                    ready.push(id);
                }
            }
            // Execute ready nodes in parallel; each invocation sees the
            // results produced by all previously completed waves.
            const batchResults = await Promise.all(ready.map(async (id) => {
                const agent = agentMap.get(id);
                const result = await agent.skeleton.invoke({ ...state, agentResults: { ...(state.agentResults ?? {}), ...results } }, { ...cfg, agentId: id });
                return { id, result };
            }));
            for (const { id, result } of batchResults) {
                results[id] = result;
                completed.add(id);
                remaining.delete(id);
            }
        }
        return {
            agentResults: { ...(state.agentResults ?? {}), ...results },
        };
    });
    // Wire: START → __dag_runner__ → END
    // Remove any edges added by addAgent — reset private edges field
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    swarm.inner.edges = []; // SAFE: external boundary — clearing private StateGraph.edges to rewire DAG topology
    swarm.inner.addEdge(START, "__dag_runner__");
    swarm.inner.addEdge("__dag_runner__", END);
    return swarm;
}
584
+ // ----------------------------------------------------------------
585
+ // buildPool
586
+ // ----------------------------------------------------------------
587
/**
 * Wires a SwarmGraph for the worker-pool topology: `poolSize` copies of one
 * agent drain the items of `state[config.inputField]`, with at most
 * `poolSize` invocations in flight at once (hand-rolled semaphore), then
 * `config.reducer` folds the per-item results (keyed `item_<index>`).
 */
export function buildPool(config, swarm) {
    const poolSize = config.poolSize;
    // Create pool copies
    const poolIds = [];
    for (let i = 0; i < poolSize; i++) {
        // A pool of one keeps the original agent id; larger pools get suffixed copies.
        const id = poolSize === 1 ? config.agent.id : `${config.agent.id}_${i}`;
        poolIds.push(id);
        swarm.addAgent({ ...config.agent, id });
    }
    // Orchestrator node: dispatches items to pool agents and reduces
    swarm.inner.addNode("__pool_runner__", async (state, cfg) => {
        const items = state[config.inputField];
        if (!Array.isArray(items) || items.length === 0) {
            // No work — reduce over an empty result set.
            return config.reducer({});
        }
        // Semaphore for concurrency control
        let running = 0;
        const results = {};
        // Pre-assign each item to a pool slot round-robin by index.
        const queue = items.map((item, idx) => ({
            item,
            idx,
            targetId: poolIds[idx % poolIds.length],
        }));
        await new Promise((resolve, _reject) => {
            let completed = 0;
            const total = queue.length;
            // Starts as many queued items as the semaphore allows; re-entered
            // by each settled invocation until every item has completed.
            function processNext() {
                while (running < poolSize && queue.length > 0) {
                    const work = queue.shift();
                    // Respect removeAgent() — if the assigned slot was removed, redirect
                    // to an active pool slot; if none remain, mark the item as failed.
                    let agentDef = swarm.registry.getDef(work.targetId);
                    if (!agentDef) {
                        const activeIds = poolIds.filter((id) => !!swarm.registry.getDef(id));
                        if (activeIds.length > 0) {
                            work.targetId = activeIds[work.idx % activeIds.length];
                            agentDef = swarm.registry.getDef(work.targetId);
                        }
                        else {
                            results[`item_${work.idx}`] = { _error: `Pool slot removed; no active agents remain` };
                            completed++;
                            if (completed === total)
                                resolve();
                            continue;
                        }
                    }
                    running++;
                    // Use the full wrapped agentNode (hooks, retries, timeout) stored
                    // by addAgent() in swarm.inner.nodes rather than raw skeleton.invoke.
                    // eslint-disable-next-line @typescript-eslint/no-explicit-any
                    const wrappedFn = swarm.inner.nodes.get(work.targetId)?.fn;
                    const invocation = wrappedFn
                        ? wrappedFn({ ...state, task: String(work.item) }, { ...cfg, agentId: work.targetId })
                        : agentDef.skeleton.invoke({ ...state, task: String(work.item) }, { ...cfg, agentId: work.targetId });
                    invocation.then((result) => {
                        results[`item_${work.idx}`] = result;
                        running--;
                        completed++;
                        if (completed === total)
                            resolve();
                        else
                            processNext();
                    }, (err) => {
                        // Per-item failure is recorded, not thrown — the pool keeps draining.
                        results[`item_${work.idx}`] = { _error: String(err) };
                        running--;
                        completed++;
                        if (completed === total)
                            resolve();
                        else
                            processNext();
                    });
                }
            }
            if (total === 0)
                resolve();
            else
                processNext();
        });
        return config.reducer(results);
    });
    // Wire: START → __pool_runner__ → END (bypass agent edges)
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    swarm.inner.edges = []; // SAFE: external boundary — clearing private StateGraph.edges to rewire pool topology
    swarm.inner.addEdge(START, "__pool_runner__");
    swarm.inner.addEdge("__pool_runner__", END);
    return swarm;
}
674
+ // ----------------------------------------------------------------
675
+ // buildCompose
676
+ // ----------------------------------------------------------------
677
/**
 * Wires a SwarmGraph for the compose topology: each stage is itself a
 * sub-swarm, compiled and mounted as a single node, and the stages run as a
 * linear pipeline. Every stage's full output is spread into the outer state
 * and also recorded under `agentResults[stage.id]`.
 *
 * @param config  Compose config: `stages`, each `{ id, swarm }`.
 * @param swarm   Pre-constructed SwarmGraph instance to wire.
 * @returns The same swarm instance, wired for this topology.
 * @throws Error when `stages` is empty.
 */
export function buildCompose(config, swarm) {
    if (config.stages.length === 0) {
        throw new Error("SwarmGraph.compose: stages must contain at least one sub-swarm.");
    }
    const stageIds = [];
    for (const stage of config.stages) {
        stageIds.push(stage.id);
        const compiled = stage.swarm.compile();
        // Mount the compiled sub-swarm as one node; spread its result so
        // top-level fields propagate, and record it under agentResults too.
        swarm.inner.addNode(stage.id, async (state, cfg) => {
            const stageResult = await compiled.invoke(state, cfg);
            return {
                ...stageResult,
                agentResults: {
                    ...(state.agentResults ?? {}),
                    [stage.id]: stageResult,
                },
            };
        });
    }
    // Linear wiring: START → stage1 → stage2 → ... → END
    swarm.inner.addEdge(START, stageIds[0]);
    stageIds.slice(1).forEach((id, i) => swarm.inner.addEdge(stageIds[i], id));
    swarm.inner.addEdge(stageIds[stageIds.length - 1], END);
    return swarm;
}
703
+ //# sourceMappingURL=factories.js.map