@jackchen_me/open-multi-agent 0.1.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140)
  1. package/.github/ISSUE_TEMPLATE/bug_report.md +40 -0
  2. package/.github/ISSUE_TEMPLATE/feature_request.md +23 -0
  3. package/.github/pull_request_template.md +14 -0
  4. package/.github/workflows/ci.yml +23 -0
  5. package/CLAUDE.md +80 -0
  6. package/CODE_OF_CONDUCT.md +48 -0
  7. package/CONTRIBUTING.md +72 -0
  8. package/DECISIONS.md +43 -0
  9. package/README.md +144 -144
  10. package/README_zh.md +277 -0
  11. package/SECURITY.md +17 -0
  12. package/dist/agent/agent.d.ts +20 -1
  13. package/dist/agent/agent.d.ts.map +1 -1
  14. package/dist/agent/agent.js +233 -12
  15. package/dist/agent/agent.js.map +1 -1
  16. package/dist/agent/loop-detector.d.ts +39 -0
  17. package/dist/agent/loop-detector.d.ts.map +1 -0
  18. package/dist/agent/loop-detector.js +122 -0
  19. package/dist/agent/loop-detector.js.map +1 -0
  20. package/dist/agent/pool.d.ts +2 -1
  21. package/dist/agent/pool.d.ts.map +1 -1
  22. package/dist/agent/pool.js +4 -2
  23. package/dist/agent/pool.js.map +1 -1
  24. package/dist/agent/runner.d.ts +23 -1
  25. package/dist/agent/runner.d.ts.map +1 -1
  26. package/dist/agent/runner.js +113 -12
  27. package/dist/agent/runner.js.map +1 -1
  28. package/dist/agent/structured-output.d.ts +33 -0
  29. package/dist/agent/structured-output.d.ts.map +1 -0
  30. package/dist/agent/structured-output.js +116 -0
  31. package/dist/agent/structured-output.js.map +1 -0
  32. package/dist/index.d.ts +5 -2
  33. package/dist/index.d.ts.map +1 -1
  34. package/dist/index.js +4 -1
  35. package/dist/index.js.map +1 -1
  36. package/dist/llm/adapter.d.ts +12 -4
  37. package/dist/llm/adapter.d.ts.map +1 -1
  38. package/dist/llm/adapter.js +28 -5
  39. package/dist/llm/adapter.js.map +1 -1
  40. package/dist/llm/anthropic.d.ts +1 -1
  41. package/dist/llm/anthropic.d.ts.map +1 -1
  42. package/dist/llm/anthropic.js +2 -1
  43. package/dist/llm/anthropic.js.map +1 -1
  44. package/dist/llm/copilot.d.ts +92 -0
  45. package/dist/llm/copilot.d.ts.map +1 -0
  46. package/dist/llm/copilot.js +427 -0
  47. package/dist/llm/copilot.js.map +1 -0
  48. package/dist/llm/gemini.d.ts +65 -0
  49. package/dist/llm/gemini.d.ts.map +1 -0
  50. package/dist/llm/gemini.js +317 -0
  51. package/dist/llm/gemini.js.map +1 -0
  52. package/dist/llm/grok.d.ts +21 -0
  53. package/dist/llm/grok.d.ts.map +1 -0
  54. package/dist/llm/grok.js +24 -0
  55. package/dist/llm/grok.js.map +1 -0
  56. package/dist/llm/openai-common.d.ts +54 -0
  57. package/dist/llm/openai-common.d.ts.map +1 -0
  58. package/dist/llm/openai-common.js +242 -0
  59. package/dist/llm/openai-common.js.map +1 -0
  60. package/dist/llm/openai.d.ts +2 -2
  61. package/dist/llm/openai.d.ts.map +1 -1
  62. package/dist/llm/openai.js +23 -226
  63. package/dist/llm/openai.js.map +1 -1
  64. package/dist/orchestrator/orchestrator.d.ts +25 -1
  65. package/dist/orchestrator/orchestrator.d.ts.map +1 -1
  66. package/dist/orchestrator/orchestrator.js +214 -41
  67. package/dist/orchestrator/orchestrator.js.map +1 -1
  68. package/dist/task/queue.d.ts +31 -2
  69. package/dist/task/queue.d.ts.map +1 -1
  70. package/dist/task/queue.js +70 -3
  71. package/dist/task/queue.js.map +1 -1
  72. package/dist/task/task.d.ts +3 -0
  73. package/dist/task/task.d.ts.map +1 -1
  74. package/dist/task/task.js +5 -1
  75. package/dist/task/task.js.map +1 -1
  76. package/dist/team/messaging.d.ts.map +1 -1
  77. package/dist/team/messaging.js +2 -1
  78. package/dist/team/messaging.js.map +1 -1
  79. package/dist/tool/text-tool-extractor.d.ts +32 -0
  80. package/dist/tool/text-tool-extractor.d.ts.map +1 -0
  81. package/dist/tool/text-tool-extractor.js +187 -0
  82. package/dist/tool/text-tool-extractor.js.map +1 -0
  83. package/dist/types.d.ts +167 -7
  84. package/dist/types.d.ts.map +1 -1
  85. package/dist/utils/trace.d.ts +12 -0
  86. package/dist/utils/trace.d.ts.map +1 -0
  87. package/dist/utils/trace.js +30 -0
  88. package/dist/utils/trace.js.map +1 -0
  89. package/examples/05-copilot-test.ts +49 -0
  90. package/examples/06-local-model.ts +200 -0
  91. package/examples/07-fan-out-aggregate.ts +209 -0
  92. package/examples/08-gemma4-local.ts +192 -0
  93. package/examples/09-structured-output.ts +73 -0
  94. package/examples/10-task-retry.ts +132 -0
  95. package/examples/11-trace-observability.ts +133 -0
  96. package/examples/12-grok.ts +154 -0
  97. package/examples/13-gemini.ts +48 -0
  98. package/package.json +14 -3
  99. package/src/agent/agent.ts +273 -15
  100. package/src/agent/loop-detector.ts +137 -0
  101. package/src/agent/pool.ts +9 -2
  102. package/src/agent/runner.ts +148 -19
  103. package/src/agent/structured-output.ts +126 -0
  104. package/src/index.ts +17 -1
  105. package/src/llm/adapter.ts +29 -5
  106. package/src/llm/anthropic.ts +2 -1
  107. package/src/llm/copilot.ts +552 -0
  108. package/src/llm/gemini.ts +378 -0
  109. package/src/llm/grok.ts +29 -0
  110. package/src/llm/openai-common.ts +294 -0
  111. package/src/llm/openai.ts +31 -261
  112. package/src/orchestrator/orchestrator.ts +260 -40
  113. package/src/task/queue.ts +74 -4
  114. package/src/task/task.ts +8 -1
  115. package/src/team/messaging.ts +3 -1
  116. package/src/tool/text-tool-extractor.ts +219 -0
  117. package/src/types.ts +186 -6
  118. package/src/utils/trace.ts +34 -0
  119. package/tests/agent-hooks.test.ts +473 -0
  120. package/tests/agent-pool.test.ts +212 -0
  121. package/tests/approval.test.ts +464 -0
  122. package/tests/built-in-tools.test.ts +393 -0
  123. package/tests/gemini-adapter.test.ts +97 -0
  124. package/tests/grok-adapter.test.ts +74 -0
  125. package/tests/llm-adapters.test.ts +357 -0
  126. package/tests/loop-detection.test.ts +456 -0
  127. package/tests/openai-fallback.test.ts +159 -0
  128. package/tests/orchestrator.test.ts +281 -0
  129. package/tests/scheduler.test.ts +221 -0
  130. package/tests/semaphore.test.ts +57 -0
  131. package/tests/shared-memory.test.ts +122 -0
  132. package/tests/structured-output.test.ts +331 -0
  133. package/tests/task-queue.test.ts +244 -0
  134. package/tests/task-retry.test.ts +368 -0
  135. package/tests/task-utils.test.ts +155 -0
  136. package/tests/team-messaging.test.ts +329 -0
  137. package/tests/text-tool-extractor.test.ts +170 -0
  138. package/tests/tool-executor.test.ts +193 -0
  139. package/tests/trace.test.ts +453 -0
  140. package/vitest.config.ts +9 -0
@@ -0,0 +1,209 @@
1
+ /**
2
+ * Example 07 — Fan-Out / Aggregate (MapReduce) Pattern
3
+ *
4
+ * Demonstrates:
5
+ * - Fan-out: send the same question to N "analyst" agents in parallel
6
+ * - Aggregate: a "synthesizer" agent reads all analyst outputs and produces
7
+ * a balanced final report
8
+ * - AgentPool with runParallel() for concurrent fan-out
9
+ * - No tools needed — pure LLM reasoning to keep the focus on the pattern
10
+ *
11
+ * Run:
12
+ * npx tsx examples/07-fan-out-aggregate.ts
13
+ *
14
+ * Prerequisites:
15
+ * ANTHROPIC_API_KEY env var must be set.
16
+ */
17
+
18
+ import { Agent, AgentPool, ToolRegistry, ToolExecutor, registerBuiltInTools } from '../src/index.js'
19
+ import type { AgentConfig, AgentRunResult } from '../src/types.js'
20
+
21
+ // ---------------------------------------------------------------------------
22
+ // Analysis topic
23
+ // ---------------------------------------------------------------------------
24
+
25
+ const TOPIC = `Should a solo developer build a SaaS product that uses AI agents
26
+ for automated customer support? Consider the current state of AI technology,
27
+ market demand, competition, costs, and the unique constraints of being a solo
28
+ founder with limited time (~6 hours/day of productive work).`
29
+
30
+ // ---------------------------------------------------------------------------
31
+ // Analyst agent configs — three perspectives on the same question
32
+ // ---------------------------------------------------------------------------
33
+
34
+ const optimistConfig: AgentConfig = {
35
+ name: 'optimist',
36
+ model: 'claude-sonnet-4-6',
37
+ systemPrompt: `You are an optimistic technology analyst who focuses on
38
+ opportunities, upside potential, and emerging trends. You see possibilities
39
+ where others see obstacles. Back your optimism with concrete reasoning —
40
+ cite market trends, cost curves, and real capabilities. Keep your analysis
41
+ to 200-300 words.`,
42
+ maxTurns: 1,
43
+ temperature: 0.4,
44
+ }
45
+
46
+ const skepticConfig: AgentConfig = {
47
+ name: 'skeptic',
48
+ model: 'claude-sonnet-4-6',
49
+ systemPrompt: `You are a skeptical technology analyst who focuses on risks,
50
+ challenges, failure modes, and hidden costs. You stress-test assumptions and
51
+ ask "what could go wrong?" Back your skepticism with concrete reasoning —
52
+ cite failure rates, technical limitations, and market realities. Keep your
53
+ analysis to 200-300 words.`,
54
+ maxTurns: 1,
55
+ temperature: 0.4,
56
+ }
57
+
58
+ const pragmatistConfig: AgentConfig = {
59
+ name: 'pragmatist',
60
+ model: 'claude-sonnet-4-6',
61
+ systemPrompt: `You are a pragmatic technology analyst who focuses on practical
62
+ feasibility, execution complexity, and resource requirements. You care about
63
+ what works today, not what might work someday. You think in terms of MVPs,
64
+ timelines, and concrete tradeoffs. Keep your analysis to 200-300 words.`,
65
+ maxTurns: 1,
66
+ temperature: 0.4,
67
+ }
68
+
69
+ const synthesizerConfig: AgentConfig = {
70
+ name: 'synthesizer',
71
+ model: 'claude-sonnet-4-6',
72
+ systemPrompt: `You are a senior strategy advisor who synthesizes multiple
73
+ perspectives into a balanced, actionable recommendation. You do not simply
74
+ summarise — you weigh the arguments, identify where they agree and disagree,
75
+ and produce a clear verdict with next steps. Structure your output as:
76
+
77
+ 1. Key agreements across perspectives
78
+ 2. Key disagreements and how you weigh them
79
+ 3. Verdict (go / no-go / conditional go)
80
+ 4. Recommended next steps (3-5 bullet points)
81
+
82
+ Keep the final report to 300-400 words.`,
83
+ maxTurns: 1,
84
+ temperature: 0.3,
85
+ }
86
+
87
+ // ---------------------------------------------------------------------------
88
+ // Build agents — no tools needed for pure reasoning
89
+ // ---------------------------------------------------------------------------
90
+
91
+ function buildAgent(config: AgentConfig): Agent {
92
+ const registry = new ToolRegistry()
93
+ registerBuiltInTools(registry) // not needed here, but safe if tools are added later
94
+ const executor = new ToolExecutor(registry)
95
+ return new Agent(config, registry, executor)
96
+ }
97
+
98
+ const optimist = buildAgent(optimistConfig)
99
+ const skeptic = buildAgent(skepticConfig)
100
+ const pragmatist = buildAgent(pragmatistConfig)
101
+ const synthesizer = buildAgent(synthesizerConfig)
102
+
103
+ // ---------------------------------------------------------------------------
104
+ // Set up the pool
105
+ // ---------------------------------------------------------------------------
106
+
107
+ const pool = new AgentPool(3) // 3 analysts can run simultaneously
108
+ pool.add(optimist)
109
+ pool.add(skeptic)
110
+ pool.add(pragmatist)
111
+ pool.add(synthesizer)
112
+
113
+ console.log('Fan-Out / Aggregate (MapReduce) Pattern')
114
+ console.log('='.repeat(60))
115
+ console.log(`\nTopic: ${TOPIC.replace(/\n/g, ' ').trim()}\n`)
116
+
117
+ // ---------------------------------------------------------------------------
118
+ // Step 1: Fan-out — run all 3 analysts in parallel
119
+ // ---------------------------------------------------------------------------
120
+
121
+ console.log('[Step 1] Fan-out: 3 analysts running in parallel...\n')
122
+
123
+ const analystResults: Map<string, AgentRunResult> = await pool.runParallel([
124
+ { agent: 'optimist', prompt: TOPIC },
125
+ { agent: 'skeptic', prompt: TOPIC },
126
+ { agent: 'pragmatist', prompt: TOPIC },
127
+ ])
128
+
129
+ // Print each analyst's output (truncated)
130
+ const analysts = ['optimist', 'skeptic', 'pragmatist'] as const
131
+ for (const name of analysts) {
132
+ const result = analystResults.get(name)!
133
+ const status = result.success ? 'OK' : 'FAILED'
134
+ console.log(` ${name} [${status}] — ${result.tokenUsage.output_tokens} output tokens`)
135
+ console.log(` ${result.output.slice(0, 150).replace(/\n/g, ' ')}...`)
136
+ console.log()
137
+ }
138
+
139
+ // Check all analysts succeeded
140
+ for (const name of analysts) {
141
+ if (!analystResults.get(name)!.success) {
142
+ console.error(`Analyst '${name}' failed: ${analystResults.get(name)!.output}`)
143
+ process.exit(1)
144
+ }
145
+ }
146
+
147
+ // ---------------------------------------------------------------------------
148
+ // Step 2: Aggregate — synthesizer reads all 3 analyses
149
+ // ---------------------------------------------------------------------------
150
+
151
+ console.log('[Step 2] Aggregate: synthesizer producing final report...\n')
152
+
153
+ const synthesizerPrompt = `Three analysts have independently evaluated the same question.
154
+ Read their analyses below and produce your synthesis report.
155
+
156
+ --- OPTIMIST ---
157
+ ${analystResults.get('optimist')!.output}
158
+
159
+ --- SKEPTIC ---
160
+ ${analystResults.get('skeptic')!.output}
161
+
162
+ --- PRAGMATIST ---
163
+ ${analystResults.get('pragmatist')!.output}
164
+
165
+ Now synthesize these three perspectives into a balanced recommendation.`
166
+
167
+ const synthResult = await pool.run('synthesizer', synthesizerPrompt)
168
+
169
+ if (!synthResult.success) {
170
+ console.error('Synthesizer failed:', synthResult.output)
171
+ process.exit(1)
172
+ }
173
+
174
+ // ---------------------------------------------------------------------------
175
+ // Final output
176
+ // ---------------------------------------------------------------------------
177
+
178
+ console.log('='.repeat(60))
179
+ console.log('SYNTHESIZED REPORT')
180
+ console.log('='.repeat(60))
181
+ console.log()
182
+ console.log(synthResult.output)
183
+ console.log()
184
+ console.log('-'.repeat(60))
185
+
186
+ // ---------------------------------------------------------------------------
187
+ // Token usage comparison
188
+ // ---------------------------------------------------------------------------
189
+
190
+ console.log('\nToken Usage Summary:')
191
+ console.log('-'.repeat(60))
192
+
193
+ let totalInput = 0
194
+ let totalOutput = 0
195
+
196
+ for (const name of analysts) {
197
+ const r = analystResults.get(name)!
198
+ totalInput += r.tokenUsage.input_tokens
199
+ totalOutput += r.tokenUsage.output_tokens
200
+ console.log(` ${name.padEnd(12)} — input: ${r.tokenUsage.input_tokens}, output: ${r.tokenUsage.output_tokens}`)
201
+ }
202
+
203
+ totalInput += synthResult.tokenUsage.input_tokens
204
+ totalOutput += synthResult.tokenUsage.output_tokens
205
+ console.log(` ${'synthesizer'.padEnd(12)} — input: ${synthResult.tokenUsage.input_tokens}, output: ${synthResult.tokenUsage.output_tokens}`)
206
+ console.log('-'.repeat(60))
207
+ console.log(` ${'TOTAL'.padEnd(12)} — input: ${totalInput}, output: ${totalOutput}`)
208
+
209
+ console.log('\nDone.')
@@ -0,0 +1,192 @@
1
+ /**
2
+ * Example 08 — Gemma 4 Local (100% Local, Zero API Cost)
3
+ *
4
+ * Demonstrates both execution modes with a fully local Gemma 4 model via
5
+ * Ollama. No cloud API keys needed — everything runs on your machine.
6
+ *
7
+ * Part 1 — runTasks(): explicit task pipeline (researcher → summarizer)
8
+ * Part 2 — runTeam(): auto-orchestration where Gemma 4 acts as coordinator,
9
+ * decomposes the goal into tasks, and synthesises the final result
10
+ *
11
+ * This is the hardest test for a local model — runTeam() requires it to
12
+ * produce valid JSON for task decomposition AND do tool-calling for execution.
13
+ * Gemma 4 e2b (5.1B params) handles both reliably.
14
+ *
15
+ * Run:
16
+ * no_proxy=localhost npx tsx examples/08-gemma4-local.ts
17
+ *
18
+ * Prerequisites:
19
+ * 1. Ollama >= 0.20.0 installed and running: https://ollama.com
20
+ * 2. Pull the model: ollama pull gemma4:e2b
21
+ * (or gemma4:e4b for better quality on machines with more RAM)
22
+ * 3. No API keys needed!
23
+ *
24
+ * Note: The no_proxy=localhost prefix is needed if you have an HTTP proxy
25
+ * configured, since the OpenAI SDK would otherwise route Ollama requests
26
+ * through the proxy.
27
+ */
28
+
29
+ import { OpenMultiAgent } from '../src/index.js'
30
+ import type { AgentConfig, OrchestratorEvent, Task } from '../src/types.js'
31
+
32
+ // ---------------------------------------------------------------------------
33
+ // Configuration — change this to match your Ollama setup
34
+ // ---------------------------------------------------------------------------
35
+
36
+ // See available tags at https://ollama.com/library/gemma4
37
+ const OLLAMA_MODEL = 'gemma4:e2b' // or 'gemma4:e4b', 'gemma4:26b'
38
+ const OLLAMA_BASE_URL = 'http://localhost:11434/v1'
39
+ const OUTPUT_DIR = '/tmp/gemma4-demo'
40
+
41
+ // ---------------------------------------------------------------------------
42
+ // Agents
43
+ // ---------------------------------------------------------------------------
44
+
45
+ const researcher: AgentConfig = {
46
+ name: 'researcher',
47
+ model: OLLAMA_MODEL,
48
+ provider: 'openai',
49
+ baseURL: OLLAMA_BASE_URL,
50
+ apiKey: 'ollama', // placeholder — Ollama ignores this, but the OpenAI SDK requires a non-empty value
51
+ systemPrompt: `You are a system researcher. Use bash to run non-destructive,
52
+ read-only commands (uname -a, sw_vers, df -h, uptime, etc.) and report results.
53
+ Use file_write to save reports when asked.`,
54
+ tools: ['bash', 'file_write'],
55
+ maxTurns: 8,
56
+ }
57
+
58
+ const summarizer: AgentConfig = {
59
+ name: 'summarizer',
60
+ model: OLLAMA_MODEL,
61
+ provider: 'openai',
62
+ baseURL: OLLAMA_BASE_URL,
63
+ apiKey: 'ollama',
64
+ systemPrompt: `You are a technical writer. Read files and produce concise,
65
+ structured Markdown summaries. Use file_write to save reports when asked.`,
66
+ tools: ['file_read', 'file_write'],
67
+ maxTurns: 4,
68
+ }
69
+
70
+ // ---------------------------------------------------------------------------
71
+ // Progress handler
72
+ // ---------------------------------------------------------------------------
73
+
74
+ function handleProgress(event: OrchestratorEvent): void {
75
+ const ts = new Date().toISOString().slice(11, 23)
76
+ switch (event.type) {
77
+ case 'task_start': {
78
+ const task = event.data as Task | undefined
79
+ console.log(`[${ts}] TASK START "${task?.title ?? event.task}" → ${task?.assignee ?? '?'}`)
80
+ break
81
+ }
82
+ case 'task_complete':
83
+ console.log(`[${ts}] TASK DONE "${event.task}"`)
84
+ break
85
+ case 'agent_start':
86
+ console.log(`[${ts}] AGENT START ${event.agent}`)
87
+ break
88
+ case 'agent_complete':
89
+ console.log(`[${ts}] AGENT DONE ${event.agent}`)
90
+ break
91
+ case 'error':
92
+ console.error(`[${ts}] ERROR ${event.agent ?? ''} task=${event.task ?? '?'}`)
93
+ break
94
+ }
95
+ }
96
+
97
+ // ═══════════════════════════════════════════════════════════════════════════
98
+ // Part 1: runTasks() — Explicit task pipeline
99
+ // ═══════════════════════════════════════════════════════════════════════════
100
+
101
+ console.log('Part 1: runTasks() — Explicit Pipeline')
102
+ console.log('='.repeat(60))
103
+ console.log(` model → ${OLLAMA_MODEL} via Ollama`)
104
+ console.log(` pipeline → researcher gathers info → summarizer writes summary`)
105
+ console.log()
106
+
107
+ const orchestrator1 = new OpenMultiAgent({
108
+ defaultModel: OLLAMA_MODEL,
109
+ maxConcurrency: 1, // local model serves one request at a time
110
+ onProgress: handleProgress,
111
+ })
112
+
113
+ const team1 = orchestrator1.createTeam('explicit', {
114
+ name: 'explicit',
115
+ agents: [researcher, summarizer],
116
+ sharedMemory: true,
117
+ })
118
+
119
+ const tasks = [
120
+ {
121
+ title: 'Gather system information',
122
+ description: `Use bash to run system info commands (uname -a, sw_vers, sysctl, df -h, uptime).
123
+ Then write a structured Markdown report to ${OUTPUT_DIR}/system-report.md with sections:
124
+ OS, Hardware, Disk, and Uptime.`,
125
+ assignee: 'researcher',
126
+ },
127
+ {
128
+ title: 'Summarize the report',
129
+ description: `Read the file at ${OUTPUT_DIR}/system-report.md.
130
+ Produce a concise one-paragraph executive summary of the system information.`,
131
+ assignee: 'summarizer',
132
+ dependsOn: ['Gather system information'],
133
+ },
134
+ ]
135
+
136
+ const start1 = Date.now()
137
+ const result1 = await orchestrator1.runTasks(team1, tasks)
138
+
139
+ console.log(`\nSuccess: ${result1.success} Time: ${((Date.now() - start1) / 1000).toFixed(1)}s`)
140
+ console.log(`Tokens — input: ${result1.totalTokenUsage.input_tokens}, output: ${result1.totalTokenUsage.output_tokens}`)
141
+
142
+ const summary = result1.agentResults.get('summarizer')
143
+ if (summary?.success) {
144
+ console.log('\nSummary (from local Gemma 4):')
145
+ console.log('-'.repeat(60))
146
+ console.log(summary.output)
147
+ console.log('-'.repeat(60))
148
+ }
149
+
150
+ // ═══════════════════════════════════════════════════════════════════════════
151
+ // Part 2: runTeam() — Auto-orchestration (Gemma 4 as coordinator)
152
+ // ═══════════════════════════════════════════════════════════════════════════
153
+
154
+ console.log('\n\nPart 2: runTeam() — Auto-Orchestration')
155
+ console.log('='.repeat(60))
156
+ console.log(` coordinator → auto-created by runTeam(), also Gemma 4`)
157
+ console.log(` goal → given in natural language, framework plans everything`)
158
+ console.log()
159
+
160
+ const orchestrator2 = new OpenMultiAgent({
161
+ defaultModel: OLLAMA_MODEL,
162
+ defaultProvider: 'openai',
163
+ defaultBaseURL: OLLAMA_BASE_URL,
164
+ defaultApiKey: 'ollama',
165
+ maxConcurrency: 1,
166
+ onProgress: handleProgress,
167
+ })
168
+
169
+ const team2 = orchestrator2.createTeam('auto', {
170
+ name: 'auto',
171
+ agents: [researcher, summarizer],
172
+ sharedMemory: true,
173
+ })
174
+
175
+ const goal = `Check this machine's Node.js version, npm version, and OS info,
176
+ then write a short Markdown summary report to /tmp/gemma4-auto/report.md`
177
+
178
+ const start2 = Date.now()
179
+ const result2 = await orchestrator2.runTeam(team2, goal)
180
+
181
+ console.log(`\nSuccess: ${result2.success} Time: ${((Date.now() - start2) / 1000).toFixed(1)}s`)
182
+ console.log(`Tokens — input: ${result2.totalTokenUsage.input_tokens}, output: ${result2.totalTokenUsage.output_tokens}`)
183
+
184
+ const coordResult = result2.agentResults.get('coordinator')
185
+ if (coordResult?.success) {
186
+ console.log('\nFinal synthesis (from local Gemma 4 coordinator):')
187
+ console.log('-'.repeat(60))
188
+ console.log(coordResult.output)
189
+ console.log('-'.repeat(60))
190
+ }
191
+
192
+ console.log('\nAll processing done locally. $0 API cost.')
@@ -0,0 +1,73 @@
1
+ /**
2
+ * Example 09 — Structured Output
3
+ *
4
+ * Demonstrates `outputSchema` on AgentConfig. The agent's response is
5
+ * automatically parsed as JSON and validated against a Zod schema.
6
+ * On validation failure, the framework retries once with error feedback.
7
+ *
8
+ * The validated result is available via `result.structured`.
9
+ *
10
+ * Run:
11
+ * npx tsx examples/09-structured-output.ts
12
+ *
13
+ * Prerequisites:
14
+ * ANTHROPIC_API_KEY env var must be set.
15
+ */
16
+
17
+ import { z } from 'zod'
18
+ import { OpenMultiAgent } from '../src/index.js'
19
+ import type { AgentConfig } from '../src/types.js'
20
+
21
+ // ---------------------------------------------------------------------------
22
+ // Define a Zod schema for the expected output
23
+ // ---------------------------------------------------------------------------
24
+
25
+ const ReviewAnalysis = z.object({
26
+ summary: z.string().describe('One-sentence summary of the review'),
27
+ sentiment: z.enum(['positive', 'negative', 'neutral']),
28
+ confidence: z.number().min(0).max(1).describe('How confident the analysis is'),
29
+ keyTopics: z.array(z.string()).describe('Main topics mentioned in the review'),
30
+ })
31
+
32
+ type ReviewAnalysis = z.infer<typeof ReviewAnalysis>
33
+
34
+ // ---------------------------------------------------------------------------
35
+ // Agent with outputSchema
36
+ // ---------------------------------------------------------------------------
37
+
38
+ const analyst: AgentConfig = {
39
+ name: 'analyst',
40
+ model: 'claude-sonnet-4-6',
41
+ systemPrompt: 'You are a product review analyst. Analyze the given review and extract structured insights.',
42
+ outputSchema: ReviewAnalysis,
43
+ }
44
+
45
+ // ---------------------------------------------------------------------------
46
+ // Run
47
+ // ---------------------------------------------------------------------------
48
+
49
+ const orchestrator = new OpenMultiAgent({ defaultModel: 'claude-sonnet-4-6' })
50
+
51
+ const reviews = [
52
+ 'This keyboard is amazing! The mechanical switches feel incredible and the RGB lighting is stunning. Build quality is top-notch. Only downside is the price.',
53
+ 'Terrible experience. The product arrived broken, customer support was unhelpful, and the return process took 3 weeks.',
54
+ 'It works fine. Nothing special, nothing bad. Does what it says on the box.',
55
+ ]
56
+
57
+ console.log('Analyzing product reviews with structured output...\n')
58
+
59
+ for (const review of reviews) {
60
+ const result = await orchestrator.runAgent(analyst, `Analyze this review: "${review}"`)
61
+
62
+ if (result.structured) {
63
+ const data = result.structured as ReviewAnalysis
64
+ console.log(`Sentiment: ${data.sentiment} (confidence: ${data.confidence})`)
65
+ console.log(`Summary: ${data.summary}`)
66
+ console.log(`Topics: ${data.keyTopics.join(', ')}`)
67
+ } else {
68
+ console.log(`Validation failed. Raw output: ${result.output.slice(0, 100)}`)
69
+ }
70
+
71
+ console.log(`Tokens: ${result.tokenUsage.input_tokens} in / ${result.tokenUsage.output_tokens} out`)
72
+ console.log('---')
73
+ }
@@ -0,0 +1,132 @@
1
+ /**
2
+ * Example 10 — Task Retry with Exponential Backoff
3
+ *
4
+ * Demonstrates `maxRetries`, `retryDelayMs`, and `retryBackoff` on task config.
5
+ * When a task fails, the framework automatically retries with exponential
6
+ * backoff. The `onProgress` callback receives `task_retry` events so you can
7
+ * log retry attempts in real time.
8
+ *
9
+ * Scenario: a two-step pipeline where the first task (data fetch) is configured
10
+ * to retry on failure, and the second task (analysis) depends on it.
11
+ *
12
+ * Run:
13
+ * npx tsx examples/10-task-retry.ts
14
+ *
15
+ * Prerequisites:
16
+ * ANTHROPIC_API_KEY env var must be set.
17
+ */
18
+
19
+ import { OpenMultiAgent } from '../src/index.js'
20
+ import type { AgentConfig, OrchestratorEvent } from '../src/types.js'
21
+
22
+ // ---------------------------------------------------------------------------
23
+ // Agents
24
+ // ---------------------------------------------------------------------------
25
+
26
+ const fetcher: AgentConfig = {
27
+ name: 'fetcher',
28
+ model: 'claude-sonnet-4-6',
29
+ systemPrompt: `You are a data-fetching agent. When given a topic, produce a short
30
+ JSON summary with 3-5 key facts. Output ONLY valid JSON, no markdown fences.
31
+ Example: {"topic":"...", "facts":["fact1","fact2","fact3"]}`,
32
+ maxTurns: 2,
33
+ }
34
+
35
+ const analyst: AgentConfig = {
36
+ name: 'analyst',
37
+ model: 'claude-sonnet-4-6',
38
+ systemPrompt: `You are a data analyst. Read the fetched data from shared memory
39
+ and produce a brief analysis (3-4 sentences) highlighting trends or insights.`,
40
+ maxTurns: 2,
41
+ }
42
+
43
+ // ---------------------------------------------------------------------------
44
+ // Progress handler — watch for task_retry events
45
+ // ---------------------------------------------------------------------------
46
+
47
+ function handleProgress(event: OrchestratorEvent): void {
48
+ const ts = new Date().toISOString().slice(11, 23)
49
+
50
+ switch (event.type) {
51
+ case 'task_start':
52
+ console.log(`[${ts}] TASK START "${event.task}" (agent: ${event.agent})`)
53
+ break
54
+ case 'task_complete':
55
+ console.log(`[${ts}] TASK DONE "${event.task}"`)
56
+ break
57
+ case 'task_retry': {
58
+ const d = event.data as { attempt: number; maxAttempts: number; error: string; nextDelayMs: number }
59
+ console.log(`[${ts}] TASK RETRY "${event.task}" — attempt ${d.attempt}/${d.maxAttempts}, next in ${d.nextDelayMs}ms`)
60
+ console.log(` error: ${d.error.slice(0, 120)}`)
61
+ break
62
+ }
63
+ case 'error':
64
+ console.log(`[${ts}] ERROR "${event.task}" agent=${event.agent}`)
65
+ break
66
+ }
67
+ }
68
+
69
+ // ---------------------------------------------------------------------------
70
+ // Orchestrator + team
71
+ // ---------------------------------------------------------------------------
72
+
73
+ const orchestrator = new OpenMultiAgent({
74
+ defaultModel: 'claude-sonnet-4-6',
75
+ onProgress: handleProgress,
76
+ })
77
+
78
+ const team = orchestrator.createTeam('retry-demo', {
79
+ name: 'retry-demo',
80
+ agents: [fetcher, analyst],
81
+ sharedMemory: true,
82
+ })
83
+
84
+ // ---------------------------------------------------------------------------
85
+ // Tasks — fetcher has retry config, analyst depends on it
86
+ // ---------------------------------------------------------------------------
87
+
88
+ const tasks = [
89
+ {
90
+ title: 'Fetch data',
91
+ description: 'Fetch key facts about the adoption of TypeScript in open-source projects as of 2024. Output a JSON object with a "topic" and "facts" array.',
92
+ assignee: 'fetcher',
93
+ // Retry config: up to 2 retries, 500ms base delay, 2x backoff (500ms, 1000ms)
94
+ maxRetries: 2,
95
+ retryDelayMs: 500,
96
+ retryBackoff: 2,
97
+ },
98
+ {
99
+ title: 'Analyze data',
100
+ description: 'Read the fetched data from shared memory and produce a 3-4 sentence analysis of TypeScript adoption trends.',
101
+ assignee: 'analyst',
102
+ dependsOn: ['Fetch data'],
103
+ // No retry — if analysis fails, just report the error
104
+ },
105
+ ]
106
+
107
+ // ---------------------------------------------------------------------------
108
+ // Run
109
+ // ---------------------------------------------------------------------------
110
+
111
+ console.log('Task Retry Example')
112
+ console.log('='.repeat(60))
113
+ console.log('Pipeline: fetch (with retry) → analyze')
114
+ console.log(`Retry config: maxRetries=2, delay=500ms, backoff=2x`)
115
+ console.log('='.repeat(60))
116
+ console.log()
117
+
118
+ const result = await orchestrator.runTasks(team, tasks)
119
+
120
+ // ---------------------------------------------------------------------------
121
+ // Summary
122
+ // ---------------------------------------------------------------------------
123
+
124
+ console.log('\n' + '='.repeat(60))
125
+ console.log(`Overall success: ${result.success}`)
126
+ console.log(`Tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
127
+
128
+ for (const [name, r] of result.agentResults) {
129
+ const icon = r.success ? 'OK ' : 'FAIL'
130
+ console.log(` [${icon}] ${name}`)
131
+ console.log(` ${r.output.slice(0, 200)}`)
132
+ }