@jackchen_me/open-multi-agent 0.2.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104) hide show
  1. package/.github/workflows/ci.yml +1 -1
  2. package/CLAUDE.md +11 -3
  3. package/README.md +87 -20
  4. package/README_zh.md +85 -25
  5. package/dist/agent/agent.d.ts +15 -1
  6. package/dist/agent/agent.d.ts.map +1 -1
  7. package/dist/agent/agent.js +144 -10
  8. package/dist/agent/agent.js.map +1 -1
  9. package/dist/agent/loop-detector.d.ts +39 -0
  10. package/dist/agent/loop-detector.d.ts.map +1 -0
  11. package/dist/agent/loop-detector.js +122 -0
  12. package/dist/agent/loop-detector.js.map +1 -0
  13. package/dist/agent/pool.d.ts +2 -1
  14. package/dist/agent/pool.d.ts.map +1 -1
  15. package/dist/agent/pool.js +4 -2
  16. package/dist/agent/pool.js.map +1 -1
  17. package/dist/agent/runner.d.ts +23 -1
  18. package/dist/agent/runner.d.ts.map +1 -1
  19. package/dist/agent/runner.js +113 -12
  20. package/dist/agent/runner.js.map +1 -1
  21. package/dist/index.d.ts +3 -1
  22. package/dist/index.d.ts.map +1 -1
  23. package/dist/index.js +2 -0
  24. package/dist/index.js.map +1 -1
  25. package/dist/llm/adapter.d.ts +4 -1
  26. package/dist/llm/adapter.d.ts.map +1 -1
  27. package/dist/llm/adapter.js +11 -0
  28. package/dist/llm/adapter.js.map +1 -1
  29. package/dist/llm/copilot.d.ts.map +1 -1
  30. package/dist/llm/copilot.js +2 -1
  31. package/dist/llm/copilot.js.map +1 -1
  32. package/dist/llm/gemini.d.ts +65 -0
  33. package/dist/llm/gemini.d.ts.map +1 -0
  34. package/dist/llm/gemini.js +317 -0
  35. package/dist/llm/gemini.js.map +1 -0
  36. package/dist/llm/grok.d.ts +21 -0
  37. package/dist/llm/grok.d.ts.map +1 -0
  38. package/dist/llm/grok.js +24 -0
  39. package/dist/llm/grok.js.map +1 -0
  40. package/dist/llm/openai-common.d.ts +8 -1
  41. package/dist/llm/openai-common.d.ts.map +1 -1
  42. package/dist/llm/openai-common.js +35 -2
  43. package/dist/llm/openai-common.js.map +1 -1
  44. package/dist/llm/openai.d.ts +1 -1
  45. package/dist/llm/openai.d.ts.map +1 -1
  46. package/dist/llm/openai.js +20 -2
  47. package/dist/llm/openai.js.map +1 -1
  48. package/dist/orchestrator/orchestrator.d.ts.map +1 -1
  49. package/dist/orchestrator/orchestrator.js +89 -9
  50. package/dist/orchestrator/orchestrator.js.map +1 -1
  51. package/dist/task/queue.d.ts +31 -2
  52. package/dist/task/queue.d.ts.map +1 -1
  53. package/dist/task/queue.js +69 -2
  54. package/dist/task/queue.js.map +1 -1
  55. package/dist/tool/text-tool-extractor.d.ts +32 -0
  56. package/dist/tool/text-tool-extractor.d.ts.map +1 -0
  57. package/dist/tool/text-tool-extractor.js +187 -0
  58. package/dist/tool/text-tool-extractor.js.map +1 -0
  59. package/dist/types.d.ts +139 -7
  60. package/dist/types.d.ts.map +1 -1
  61. package/dist/utils/trace.d.ts +12 -0
  62. package/dist/utils/trace.d.ts.map +1 -0
  63. package/dist/utils/trace.js +30 -0
  64. package/dist/utils/trace.js.map +1 -0
  65. package/examples/06-local-model.ts +1 -0
  66. package/examples/08-gemma4-local.ts +76 -87
  67. package/examples/09-structured-output.ts +73 -0
  68. package/examples/10-task-retry.ts +132 -0
  69. package/examples/11-trace-observability.ts +133 -0
  70. package/examples/12-grok.ts +154 -0
  71. package/examples/13-gemini.ts +48 -0
  72. package/package.json +11 -1
  73. package/src/agent/agent.ts +159 -10
  74. package/src/agent/loop-detector.ts +137 -0
  75. package/src/agent/pool.ts +9 -2
  76. package/src/agent/runner.ts +148 -19
  77. package/src/index.ts +15 -0
  78. package/src/llm/adapter.ts +12 -1
  79. package/src/llm/copilot.ts +2 -1
  80. package/src/llm/gemini.ts +378 -0
  81. package/src/llm/grok.ts +29 -0
  82. package/src/llm/openai-common.ts +41 -2
  83. package/src/llm/openai.ts +23 -3
  84. package/src/orchestrator/orchestrator.ts +105 -11
  85. package/src/task/queue.ts +73 -3
  86. package/src/tool/text-tool-extractor.ts +219 -0
  87. package/src/types.ts +157 -6
  88. package/src/utils/trace.ts +34 -0
  89. package/tests/agent-hooks.test.ts +473 -0
  90. package/tests/agent-pool.test.ts +212 -0
  91. package/tests/approval.test.ts +464 -0
  92. package/tests/built-in-tools.test.ts +393 -0
  93. package/tests/gemini-adapter.test.ts +97 -0
  94. package/tests/grok-adapter.test.ts +74 -0
  95. package/tests/llm-adapters.test.ts +357 -0
  96. package/tests/loop-detection.test.ts +456 -0
  97. package/tests/openai-fallback.test.ts +159 -0
  98. package/tests/orchestrator.test.ts +281 -0
  99. package/tests/scheduler.test.ts +221 -0
  100. package/tests/team-messaging.test.ts +329 -0
  101. package/tests/text-tool-extractor.test.ts +170 -0
  102. package/tests/trace.test.ts +453 -0
  103. package/vitest.config.ts +9 -0
  104. package/examples/09-gemma4-auto-orchestration.ts +0 -162
@@ -1,15 +1,16 @@
1
1
  /**
2
- * Example 08 — Gemma 4 Local Agent Team (100% Local, Zero API Cost)
2
+ * Example 08 — Gemma 4 Local (100% Local, Zero API Cost)
3
3
  *
4
- * Demonstrates a fully local multi-agent team using Google's Gemma 4 via
4
+ * Demonstrates both execution modes with a fully local Gemma 4 model via
5
5
  * Ollama. No cloud API keys needed — everything runs on your machine.
6
6
  *
7
- * Two agents collaborate through a task pipeline:
8
- * - researcher: uses bash + file_write to gather system info and write a report
9
- * - summarizer: uses file_read to read the report and produce a concise summary
7
+ * Part 1 runTasks(): explicit task pipeline (researcher → summarizer)
8
+ * Part 2 runTeam(): auto-orchestration where Gemma 4 acts as coordinator,
9
+ * decomposes the goal into tasks, and synthesizes the final result
10
10
  *
11
- * This pattern works with any Ollama model that supports tool-calling.
12
- * Gemma 4 (released 2026-04-02) has native tool-calling support.
11
+ * This is the hardest test for a local model — runTeam() requires it to
12
+ * produce valid JSON for task decomposition AND do tool-calling for execution.
13
+ * Gemma 4 e2b (5.1B params) handles both reliably.
13
14
  *
14
15
  * Run:
15
16
  * no_proxy=localhost npx tsx examples/08-gemma4-local.ts
@@ -38,46 +39,31 @@ const OLLAMA_BASE_URL = 'http://localhost:11434/v1'
38
39
  const OUTPUT_DIR = '/tmp/gemma4-demo'
39
40
 
40
41
  // ---------------------------------------------------------------------------
41
- // Agents — both use Gemma 4 locally
42
+ // Agents
42
43
  // ---------------------------------------------------------------------------
43
44
 
44
- /**
45
- * Researcher — gathers system information using shell commands.
46
- */
47
45
  const researcher: AgentConfig = {
48
46
  name: 'researcher',
49
47
  model: OLLAMA_MODEL,
50
48
  provider: 'openai',
51
49
  baseURL: OLLAMA_BASE_URL,
52
50
  apiKey: 'ollama', // placeholder — Ollama ignores this, but the OpenAI SDK requires a non-empty value
53
- systemPrompt: `You are a system researcher. Your job is to gather information
54
- about the current machine using shell commands and write a structured report.
55
-
56
- Use the bash tool to run commands like: uname -a, df -h, uptime, and similar
57
- non-destructive read-only commands.
58
- On macOS you can also use: sw_vers, sysctl -n hw.memsize.
59
- On Linux you can also use: cat /etc/os-release, free -h.
60
-
61
- Then use file_write to save a Markdown report to ${OUTPUT_DIR}/system-report.md.
62
- The report should have sections: OS, Hardware, Disk, and Uptime.
63
- Be concise — one or two lines per section is enough.`,
51
+ systemPrompt: `You are a system researcher. Use bash to run non-destructive,
52
+ read-only commands (uname -a, sw_vers, df -h, uptime, etc.) and report results.
53
+ Use file_write to save reports when asked.`,
64
54
  tools: ['bash', 'file_write'],
65
55
  maxTurns: 8,
66
56
  }
67
57
 
68
- /**
69
- * Summarizer — reads the report and writes a one-paragraph executive summary.
70
- */
71
58
  const summarizer: AgentConfig = {
72
59
  name: 'summarizer',
73
60
  model: OLLAMA_MODEL,
74
61
  provider: 'openai',
75
62
  baseURL: OLLAMA_BASE_URL,
76
63
  apiKey: 'ollama',
77
- systemPrompt: `You are a technical writer. Read the system report file provided,
78
- then produce a concise one-paragraph executive summary (3-5 sentences).
79
- Focus on the key highlights: what OS, how much RAM, disk status, and uptime.`,
80
- tools: ['file_read'],
64
+ systemPrompt: `You are a technical writer. Read files and produce concise,
65
+ structured Markdown summaries. Use file_write to save reports when asked.`,
66
+ tools: ['file_read', 'file_write'],
81
67
  maxTurns: 4,
82
68
  }
83
69
 
@@ -85,23 +71,17 @@ Focus on the key highlights: what OS, how much RAM, disk status, and uptime.`,
85
71
  // Progress handler
86
72
  // ---------------------------------------------------------------------------
87
73
 
88
- const taskTimes = new Map<string, number>()
89
-
90
74
  function handleProgress(event: OrchestratorEvent): void {
91
75
  const ts = new Date().toISOString().slice(11, 23)
92
-
93
76
  switch (event.type) {
94
77
  case 'task_start': {
95
- taskTimes.set(event.task ?? '', Date.now())
96
78
  const task = event.data as Task | undefined
97
79
  console.log(`[${ts}] TASK START "${task?.title ?? event.task}" → ${task?.assignee ?? '?'}`)
98
80
  break
99
81
  }
100
- case 'task_complete': {
101
- const elapsed = Date.now() - (taskTimes.get(event.task ?? '') ?? Date.now())
102
- console.log(`[${ts}] TASK DONE "${event.task}" in ${(elapsed / 1000).toFixed(1)}s`)
82
+ case 'task_complete':
83
+ console.log(`[${ts}] TASK DONE "${event.task}"`)
103
84
  break
104
- }
105
85
  case 'agent_start':
106
86
  console.log(`[${ts}] AGENT START ${event.agent}`)
107
87
  break
@@ -114,32 +94,29 @@ function handleProgress(event: OrchestratorEvent): void {
114
94
  }
115
95
  }
116
96
 
117
- // ---------------------------------------------------------------------------
118
- // Orchestrator + Team
119
- // ---------------------------------------------------------------------------
97
+ // ═══════════════════════════════════════════════════════════════════════════
98
+ // Part 1: runTasks() — Explicit task pipeline
99
+ // ═══════════════════════════════════════════════════════════════════════════
120
100
 
121
- const orchestrator = new OpenMultiAgent({
101
+ console.log('Part 1: runTasks() Explicit Pipeline')
102
+ console.log('='.repeat(60))
103
+ console.log(` model → ${OLLAMA_MODEL} via Ollama`)
104
+ console.log(` pipeline → researcher gathers info → summarizer writes summary`)
105
+ console.log()
106
+
107
+ const orchestrator1 = new OpenMultiAgent({
122
108
  defaultModel: OLLAMA_MODEL,
123
- maxConcurrency: 1, // run agents sequentially — local model can only serve one at a time
109
+ maxConcurrency: 1, // local model serves one request at a time
124
110
  onProgress: handleProgress,
125
111
  })
126
112
 
127
- const team = orchestrator.createTeam('gemma4-team', {
128
- name: 'gemma4-team',
113
+ const team1 = orchestrator1.createTeam('explicit', {
114
+ name: 'explicit',
129
115
  agents: [researcher, summarizer],
130
116
  sharedMemory: true,
131
117
  })
132
118
 
133
- // ---------------------------------------------------------------------------
134
- // Task pipeline: research → summarize
135
- // ---------------------------------------------------------------------------
136
-
137
- const tasks: Array<{
138
- title: string
139
- description: string
140
- assignee?: string
141
- dependsOn?: string[]
142
- }> = [
119
+ const tasks = [
143
120
  {
144
121
  title: 'Gather system information',
145
122
  description: `Use bash to run system info commands (uname -a, sw_vers, sysctl, df -h, uptime).
@@ -156,47 +133,59 @@ Produce a concise one-paragraph executive summary of the system information.`,
156
133
  },
157
134
  ]
158
135
 
159
- // ---------------------------------------------------------------------------
160
- // Run
161
- // ---------------------------------------------------------------------------
136
+ const start1 = Date.now()
137
+ const result1 = await orchestrator1.runTasks(team1, tasks)
162
138
 
163
- console.log('Gemma 4 Local Agent Team Zero API Cost')
139
+ console.log(`\nSuccess: ${result1.success} Time: ${((Date.now() - start1) / 1000).toFixed(1)}s`)
140
+ console.log(`Tokens — input: ${result1.totalTokenUsage.input_tokens}, output: ${result1.totalTokenUsage.output_tokens}`)
141
+
142
+ const summary = result1.agentResults.get('summarizer')
143
+ if (summary?.success) {
144
+ console.log('\nSummary (from local Gemma 4):')
145
+ console.log('-'.repeat(60))
146
+ console.log(summary.output)
147
+ console.log('-'.repeat(60))
148
+ }
149
+
150
+ // ═══════════════════════════════════════════════════════════════════════════
151
+ // Part 2: runTeam() — Auto-orchestration (Gemma 4 as coordinator)
152
+ // ═══════════════════════════════════════════════════════════════════════════
153
+
154
+ console.log('\n\nPart 2: runTeam() — Auto-Orchestration')
164
155
  console.log('='.repeat(60))
165
- console.log(` model ${OLLAMA_MODEL} via Ollama`)
166
- console.log(` researcher bash + file_write`)
167
- console.log(` summarizer → file_read`)
168
- console.log(` output dir → ${OUTPUT_DIR}`)
156
+ console.log(` coordinator auto-created by runTeam(), also Gemma 4`)
157
+ console.log(` goal given in natural language, framework plans everything`)
169
158
  console.log()
170
- console.log('Pipeline: researcher gathers info → summarizer writes summary')
171
- console.log('='.repeat(60))
172
159
 
173
- const start = Date.now()
174
- const result = await orchestrator.runTasks(team, tasks)
175
- const totalTime = Date.now() - start
160
+ const orchestrator2 = new OpenMultiAgent({
161
+ defaultModel: OLLAMA_MODEL,
162
+ defaultProvider: 'openai',
163
+ defaultBaseURL: OLLAMA_BASE_URL,
164
+ defaultApiKey: 'ollama',
165
+ maxConcurrency: 1,
166
+ onProgress: handleProgress,
167
+ })
176
168
 
177
- // ---------------------------------------------------------------------------
178
- // Summary
179
- // ---------------------------------------------------------------------------
169
+ const team2 = orchestrator2.createTeam('auto', {
170
+ name: 'auto',
171
+ agents: [researcher, summarizer],
172
+ sharedMemory: true,
173
+ })
180
174
 
181
- console.log('\n' + '='.repeat(60))
182
- console.log('Pipeline complete.\n')
183
- console.log(`Overall success: ${result.success}`)
184
- console.log(`Total time: ${(totalTime / 1000).toFixed(1)}s`)
185
- console.log(`Tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
186
-
187
- console.log('\nPer-agent results:')
188
- for (const [name, r] of result.agentResults) {
189
- const icon = r.success ? 'OK ' : 'FAIL'
190
- const tools = r.toolCalls.map(c => c.toolName).join(', ')
191
- console.log(` [${icon}] ${name.padEnd(12)} tools: ${tools || '(none)'}`)
192
- }
175
+ const goal = `Check this machine's Node.js version, npm version, and OS info,
176
+ then write a short Markdown summary report to /tmp/gemma4-auto/report.md`
193
177
 
194
- // Print the summarizer's output
195
- const summary = result.agentResults.get('summarizer')
196
- if (summary?.success) {
197
- console.log('\nExecutive Summary (from local Gemma 4):')
178
+ const start2 = Date.now()
179
+ const result2 = await orchestrator2.runTeam(team2, goal)
180
+
181
+ console.log(`\nSuccess: ${result2.success} Time: ${((Date.now() - start2) / 1000).toFixed(1)}s`)
182
+ console.log(`Tokens — input: ${result2.totalTokenUsage.input_tokens}, output: ${result2.totalTokenUsage.output_tokens}`)
183
+
184
+ const coordResult = result2.agentResults.get('coordinator')
185
+ if (coordResult?.success) {
186
+ console.log('\nFinal synthesis (from local Gemma 4 coordinator):')
198
187
  console.log('-'.repeat(60))
199
- console.log(summary.output)
188
+ console.log(coordResult.output)
200
189
  console.log('-'.repeat(60))
201
190
  }
202
191
 
@@ -0,0 +1,73 @@
1
+ /**
2
+ * Example 09 — Structured Output
3
+ *
4
+ * Demonstrates `outputSchema` on AgentConfig. The agent's response is
5
+ * automatically parsed as JSON and validated against a Zod schema.
6
+ * On validation failure, the framework retries once with error feedback.
7
+ *
8
+ * The validated result is available via `result.structured`.
9
+ *
10
+ * Run:
11
+ * npx tsx examples/09-structured-output.ts
12
+ *
13
+ * Prerequisites:
14
+ * ANTHROPIC_API_KEY env var must be set.
15
+ */
16
+
17
+ import { z } from 'zod'
18
+ import { OpenMultiAgent } from '../src/index.js'
19
+ import type { AgentConfig } from '../src/types.js'
20
+
21
+ // ---------------------------------------------------------------------------
22
+ // Define a Zod schema for the expected output
23
+ // ---------------------------------------------------------------------------
24
+
25
+ const ReviewAnalysis = z.object({
26
+ summary: z.string().describe('One-sentence summary of the review'),
27
+ sentiment: z.enum(['positive', 'negative', 'neutral']),
28
+ confidence: z.number().min(0).max(1).describe('How confident the analysis is'),
29
+ keyTopics: z.array(z.string()).describe('Main topics mentioned in the review'),
30
+ })
31
+
32
+ type ReviewAnalysis = z.infer<typeof ReviewAnalysis>
33
+
34
+ // ---------------------------------------------------------------------------
35
+ // Agent with outputSchema
36
+ // ---------------------------------------------------------------------------
37
+
38
+ const analyst: AgentConfig = {
39
+ name: 'analyst',
40
+ model: 'claude-sonnet-4-6',
41
+ systemPrompt: 'You are a product review analyst. Analyze the given review and extract structured insights.',
42
+ outputSchema: ReviewAnalysis,
43
+ }
44
+
45
+ // ---------------------------------------------------------------------------
46
+ // Run
47
+ // ---------------------------------------------------------------------------
48
+
49
+ const orchestrator = new OpenMultiAgent({ defaultModel: 'claude-sonnet-4-6' })
50
+
51
+ const reviews = [
52
+ 'This keyboard is amazing! The mechanical switches feel incredible and the RGB lighting is stunning. Build quality is top-notch. Only downside is the price.',
53
+ 'Terrible experience. The product arrived broken, customer support was unhelpful, and the return process took 3 weeks.',
54
+ 'It works fine. Nothing special, nothing bad. Does what it says on the box.',
55
+ ]
56
+
57
+ console.log('Analyzing product reviews with structured output...\n')
58
+
59
+ for (const review of reviews) {
60
+ const result = await orchestrator.runAgent(analyst, `Analyze this review: "${review}"`)
61
+
62
+ if (result.structured) {
63
+ const data = result.structured as ReviewAnalysis
64
+ console.log(`Sentiment: ${data.sentiment} (confidence: ${data.confidence})`)
65
+ console.log(`Summary: ${data.summary}`)
66
+ console.log(`Topics: ${data.keyTopics.join(', ')}`)
67
+ } else {
68
+ console.log(`Validation failed. Raw output: ${result.output.slice(0, 100)}`)
69
+ }
70
+
71
+ console.log(`Tokens: ${result.tokenUsage.input_tokens} in / ${result.tokenUsage.output_tokens} out`)
72
+ console.log('---')
73
+ }
@@ -0,0 +1,132 @@
1
+ /**
2
+ * Example 10 — Task Retry with Exponential Backoff
3
+ *
4
+ * Demonstrates `maxRetries`, `retryDelayMs`, and `retryBackoff` on task config.
5
+ * When a task fails, the framework automatically retries with exponential
6
+ * backoff. The `onProgress` callback receives `task_retry` events so you can
7
+ * log retry attempts in real time.
8
+ *
9
+ * Scenario: a two-step pipeline where the first task (data fetch) is configured
10
+ * to retry on failure, and the second task (analysis) depends on it.
11
+ *
12
+ * Run:
13
+ * npx tsx examples/10-task-retry.ts
14
+ *
15
+ * Prerequisites:
16
+ * ANTHROPIC_API_KEY env var must be set.
17
+ */
18
+
19
+ import { OpenMultiAgent } from '../src/index.js'
20
+ import type { AgentConfig, OrchestratorEvent } from '../src/types.js'
21
+
22
+ // ---------------------------------------------------------------------------
23
+ // Agents
24
+ // ---------------------------------------------------------------------------
25
+
26
+ const fetcher: AgentConfig = {
27
+ name: 'fetcher',
28
+ model: 'claude-sonnet-4-6',
29
+ systemPrompt: `You are a data-fetching agent. When given a topic, produce a short
30
+ JSON summary with 3-5 key facts. Output ONLY valid JSON, no markdown fences.
31
+ Example: {"topic":"...", "facts":["fact1","fact2","fact3"]}`,
32
+ maxTurns: 2,
33
+ }
34
+
35
+ const analyst: AgentConfig = {
36
+ name: 'analyst',
37
+ model: 'claude-sonnet-4-6',
38
+ systemPrompt: `You are a data analyst. Read the fetched data from shared memory
39
+ and produce a brief analysis (3-4 sentences) highlighting trends or insights.`,
40
+ maxTurns: 2,
41
+ }
42
+
43
+ // ---------------------------------------------------------------------------
44
+ // Progress handler — watch for task_retry events
45
+ // ---------------------------------------------------------------------------
46
+
47
+ function handleProgress(event: OrchestratorEvent): void {
48
+ const ts = new Date().toISOString().slice(11, 23)
49
+
50
+ switch (event.type) {
51
+ case 'task_start':
52
+ console.log(`[${ts}] TASK START "${event.task}" (agent: ${event.agent})`)
53
+ break
54
+ case 'task_complete':
55
+ console.log(`[${ts}] TASK DONE "${event.task}"`)
56
+ break
57
+ case 'task_retry': {
58
+ const d = event.data as { attempt: number; maxAttempts: number; error: string; nextDelayMs: number }
59
+ console.log(`[${ts}] TASK RETRY "${event.task}" — attempt ${d.attempt}/${d.maxAttempts}, next in ${d.nextDelayMs}ms`)
60
+ console.log(` error: ${d.error.slice(0, 120)}`)
61
+ break
62
+ }
63
+ case 'error':
64
+ console.log(`[${ts}] ERROR "${event.task}" agent=${event.agent}`)
65
+ break
66
+ }
67
+ }
68
+
69
+ // ---------------------------------------------------------------------------
70
+ // Orchestrator + team
71
+ // ---------------------------------------------------------------------------
72
+
73
+ const orchestrator = new OpenMultiAgent({
74
+ defaultModel: 'claude-sonnet-4-6',
75
+ onProgress: handleProgress,
76
+ })
77
+
78
+ const team = orchestrator.createTeam('retry-demo', {
79
+ name: 'retry-demo',
80
+ agents: [fetcher, analyst],
81
+ sharedMemory: true,
82
+ })
83
+
84
+ // ---------------------------------------------------------------------------
85
+ // Tasks — fetcher has retry config, analyst depends on it
86
+ // ---------------------------------------------------------------------------
87
+
88
+ const tasks = [
89
+ {
90
+ title: 'Fetch data',
91
+ description: 'Fetch key facts about the adoption of TypeScript in open-source projects as of 2024. Output a JSON object with a "topic" and "facts" array.',
92
+ assignee: 'fetcher',
93
+ // Retry config: up to 2 retries, 500ms base delay, 2x backoff (500ms, 1000ms)
94
+ maxRetries: 2,
95
+ retryDelayMs: 500,
96
+ retryBackoff: 2,
97
+ },
98
+ {
99
+ title: 'Analyze data',
100
+ description: 'Read the fetched data from shared memory and produce a 3-4 sentence analysis of TypeScript adoption trends.',
101
+ assignee: 'analyst',
102
+ dependsOn: ['Fetch data'],
103
+ // No retry — if analysis fails, just report the error
104
+ },
105
+ ]
106
+
107
+ // ---------------------------------------------------------------------------
108
+ // Run
109
+ // ---------------------------------------------------------------------------
110
+
111
+ console.log('Task Retry Example')
112
+ console.log('='.repeat(60))
113
+ console.log('Pipeline: fetch (with retry) → analyze')
114
+ console.log(`Retry config: maxRetries=2, delay=500ms, backoff=2x`)
115
+ console.log('='.repeat(60))
116
+ console.log()
117
+
118
+ const result = await orchestrator.runTasks(team, tasks)
119
+
120
+ // ---------------------------------------------------------------------------
121
+ // Summary
122
+ // ---------------------------------------------------------------------------
123
+
124
+ console.log('\n' + '='.repeat(60))
125
+ console.log(`Overall success: ${result.success}`)
126
+ console.log(`Tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
127
+
128
+ for (const [name, r] of result.agentResults) {
129
+ const icon = r.success ? 'OK ' : 'FAIL'
130
+ console.log(` [${icon}] ${name}`)
131
+ console.log(` ${r.output.slice(0, 200)}`)
132
+ }
@@ -0,0 +1,133 @@
1
+ /**
2
+ * Example 11 — Trace Observability
3
+ *
4
+ * Demonstrates the `onTrace` callback for lightweight observability. Every LLM
5
+ * call, tool execution, task lifecycle, and agent run emits a structured trace
6
+ * event with timing data and token usage — giving you full visibility into
7
+ * what's happening inside a multi-agent run.
8
+ *
9
+ * Trace events share a `runId` for correlation, so you can reconstruct the
10
+ * full execution timeline. Pipe them into your own logging, OpenTelemetry, or
11
+ * dashboard.
12
+ *
13
+ * Run:
14
+ * npx tsx examples/11-trace-observability.ts
15
+ *
16
+ * Prerequisites:
17
+ * ANTHROPIC_API_KEY env var must be set.
18
+ */
19
+
20
+ import { OpenMultiAgent } from '../src/index.js'
21
+ import type { AgentConfig, TraceEvent } from '../src/types.js'
22
+
23
+ // ---------------------------------------------------------------------------
24
+ // Agents
25
+ // ---------------------------------------------------------------------------
26
+
27
+ const researcher: AgentConfig = {
28
+ name: 'researcher',
29
+ model: 'claude-sonnet-4-6',
30
+ systemPrompt: 'You are a research assistant. Provide concise, factual answers.',
31
+ maxTurns: 2,
32
+ }
33
+
34
+ const writer: AgentConfig = {
35
+ name: 'writer',
36
+ model: 'claude-sonnet-4-6',
37
+ systemPrompt: 'You are a technical writer. Summarize research into clear prose.',
38
+ maxTurns: 2,
39
+ }
40
+
41
+ // ---------------------------------------------------------------------------
42
+ // Trace handler — log every span with timing
43
+ // ---------------------------------------------------------------------------
44
+
45
+ function handleTrace(event: TraceEvent): void {
46
+ const dur = `${event.durationMs}ms`.padStart(7)
47
+
48
+ switch (event.type) {
49
+ case 'llm_call':
50
+ console.log(
51
+ ` [LLM] ${dur} agent=${event.agent} model=${event.model} turn=${event.turn}` +
52
+ ` tokens=${event.tokens.input_tokens}in/${event.tokens.output_tokens}out`,
53
+ )
54
+ break
55
+ case 'tool_call':
56
+ console.log(
57
+ ` [TOOL] ${dur} agent=${event.agent} tool=${event.tool}` +
58
+ ` error=${event.isError}`,
59
+ )
60
+ break
61
+ case 'task':
62
+ console.log(
63
+ ` [TASK] ${dur} task="${event.taskTitle}" agent=${event.agent}` +
64
+ ` success=${event.success} retries=${event.retries}`,
65
+ )
66
+ break
67
+ case 'agent':
68
+ console.log(
69
+ ` [AGENT] ${dur} agent=${event.agent} turns=${event.turns}` +
70
+ ` tools=${event.toolCalls} tokens=${event.tokens.input_tokens}in/${event.tokens.output_tokens}out`,
71
+ )
72
+ break
73
+ }
74
+ }
75
+
76
+ // ---------------------------------------------------------------------------
77
+ // Orchestrator + team
78
+ // ---------------------------------------------------------------------------
79
+
80
+ const orchestrator = new OpenMultiAgent({
81
+ defaultModel: 'claude-sonnet-4-6',
82
+ onTrace: handleTrace,
83
+ })
84
+
85
+ const team = orchestrator.createTeam('trace-demo', {
86
+ name: 'trace-demo',
87
+ agents: [researcher, writer],
88
+ sharedMemory: true,
89
+ })
90
+
91
+ // ---------------------------------------------------------------------------
92
+ // Tasks — researcher first, then writer summarizes
93
+ // ---------------------------------------------------------------------------
94
+
95
+ const tasks = [
96
+ {
97
+ title: 'Research topic',
98
+ description: 'List 5 key benefits of TypeScript for large codebases. Be concise.',
99
+ assignee: 'researcher',
100
+ },
101
+ {
102
+ title: 'Write summary',
103
+ description: 'Read the research from shared memory and write a 3-sentence summary.',
104
+ assignee: 'writer',
105
+ dependsOn: ['Research topic'],
106
+ },
107
+ ]
108
+
109
+ // ---------------------------------------------------------------------------
110
+ // Run
111
+ // ---------------------------------------------------------------------------
112
+
113
+ console.log('Trace Observability Example')
114
+ console.log('='.repeat(60))
115
+ console.log('Pipeline: research → write (with full trace output)')
116
+ console.log('='.repeat(60))
117
+ console.log()
118
+
119
+ const result = await orchestrator.runTasks(team, tasks)
120
+
121
+ // ---------------------------------------------------------------------------
122
+ // Summary
123
+ // ---------------------------------------------------------------------------
124
+
125
+ console.log('\n' + '='.repeat(60))
126
+ console.log(`Overall success: ${result.success}`)
127
+ console.log(`Tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
128
+
129
+ for (const [name, r] of result.agentResults) {
130
+ const icon = r.success ? 'OK ' : 'FAIL'
131
+ console.log(` [${icon}] ${name}`)
132
+ console.log(` ${r.output.slice(0, 200)}`)
133
+ }