@jackchen_me/open-multi-agent 0.1.0 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. package/.github/ISSUE_TEMPLATE/bug_report.md +40 -0
  2. package/.github/ISSUE_TEMPLATE/feature_request.md +23 -0
  3. package/.github/pull_request_template.md +14 -0
  4. package/.github/workflows/ci.yml +23 -0
  5. package/CLAUDE.md +72 -0
  6. package/CODE_OF_CONDUCT.md +48 -0
  7. package/CONTRIBUTING.md +72 -0
  8. package/DECISIONS.md +43 -0
  9. package/README.md +73 -140
  10. package/README_zh.md +217 -0
  11. package/SECURITY.md +17 -0
  12. package/dist/agent/agent.d.ts +5 -0
  13. package/dist/agent/agent.d.ts.map +1 -1
  14. package/dist/agent/agent.js +90 -3
  15. package/dist/agent/agent.js.map +1 -1
  16. package/dist/agent/structured-output.d.ts +33 -0
  17. package/dist/agent/structured-output.d.ts.map +1 -0
  18. package/dist/agent/structured-output.js +116 -0
  19. package/dist/agent/structured-output.js.map +1 -0
  20. package/dist/index.d.ts +2 -1
  21. package/dist/index.d.ts.map +1 -1
  22. package/dist/index.js +2 -1
  23. package/dist/index.js.map +1 -1
  24. package/dist/llm/adapter.d.ts +9 -4
  25. package/dist/llm/adapter.d.ts.map +1 -1
  26. package/dist/llm/adapter.js +17 -5
  27. package/dist/llm/adapter.js.map +1 -1
  28. package/dist/llm/anthropic.d.ts +1 -1
  29. package/dist/llm/anthropic.d.ts.map +1 -1
  30. package/dist/llm/anthropic.js +2 -1
  31. package/dist/llm/anthropic.js.map +1 -1
  32. package/dist/llm/copilot.d.ts +92 -0
  33. package/dist/llm/copilot.d.ts.map +1 -0
  34. package/dist/llm/copilot.js +426 -0
  35. package/dist/llm/copilot.js.map +1 -0
  36. package/dist/llm/openai-common.d.ts +47 -0
  37. package/dist/llm/openai-common.d.ts.map +1 -0
  38. package/dist/llm/openai-common.js +209 -0
  39. package/dist/llm/openai-common.js.map +1 -0
  40. package/dist/llm/openai.d.ts +1 -1
  41. package/dist/llm/openai.d.ts.map +1 -1
  42. package/dist/llm/openai.js +3 -224
  43. package/dist/llm/openai.js.map +1 -1
  44. package/dist/orchestrator/orchestrator.d.ts +25 -1
  45. package/dist/orchestrator/orchestrator.d.ts.map +1 -1
  46. package/dist/orchestrator/orchestrator.js +130 -37
  47. package/dist/orchestrator/orchestrator.js.map +1 -1
  48. package/dist/task/queue.js +1 -1
  49. package/dist/task/queue.js.map +1 -1
  50. package/dist/task/task.d.ts +3 -0
  51. package/dist/task/task.d.ts.map +1 -1
  52. package/dist/task/task.js +5 -1
  53. package/dist/task/task.js.map +1 -1
  54. package/dist/team/messaging.d.ts.map +1 -1
  55. package/dist/team/messaging.js +2 -1
  56. package/dist/team/messaging.js.map +1 -1
  57. package/dist/types.d.ts +31 -3
  58. package/dist/types.d.ts.map +1 -1
  59. package/examples/05-copilot-test.ts +49 -0
  60. package/examples/06-local-model.ts +199 -0
  61. package/examples/07-fan-out-aggregate.ts +209 -0
  62. package/examples/08-gemma4-local.ts +203 -0
  63. package/examples/09-gemma4-auto-orchestration.ts +162 -0
  64. package/package.json +4 -3
  65. package/src/agent/agent.ts +115 -6
  66. package/src/agent/structured-output.ts +126 -0
  67. package/src/index.ts +2 -1
  68. package/src/llm/adapter.ts +18 -5
  69. package/src/llm/anthropic.ts +2 -1
  70. package/src/llm/copilot.ts +551 -0
  71. package/src/llm/openai-common.ts +255 -0
  72. package/src/llm/openai.ts +8 -258
  73. package/src/orchestrator/orchestrator.ts +164 -38
  74. package/src/task/queue.ts +1 -1
  75. package/src/task/task.ts +8 -1
  76. package/src/team/messaging.ts +3 -1
  77. package/src/types.ts +31 -2
  78. package/tests/semaphore.test.ts +57 -0
  79. package/tests/shared-memory.test.ts +122 -0
  80. package/tests/structured-output.test.ts +331 -0
  81. package/tests/task-queue.test.ts +244 -0
  82. package/tests/task-retry.test.ts +368 -0
  83. package/tests/task-utils.test.ts +155 -0
  84. package/tests/tool-executor.test.ts +193 -0
@@ -0,0 +1,49 @@
1
+ /**
2
+ * Quick smoke test for the Copilot adapter.
3
+ *
4
+ * Run:
5
+ * npx tsx examples/05-copilot-test.ts
6
+ *
7
+ * If GITHUB_COPILOT_TOKEN is not set, the adapter will start an interactive
8
+ * OAuth2 device flow — you'll be prompted to sign in via your browser.
9
+ */
10
+
11
+ import { OpenMultiAgent } from '../src/index.js'
12
+ import type { OrchestratorEvent } from '../src/types.js'
13
+
14
+ const orchestrator = new OpenMultiAgent({
15
+ defaultModel: 'gpt-4o',
16
+ defaultProvider: 'copilot',
17
+ onProgress: (event: OrchestratorEvent) => {
18
+ if (event.type === 'agent_start') {
19
+ console.log(`[start] agent=${event.agent}`)
20
+ } else if (event.type === 'agent_complete') {
21
+ console.log(`[complete] agent=${event.agent}`)
22
+ }
23
+ },
24
+ })
25
+
26
+ console.log('Testing Copilot adapter with gpt-4o...\n')
27
+
28
+ const result = await orchestrator.runAgent(
29
+ {
30
+ name: 'assistant',
31
+ model: 'gpt-4o',
32
+ provider: 'copilot',
33
+ systemPrompt: 'You are a helpful assistant. Keep answers brief.',
34
+ maxTurns: 1,
35
+ maxTokens: 256,
36
+ },
37
+ 'What is 2 + 2? Reply in one sentence.',
38
+ )
39
+
40
+ if (result.success) {
41
+ console.log('\nAgent output:')
42
+ console.log('─'.repeat(60))
43
+ console.log(result.output)
44
+ console.log('─'.repeat(60))
45
+ console.log(`\nTokens: input=${result.tokenUsage.input_tokens}, output=${result.tokenUsage.output_tokens}`)
46
+ } else {
47
+ console.error('Agent failed:', result.output)
48
+ process.exit(1)
49
+ }
@@ -0,0 +1,199 @@
1
+ /**
2
+ * Example 06 — Local Model + Cloud Model Team (Ollama + Claude)
3
+ *
4
+ * Demonstrates mixing a local model served by Ollama with a cloud model
5
+ * (Claude) in the same task pipeline. The key technique is using
6
+ * `provider: 'openai'` with a custom `baseURL` pointing at Ollama's
7
+ * OpenAI-compatible endpoint.
8
+ *
9
+ * This pattern works with ANY OpenAI-compatible local server:
10
+ * - Ollama → http://localhost:11434/v1
11
+ * - vLLM → http://localhost:8000/v1
12
+ * - LM Studio → http://localhost:1234/v1
13
+ * - llama.cpp → http://localhost:8080/v1
14
+ * Just change the baseURL and model name below.
15
+ *
16
+ * Run:
17
+ * npx tsx examples/06-local-model.ts
18
+ *
19
+ * Prerequisites:
20
+ * 1. Ollama installed and running: https://ollama.com
21
+ * 2. Pull the model: ollama pull llama3.1
22
+ * 3. ANTHROPIC_API_KEY env var must be set.
23
+ */
24
+
25
+ import { OpenMultiAgent } from '../src/index.js'
26
+ import type { AgentConfig, OrchestratorEvent, Task } from '../src/types.js'
27
+
28
+ // ---------------------------------------------------------------------------
29
+ // Agents
30
+ // ---------------------------------------------------------------------------
31
+
32
+ /**
33
+ * Coder — uses Claude (Anthropic) for high-quality code generation.
34
+ */
35
+ const coder: AgentConfig = {
36
+ name: 'coder',
37
+ model: 'claude-sonnet-4-6',
38
+ provider: 'anthropic',
39
+ systemPrompt: `You are a senior TypeScript developer. Write clean, well-typed,
40
+ production-quality code. Use the tools to write files to /tmp/local-model-demo/.
41
+ Always include brief JSDoc comments on exported functions.`,
42
+ tools: ['bash', 'file_write'],
43
+ maxTurns: 6,
44
+ }
45
+
46
+ /**
47
+ * Reviewer — uses a local Ollama model via the OpenAI-compatible API.
48
+ * The apiKey is required by the OpenAI SDK but Ollama ignores it,
49
+ * so we pass the placeholder string 'ollama'.
50
+ */
51
+ const reviewer: AgentConfig = {
52
+ name: 'reviewer',
53
+ model: 'llama3.1',
54
+ provider: 'openai', // 'openai' here means "OpenAI-compatible protocol", not the OpenAI cloud
55
+ baseURL: 'http://localhost:11434/v1',
56
+ apiKey: 'ollama',
57
+ systemPrompt: `You are a code reviewer. You read source files and produce a structured review.
58
+ Your review MUST include these sections:
59
+ - Summary (2-3 sentences)
60
+ - Strengths (bullet list)
61
+ - Issues (bullet list — or "None found" if the code is clean)
62
+ - Verdict: SHIP or NEEDS WORK
63
+
64
+ Be specific and constructive. Reference line numbers or function names when possible.`,
65
+ tools: ['file_read'],
66
+ maxTurns: 4,
67
+ }
68
+
69
+ // ---------------------------------------------------------------------------
70
+ // Progress handler
71
+ // ---------------------------------------------------------------------------
72
+
73
+ const taskTimes = new Map<string, number>()
74
+
75
+ function handleProgress(event: OrchestratorEvent): void {
76
+ const ts = new Date().toISOString().slice(11, 23)
77
+
78
+ switch (event.type) {
79
+ case 'task_start': {
80
+ taskTimes.set(event.task ?? '', Date.now())
81
+ const task = event.data as Task | undefined
82
+ console.log(`[${ts}] TASK READY "${task?.title ?? event.task}" → ${task?.assignee ?? '?'}`)
83
+ break
84
+ }
85
+ case 'task_complete': {
86
+ const elapsed = Date.now() - (taskTimes.get(event.task ?? '') ?? Date.now())
87
+ console.log(`[${ts}] TASK DONE task=${event.task} in ${elapsed}ms`)
88
+ break
89
+ }
90
+ case 'agent_start':
91
+ console.log(`[${ts}] AGENT START ${event.agent}`)
92
+ break
93
+ case 'agent_complete':
94
+ console.log(`[${ts}] AGENT DONE ${event.agent}`)
95
+ break
96
+ case 'error':
97
+ console.error(`[${ts}] ERROR ${event.agent ?? ''} task=${event.task ?? '?'}`)
98
+ break
99
+ }
100
+ }
101
+
102
+ // ---------------------------------------------------------------------------
103
+ // Orchestrator + Team
104
+ // ---------------------------------------------------------------------------
105
+
106
+ const orchestrator = new OpenMultiAgent({
107
+ defaultModel: 'claude-sonnet-4-6',
108
+ maxConcurrency: 2,
109
+ onProgress: handleProgress,
110
+ })
111
+
112
+ const team = orchestrator.createTeam('local-cloud-team', {
113
+ name: 'local-cloud-team',
114
+ agents: [coder, reviewer],
115
+ sharedMemory: true,
116
+ })
117
+
118
+ // ---------------------------------------------------------------------------
119
+ // Task pipeline: code → review
120
+ // ---------------------------------------------------------------------------
121
+
122
+ const OUTPUT_DIR = '/tmp/local-model-demo'
123
+
124
+ const tasks: Array<{
125
+ title: string
126
+ description: string
127
+ assignee?: string
128
+ dependsOn?: string[]
129
+ }> = [
130
+ {
131
+ title: 'Write: retry utility',
132
+ description: `Write a small but complete TypeScript utility to ${OUTPUT_DIR}/retry.ts.
133
+
134
+ The module should export:
135
+ 1. A \`RetryOptions\` interface with: maxRetries (number), delayMs (number),
136
+ backoffFactor (optional number, default 2), shouldRetry (optional predicate
137
+ taking the error and returning boolean).
138
+ 2. An async \`retry<T>(fn: () => Promise<T>, options: RetryOptions): Promise<T>\`
139
+ function that retries \`fn\` with exponential backoff.
140
+ 3. A convenience \`withRetry\` wrapper that returns a new function with retry
141
+ behaviour baked in.
142
+
143
+ Include JSDoc comments. No external dependencies — use only Node built-ins.
144
+ After writing the file, also create a small test script at ${OUTPUT_DIR}/retry-test.ts
145
+ that exercises the happy path and a failure case, then run it with \`npx tsx\`.`,
146
+ assignee: 'coder',
147
+ },
148
+ {
149
+ title: 'Review: retry utility',
150
+ description: `Read the files at ${OUTPUT_DIR}/retry.ts and ${OUTPUT_DIR}/retry-test.ts.
151
+
152
+ Produce a structured code review covering:
153
+ - Summary (2-3 sentences describing the module)
154
+ - Strengths (bullet list)
155
+ - Issues (bullet list — be specific about what and why)
156
+ - Verdict: SHIP or NEEDS WORK`,
157
+ assignee: 'reviewer',
158
+ dependsOn: ['Write: retry utility'],
159
+ },
160
+ ]
161
+
162
+ // ---------------------------------------------------------------------------
163
+ // Run
164
+ // ---------------------------------------------------------------------------
165
+
166
+ console.log('Local + Cloud model team')
167
+ console.log(` coder → Claude (${coder.model}) via Anthropic API`)
168
+ console.log(` reviewer → Ollama (${reviewer.model}) at ${reviewer.baseURL}`)
169
+ console.log()
170
+ console.log('Pipeline: coder writes code → local model reviews it')
171
+ console.log('='.repeat(60))
172
+
173
+ const result = await orchestrator.runTasks(team, tasks)
174
+
175
+ // ---------------------------------------------------------------------------
176
+ // Summary
177
+ // ---------------------------------------------------------------------------
178
+
179
+ console.log('\n' + '='.repeat(60))
180
+ console.log('Pipeline complete.\n')
181
+ console.log(`Overall success: ${result.success}`)
182
+ console.log(`Tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
183
+
184
+ console.log('\nPer-agent summary:')
185
+ for (const [name, r] of result.agentResults) {
186
+ const icon = r.success ? 'OK ' : 'FAIL'
187
+ const provider = name === 'coder' ? 'anthropic' : 'ollama (local)'
188
+ const tools = r.toolCalls.map(c => c.toolName).join(', ')
189
+ console.log(` [${icon}] ${name.padEnd(10)} (${provider.padEnd(16)}) tools: ${tools || '(none)'}`)
190
+ }
191
+
192
+ // Print the reviewer's output
193
+ const review = result.agentResults.get('reviewer')
194
+ if (review?.success) {
195
+ console.log('\nCode review (from local model):')
196
+ console.log('─'.repeat(60))
197
+ console.log(review.output)
198
+ console.log('─'.repeat(60))
199
+ }
@@ -0,0 +1,209 @@
1
+ /**
2
+ * Example 07 — Fan-Out / Aggregate (MapReduce) Pattern
3
+ *
4
+ * Demonstrates:
5
+ * - Fan-out: send the same question to N "analyst" agents in parallel
6
+ * - Aggregate: a "synthesizer" agent reads all analyst outputs and produces
7
+ * a balanced final report
8
+ * - AgentPool with runParallel() for concurrent fan-out
9
+ * - No tools needed — pure LLM reasoning to keep the focus on the pattern
10
+ *
11
+ * Run:
12
+ * npx tsx examples/07-fan-out-aggregate.ts
13
+ *
14
+ * Prerequisites:
15
+ * ANTHROPIC_API_KEY env var must be set.
16
+ */
17
+
18
+ import { Agent, AgentPool, ToolRegistry, ToolExecutor, registerBuiltInTools } from '../src/index.js'
19
+ import type { AgentConfig, AgentRunResult } from '../src/types.js'
20
+
21
+ // ---------------------------------------------------------------------------
22
+ // Analysis topic
23
+ // ---------------------------------------------------------------------------
24
+
25
+ const TOPIC = `Should a solo developer build a SaaS product that uses AI agents
26
+ for automated customer support? Consider the current state of AI technology,
27
+ market demand, competition, costs, and the unique constraints of being a solo
28
+ founder with limited time (~6 hours/day of productive work).`
29
+
30
+ // ---------------------------------------------------------------------------
31
+ // Analyst agent configs — three perspectives on the same question
32
+ // ---------------------------------------------------------------------------
33
+
34
+ const optimistConfig: AgentConfig = {
35
+ name: 'optimist',
36
+ model: 'claude-sonnet-4-6',
37
+ systemPrompt: `You are an optimistic technology analyst who focuses on
38
+ opportunities, upside potential, and emerging trends. You see possibilities
39
+ where others see obstacles. Back your optimism with concrete reasoning —
40
+ cite market trends, cost curves, and real capabilities. Keep your analysis
41
+ to 200-300 words.`,
42
+ maxTurns: 1,
43
+ temperature: 0.4,
44
+ }
45
+
46
+ const skepticConfig: AgentConfig = {
47
+ name: 'skeptic',
48
+ model: 'claude-sonnet-4-6',
49
+ systemPrompt: `You are a skeptical technology analyst who focuses on risks,
50
+ challenges, failure modes, and hidden costs. You stress-test assumptions and
51
+ ask "what could go wrong?" Back your skepticism with concrete reasoning —
52
+ cite failure rates, technical limitations, and market realities. Keep your
53
+ analysis to 200-300 words.`,
54
+ maxTurns: 1,
55
+ temperature: 0.4,
56
+ }
57
+
58
+ const pragmatistConfig: AgentConfig = {
59
+ name: 'pragmatist',
60
+ model: 'claude-sonnet-4-6',
61
+ systemPrompt: `You are a pragmatic technology analyst who focuses on practical
62
+ feasibility, execution complexity, and resource requirements. You care about
63
+ what works today, not what might work someday. You think in terms of MVPs,
64
+ timelines, and concrete tradeoffs. Keep your analysis to 200-300 words.`,
65
+ maxTurns: 1,
66
+ temperature: 0.4,
67
+ }
68
+
69
+ const synthesizerConfig: AgentConfig = {
70
+ name: 'synthesizer',
71
+ model: 'claude-sonnet-4-6',
72
+ systemPrompt: `You are a senior strategy advisor who synthesizes multiple
73
+ perspectives into a balanced, actionable recommendation. You do not simply
74
+ summarise — you weigh the arguments, identify where they agree and disagree,
75
+ and produce a clear verdict with next steps. Structure your output as:
76
+
77
+ 1. Key agreements across perspectives
78
+ 2. Key disagreements and how you weigh them
79
+ 3. Verdict (go / no-go / conditional go)
80
+ 4. Recommended next steps (3-5 bullet points)
81
+
82
+ Keep the final report to 300-400 words.`,
83
+ maxTurns: 1,
84
+ temperature: 0.3,
85
+ }
86
+
87
+ // ---------------------------------------------------------------------------
88
+ // Build agents — no tools needed for pure reasoning
89
+ // ---------------------------------------------------------------------------
90
+
91
+ function buildAgent(config: AgentConfig): Agent {
92
+ const registry = new ToolRegistry()
93
+ registerBuiltInTools(registry) // not needed here, but safe if tools are added later
94
+ const executor = new ToolExecutor(registry)
95
+ return new Agent(config, registry, executor)
96
+ }
97
+
98
+ const optimist = buildAgent(optimistConfig)
99
+ const skeptic = buildAgent(skepticConfig)
100
+ const pragmatist = buildAgent(pragmatistConfig)
101
+ const synthesizer = buildAgent(synthesizerConfig)
102
+
103
+ // ---------------------------------------------------------------------------
104
+ // Set up the pool
105
+ // ---------------------------------------------------------------------------
106
+
107
+ const pool = new AgentPool(3) // 3 analysts can run simultaneously
108
+ pool.add(optimist)
109
+ pool.add(skeptic)
110
+ pool.add(pragmatist)
111
+ pool.add(synthesizer)
112
+
113
+ console.log('Fan-Out / Aggregate (MapReduce) Pattern')
114
+ console.log('='.repeat(60))
115
+ console.log(`\nTopic: ${TOPIC.replace(/\n/g, ' ').trim()}\n`)
116
+
117
+ // ---------------------------------------------------------------------------
118
+ // Step 1: Fan-out — run all 3 analysts in parallel
119
+ // ---------------------------------------------------------------------------
120
+
121
+ console.log('[Step 1] Fan-out: 3 analysts running in parallel...\n')
122
+
123
+ const analystResults: Map<string, AgentRunResult> = await pool.runParallel([
124
+ { agent: 'optimist', prompt: TOPIC },
125
+ { agent: 'skeptic', prompt: TOPIC },
126
+ { agent: 'pragmatist', prompt: TOPIC },
127
+ ])
128
+
129
+ // Print each analyst's output (truncated)
130
+ const analysts = ['optimist', 'skeptic', 'pragmatist'] as const
131
+ for (const name of analysts) {
132
+ const result = analystResults.get(name)!
133
+ const status = result.success ? 'OK' : 'FAILED'
134
+ console.log(` ${name} [${status}] — ${result.tokenUsage.output_tokens} output tokens`)
135
+ console.log(` ${result.output.slice(0, 150).replace(/\n/g, ' ')}...`)
136
+ console.log()
137
+ }
138
+
139
+ // Check all analysts succeeded
140
+ for (const name of analysts) {
141
+ if (!analystResults.get(name)!.success) {
142
+ console.error(`Analyst '${name}' failed: ${analystResults.get(name)!.output}`)
143
+ process.exit(1)
144
+ }
145
+ }
146
+
147
+ // ---------------------------------------------------------------------------
148
+ // Step 2: Aggregate — synthesizer reads all 3 analyses
149
+ // ---------------------------------------------------------------------------
150
+
151
+ console.log('[Step 2] Aggregate: synthesizer producing final report...\n')
152
+
153
+ const synthesizerPrompt = `Three analysts have independently evaluated the same question.
154
+ Read their analyses below and produce your synthesis report.
155
+
156
+ --- OPTIMIST ---
157
+ ${analystResults.get('optimist')!.output}
158
+
159
+ --- SKEPTIC ---
160
+ ${analystResults.get('skeptic')!.output}
161
+
162
+ --- PRAGMATIST ---
163
+ ${analystResults.get('pragmatist')!.output}
164
+
165
+ Now synthesize these three perspectives into a balanced recommendation.`
166
+
167
+ const synthResult = await pool.run('synthesizer', synthesizerPrompt)
168
+
169
+ if (!synthResult.success) {
170
+ console.error('Synthesizer failed:', synthResult.output)
171
+ process.exit(1)
172
+ }
173
+
174
+ // ---------------------------------------------------------------------------
175
+ // Final output
176
+ // ---------------------------------------------------------------------------
177
+
178
+ console.log('='.repeat(60))
179
+ console.log('SYNTHESIZED REPORT')
180
+ console.log('='.repeat(60))
181
+ console.log()
182
+ console.log(synthResult.output)
183
+ console.log()
184
+ console.log('-'.repeat(60))
185
+
186
+ // ---------------------------------------------------------------------------
187
+ // Token usage comparison
188
+ // ---------------------------------------------------------------------------
189
+
190
+ console.log('\nToken Usage Summary:')
191
+ console.log('-'.repeat(60))
192
+
193
+ let totalInput = 0
194
+ let totalOutput = 0
195
+
196
+ for (const name of analysts) {
197
+ const r = analystResults.get(name)!
198
+ totalInput += r.tokenUsage.input_tokens
199
+ totalOutput += r.tokenUsage.output_tokens
200
+ console.log(` ${name.padEnd(12)} — input: ${r.tokenUsage.input_tokens}, output: ${r.tokenUsage.output_tokens}`)
201
+ }
202
+
203
+ totalInput += synthResult.tokenUsage.input_tokens
204
+ totalOutput += synthResult.tokenUsage.output_tokens
205
+ console.log(` ${'synthesizer'.padEnd(12)} — input: ${synthResult.tokenUsage.input_tokens}, output: ${synthResult.tokenUsage.output_tokens}`)
206
+ console.log('-'.repeat(60))
207
+ console.log(` ${'TOTAL'.padEnd(12)} — input: ${totalInput}, output: ${totalOutput}`)
208
+
209
+ console.log('\nDone.')
@@ -0,0 +1,203 @@
1
+ /**
2
+ * Example 08 — Gemma 4 Local Agent Team (100% Local, Zero API Cost)
3
+ *
4
+ * Demonstrates a fully local multi-agent team using Google's Gemma 4 via
5
+ * Ollama. No cloud API keys needed — everything runs on your machine.
6
+ *
7
+ * Two agents collaborate through a task pipeline:
8
+ * - researcher: uses bash + file_write to gather system info and write a report
9
+ * - summarizer: uses file_read to read the report and produce a concise summary
10
+ *
11
+ * This pattern works with any Ollama model that supports tool-calling.
12
+ * Gemma 4 (released 2026-04-02) has native tool-calling support.
13
+ *
14
+ * Run:
15
+ * no_proxy=localhost npx tsx examples/08-gemma4-local.ts
16
+ *
17
+ * Prerequisites:
18
+ * 1. Ollama >= 0.20.0 installed and running: https://ollama.com
19
+ * 2. Pull the model: ollama pull gemma4:e2b
20
+ * (or gemma4:e4b for better quality on machines with more RAM)
21
+ * 3. No API keys needed!
22
+ *
23
+ * Note: The no_proxy=localhost prefix is needed if you have an HTTP proxy
24
+ * configured, since the OpenAI SDK would otherwise route Ollama requests
25
+ * through the proxy.
26
+ */
27
+
28
+ import { OpenMultiAgent } from '../src/index.js'
29
+ import type { AgentConfig, OrchestratorEvent, Task } from '../src/types.js'
30
+
31
+ // ---------------------------------------------------------------------------
32
+ // Configuration — change this to match your Ollama setup
33
+ // ---------------------------------------------------------------------------
34
+
35
+ // See available tags at https://ollama.com/library/gemma4
36
+ const OLLAMA_MODEL = 'gemma4:e2b' // or 'gemma4:e4b', 'gemma4:26b'
37
+ const OLLAMA_BASE_URL = 'http://localhost:11434/v1'
38
+ const OUTPUT_DIR = '/tmp/gemma4-demo'
39
+
40
+ // ---------------------------------------------------------------------------
41
+ // Agents — both use Gemma 4 locally
42
+ // ---------------------------------------------------------------------------
43
+
44
+ /**
45
+ * Researcher — gathers system information using shell commands.
46
+ */
47
+ const researcher: AgentConfig = {
48
+ name: 'researcher',
49
+ model: OLLAMA_MODEL,
50
+ provider: 'openai',
51
+ baseURL: OLLAMA_BASE_URL,
52
+ apiKey: 'ollama', // placeholder — Ollama ignores this, but the OpenAI SDK requires a non-empty value
53
+ systemPrompt: `You are a system researcher. Your job is to gather information
54
+ about the current machine using shell commands and write a structured report.
55
+
56
+ Use the bash tool to run commands like: uname -a, df -h, uptime, and similar
57
+ non-destructive read-only commands.
58
+ On macOS you can also use: sw_vers, sysctl -n hw.memsize.
59
+ On Linux you can also use: cat /etc/os-release, free -h.
60
+
61
+ Then use file_write to save a Markdown report to ${OUTPUT_DIR}/system-report.md.
62
+ The report should have sections: OS, Hardware, Disk, and Uptime.
63
+ Be concise — one or two lines per section is enough.`,
64
+ tools: ['bash', 'file_write'],
65
+ maxTurns: 8,
66
+ }
67
+
68
+ /**
69
+ * Summarizer — reads the report and writes a one-paragraph executive summary.
70
+ */
71
+ const summarizer: AgentConfig = {
72
+ name: 'summarizer',
73
+ model: OLLAMA_MODEL,
74
+ provider: 'openai',
75
+ baseURL: OLLAMA_BASE_URL,
76
+ apiKey: 'ollama',
77
+ systemPrompt: `You are a technical writer. Read the system report file provided,
78
+ then produce a concise one-paragraph executive summary (3-5 sentences).
79
+ Focus on the key highlights: what OS, how much RAM, disk status, and uptime.`,
80
+ tools: ['file_read'],
81
+ maxTurns: 4,
82
+ }
83
+
84
+ // ---------------------------------------------------------------------------
85
+ // Progress handler
86
+ // ---------------------------------------------------------------------------
87
+
88
+ const taskTimes = new Map<string, number>()
89
+
90
+ function handleProgress(event: OrchestratorEvent): void {
91
+ const ts = new Date().toISOString().slice(11, 23)
92
+
93
+ switch (event.type) {
94
+ case 'task_start': {
95
+ taskTimes.set(event.task ?? '', Date.now())
96
+ const task = event.data as Task | undefined
97
+ console.log(`[${ts}] TASK START "${task?.title ?? event.task}" → ${task?.assignee ?? '?'}`)
98
+ break
99
+ }
100
+ case 'task_complete': {
101
+ const elapsed = Date.now() - (taskTimes.get(event.task ?? '') ?? Date.now())
102
+ console.log(`[${ts}] TASK DONE "${event.task}" in ${(elapsed / 1000).toFixed(1)}s`)
103
+ break
104
+ }
105
+ case 'agent_start':
106
+ console.log(`[${ts}] AGENT START ${event.agent}`)
107
+ break
108
+ case 'agent_complete':
109
+ console.log(`[${ts}] AGENT DONE ${event.agent}`)
110
+ break
111
+ case 'error':
112
+ console.error(`[${ts}] ERROR ${event.agent ?? ''} task=${event.task ?? '?'}`)
113
+ break
114
+ }
115
+ }
116
+
117
+ // ---------------------------------------------------------------------------
118
+ // Orchestrator + Team
119
+ // ---------------------------------------------------------------------------
120
+
121
+ const orchestrator = new OpenMultiAgent({
122
+ defaultModel: OLLAMA_MODEL,
123
+ maxConcurrency: 1, // run agents sequentially — local model can only serve one at a time
124
+ onProgress: handleProgress,
125
+ })
126
+
127
+ const team = orchestrator.createTeam('gemma4-team', {
128
+ name: 'gemma4-team',
129
+ agents: [researcher, summarizer],
130
+ sharedMemory: true,
131
+ })
132
+
133
+ // ---------------------------------------------------------------------------
134
+ // Task pipeline: research → summarize
135
+ // ---------------------------------------------------------------------------
136
+
137
+ const tasks: Array<{
138
+ title: string
139
+ description: string
140
+ assignee?: string
141
+ dependsOn?: string[]
142
+ }> = [
143
+ {
144
+ title: 'Gather system information',
145
+ description: `Use bash to run system info commands (uname -a, sw_vers, sysctl, df -h, uptime).
146
+ Then write a structured Markdown report to ${OUTPUT_DIR}/system-report.md with sections:
147
+ OS, Hardware, Disk, and Uptime.`,
148
+ assignee: 'researcher',
149
+ },
150
+ {
151
+ title: 'Summarize the report',
152
+ description: `Read the file at ${OUTPUT_DIR}/system-report.md.
153
+ Produce a concise one-paragraph executive summary of the system information.`,
154
+ assignee: 'summarizer',
155
+ dependsOn: ['Gather system information'],
156
+ },
157
+ ]
158
+
159
+ // ---------------------------------------------------------------------------
160
+ // Run
161
+ // ---------------------------------------------------------------------------
162
+
163
+ console.log('Gemma 4 Local Agent Team — Zero API Cost')
164
+ console.log('='.repeat(60))
165
+ console.log(` model → ${OLLAMA_MODEL} via Ollama`)
166
+ console.log(` researcher → bash + file_write`)
167
+ console.log(` summarizer → file_read`)
168
+ console.log(` output dir → ${OUTPUT_DIR}`)
169
+ console.log()
170
+ console.log('Pipeline: researcher gathers info → summarizer writes summary')
171
+ console.log('='.repeat(60))
172
+
173
+ const start = Date.now()
174
+ const result = await orchestrator.runTasks(team, tasks)
175
+ const totalTime = Date.now() - start
176
+
177
+ // ---------------------------------------------------------------------------
178
+ // Summary
179
+ // ---------------------------------------------------------------------------
180
+
181
+ console.log('\n' + '='.repeat(60))
182
+ console.log('Pipeline complete.\n')
183
+ console.log(`Overall success: ${result.success}`)
184
+ console.log(`Total time: ${(totalTime / 1000).toFixed(1)}s`)
185
+ console.log(`Tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
186
+
187
+ console.log('\nPer-agent results:')
188
+ for (const [name, r] of result.agentResults) {
189
+ const icon = r.success ? 'OK ' : 'FAIL'
190
+ const tools = r.toolCalls.map(c => c.toolName).join(', ')
191
+ console.log(` [${icon}] ${name.padEnd(12)} tools: ${tools || '(none)'}`)
192
+ }
193
+
194
+ // Print the summarizer's output
195
+ const summary = result.agentResults.get('summarizer')
196
+ if (summary?.success) {
197
+ console.log('\nExecutive Summary (from local Gemma 4):')
198
+ console.log('-'.repeat(60))
199
+ console.log(summary.output)
200
+ console.log('-'.repeat(60))
201
+ }
202
+
203
+ console.log('\nAll processing done locally. $0 API cost.')