@jackchen_me/open-multi-agent 0.2.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. package/.github/workflows/ci.yml +1 -1
  2. package/CLAUDE.md +11 -3
  3. package/README.md +87 -20
  4. package/README_zh.md +85 -25
  5. package/dist/agent/agent.d.ts +15 -1
  6. package/dist/agent/agent.d.ts.map +1 -1
  7. package/dist/agent/agent.js +144 -10
  8. package/dist/agent/agent.js.map +1 -1
  9. package/dist/agent/loop-detector.d.ts +39 -0
  10. package/dist/agent/loop-detector.d.ts.map +1 -0
  11. package/dist/agent/loop-detector.js +122 -0
  12. package/dist/agent/loop-detector.js.map +1 -0
  13. package/dist/agent/pool.d.ts +2 -1
  14. package/dist/agent/pool.d.ts.map +1 -1
  15. package/dist/agent/pool.js +4 -2
  16. package/dist/agent/pool.js.map +1 -1
  17. package/dist/agent/runner.d.ts +23 -1
  18. package/dist/agent/runner.d.ts.map +1 -1
  19. package/dist/agent/runner.js +113 -12
  20. package/dist/agent/runner.js.map +1 -1
  21. package/dist/index.d.ts +3 -1
  22. package/dist/index.d.ts.map +1 -1
  23. package/dist/index.js +2 -0
  24. package/dist/index.js.map +1 -1
  25. package/dist/llm/adapter.d.ts +4 -1
  26. package/dist/llm/adapter.d.ts.map +1 -1
  27. package/dist/llm/adapter.js +11 -0
  28. package/dist/llm/adapter.js.map +1 -1
  29. package/dist/llm/copilot.d.ts.map +1 -1
  30. package/dist/llm/copilot.js +2 -1
  31. package/dist/llm/copilot.js.map +1 -1
  32. package/dist/llm/gemini.d.ts +65 -0
  33. package/dist/llm/gemini.d.ts.map +1 -0
  34. package/dist/llm/gemini.js +317 -0
  35. package/dist/llm/gemini.js.map +1 -0
  36. package/dist/llm/grok.d.ts +21 -0
  37. package/dist/llm/grok.d.ts.map +1 -0
  38. package/dist/llm/grok.js +24 -0
  39. package/dist/llm/grok.js.map +1 -0
  40. package/dist/llm/openai-common.d.ts +8 -1
  41. package/dist/llm/openai-common.d.ts.map +1 -1
  42. package/dist/llm/openai-common.js +35 -2
  43. package/dist/llm/openai-common.js.map +1 -1
  44. package/dist/llm/openai.d.ts +1 -1
  45. package/dist/llm/openai.d.ts.map +1 -1
  46. package/dist/llm/openai.js +20 -2
  47. package/dist/llm/openai.js.map +1 -1
  48. package/dist/orchestrator/orchestrator.d.ts.map +1 -1
  49. package/dist/orchestrator/orchestrator.js +89 -9
  50. package/dist/orchestrator/orchestrator.js.map +1 -1
  51. package/dist/task/queue.d.ts +31 -2
  52. package/dist/task/queue.d.ts.map +1 -1
  53. package/dist/task/queue.js +69 -2
  54. package/dist/task/queue.js.map +1 -1
  55. package/dist/tool/text-tool-extractor.d.ts +32 -0
  56. package/dist/tool/text-tool-extractor.d.ts.map +1 -0
  57. package/dist/tool/text-tool-extractor.js +187 -0
  58. package/dist/tool/text-tool-extractor.js.map +1 -0
  59. package/dist/types.d.ts +139 -7
  60. package/dist/types.d.ts.map +1 -1
  61. package/dist/utils/trace.d.ts +12 -0
  62. package/dist/utils/trace.d.ts.map +1 -0
  63. package/dist/utils/trace.js +30 -0
  64. package/dist/utils/trace.js.map +1 -0
  65. package/examples/06-local-model.ts +1 -0
  66. package/examples/08-gemma4-local.ts +76 -87
  67. package/examples/09-structured-output.ts +73 -0
  68. package/examples/10-task-retry.ts +132 -0
  69. package/examples/11-trace-observability.ts +133 -0
  70. package/examples/12-grok.ts +154 -0
  71. package/examples/13-gemini.ts +48 -0
  72. package/package.json +11 -1
  73. package/src/agent/agent.ts +159 -10
  74. package/src/agent/loop-detector.ts +137 -0
  75. package/src/agent/pool.ts +9 -2
  76. package/src/agent/runner.ts +148 -19
  77. package/src/index.ts +15 -0
  78. package/src/llm/adapter.ts +12 -1
  79. package/src/llm/copilot.ts +2 -1
  80. package/src/llm/gemini.ts +378 -0
  81. package/src/llm/grok.ts +29 -0
  82. package/src/llm/openai-common.ts +41 -2
  83. package/src/llm/openai.ts +23 -3
  84. package/src/orchestrator/orchestrator.ts +105 -11
  85. package/src/task/queue.ts +73 -3
  86. package/src/tool/text-tool-extractor.ts +219 -0
  87. package/src/types.ts +157 -6
  88. package/src/utils/trace.ts +34 -0
  89. package/tests/agent-hooks.test.ts +473 -0
  90. package/tests/agent-pool.test.ts +212 -0
  91. package/tests/approval.test.ts +464 -0
  92. package/tests/built-in-tools.test.ts +393 -0
  93. package/tests/gemini-adapter.test.ts +97 -0
  94. package/tests/grok-adapter.test.ts +74 -0
  95. package/tests/llm-adapters.test.ts +357 -0
  96. package/tests/loop-detection.test.ts +456 -0
  97. package/tests/openai-fallback.test.ts +159 -0
  98. package/tests/orchestrator.test.ts +281 -0
  99. package/tests/scheduler.test.ts +221 -0
  100. package/tests/team-messaging.test.ts +329 -0
  101. package/tests/text-tool-extractor.test.ts +170 -0
  102. package/tests/trace.test.ts +453 -0
  103. package/vitest.config.ts +9 -0
  104. package/examples/09-gemma4-auto-orchestration.ts +0 -162
@@ -0,0 +1,453 @@
1
+ import { describe, it, expect, vi } from 'vitest'
2
+ import { z } from 'zod'
3
+ import { Agent } from '../src/agent/agent.js'
4
+ import { AgentRunner, type RunOptions } from '../src/agent/runner.js'
5
+ import { ToolRegistry, defineTool } from '../src/tool/framework.js'
6
+ import { ToolExecutor } from '../src/tool/executor.js'
7
+ import { executeWithRetry } from '../src/orchestrator/orchestrator.js'
8
+ import { emitTrace, generateRunId } from '../src/utils/trace.js'
9
+ import { createTask } from '../src/task/task.js'
10
+ import type {
11
+ AgentConfig,
12
+ AgentRunResult,
13
+ LLMAdapter,
14
+ LLMResponse,
15
+ TraceEvent,
16
+ } from '../src/types.js'
17
+
18
+ // ---------------------------------------------------------------------------
19
+ // Mock adapters
20
+ // ---------------------------------------------------------------------------
21
+
22
+ function mockAdapter(responses: LLMResponse[]): LLMAdapter {
23
+ let callIndex = 0
24
+ return {
25
+ name: 'mock',
26
+ async chat() {
27
+ return responses[callIndex++]!
28
+ },
29
+ async *stream() {
30
+ /* unused */
31
+ },
32
+ }
33
+ }
34
+
35
+ function textResponse(text: string): LLMResponse {
36
+ return {
37
+ id: `resp-${Math.random().toString(36).slice(2)}`,
38
+ content: [{ type: 'text' as const, text }],
39
+ model: 'mock-model',
40
+ stop_reason: 'end_turn',
41
+ usage: { input_tokens: 10, output_tokens: 20 },
42
+ }
43
+ }
44
+
45
+ function toolUseResponse(toolName: string, input: Record<string, unknown>): LLMResponse {
46
+ return {
47
+ id: `resp-${Math.random().toString(36).slice(2)}`,
48
+ content: [
49
+ {
50
+ type: 'tool_use' as const,
51
+ id: `tu-${Math.random().toString(36).slice(2)}`,
52
+ name: toolName,
53
+ input,
54
+ },
55
+ ],
56
+ model: 'mock-model',
57
+ stop_reason: 'tool_use',
58
+ usage: { input_tokens: 15, output_tokens: 25 },
59
+ }
60
+ }
61
+
62
+ function buildMockAgent(
63
+ config: AgentConfig,
64
+ responses: LLMResponse[],
65
+ registry?: ToolRegistry,
66
+ executor?: ToolExecutor,
67
+ ): Agent {
68
+ const reg = registry ?? new ToolRegistry()
69
+ const exec = executor ?? new ToolExecutor(reg)
70
+ const adapter = mockAdapter(responses)
71
+ const agent = new Agent(config, reg, exec)
72
+
73
+ const runner = new AgentRunner(adapter, reg, exec, {
74
+ model: config.model,
75
+ systemPrompt: config.systemPrompt,
76
+ maxTurns: config.maxTurns,
77
+ maxTokens: config.maxTokens,
78
+ temperature: config.temperature,
79
+ agentName: config.name,
80
+ })
81
+ ;(agent as any).runner = runner
82
+
83
+ return agent
84
+ }
85
+
86
+ // ---------------------------------------------------------------------------
87
+ // emitTrace helper
88
+ // ---------------------------------------------------------------------------
89
+
90
+ describe('emitTrace', () => {
91
+ it('does nothing when fn is undefined', () => {
92
+ // Should not throw
93
+ emitTrace(undefined, {
94
+ type: 'agent',
95
+ runId: 'r1',
96
+ agent: 'a',
97
+ turns: 1,
98
+ tokens: { input_tokens: 0, output_tokens: 0 },
99
+ toolCalls: 0,
100
+ startMs: 0,
101
+ endMs: 0,
102
+ durationMs: 0,
103
+ })
104
+ })
105
+
106
+ it('calls fn with the event', () => {
107
+ const fn = vi.fn()
108
+ const event: TraceEvent = {
109
+ type: 'agent',
110
+ runId: 'r1',
111
+ agent: 'a',
112
+ turns: 1,
113
+ tokens: { input_tokens: 0, output_tokens: 0 },
114
+ toolCalls: 0,
115
+ startMs: 0,
116
+ endMs: 0,
117
+ durationMs: 0,
118
+ }
119
+ emitTrace(fn, event)
120
+ expect(fn).toHaveBeenCalledWith(event)
121
+ })
122
+
123
+ it('swallows errors thrown by callback', () => {
124
+ const fn = () => { throw new Error('boom') }
125
+ expect(() =>
126
+ emitTrace(fn, {
127
+ type: 'agent',
128
+ runId: 'r1',
129
+ agent: 'a',
130
+ turns: 1,
131
+ tokens: { input_tokens: 0, output_tokens: 0 },
132
+ toolCalls: 0,
133
+ startMs: 0,
134
+ endMs: 0,
135
+ durationMs: 0,
136
+ }),
137
+ ).not.toThrow()
138
+ })
139
+
140
+ it('swallows rejected promises from async callbacks', async () => {
141
+ // An async onTrace that rejects should not produce unhandled rejection
142
+ const fn = async () => { throw new Error('async boom') }
143
+ emitTrace(fn as unknown as (event: TraceEvent) => void, {
144
+ type: 'agent',
145
+ runId: 'r1',
146
+ agent: 'a',
147
+ turns: 1,
148
+ tokens: { input_tokens: 0, output_tokens: 0 },
149
+ toolCalls: 0,
150
+ startMs: 0,
151
+ endMs: 0,
152
+ durationMs: 0,
153
+ })
154
+ // If the rejection is not caught, vitest will fail with unhandled rejection.
155
+ // Give the microtask queue a tick to surface any unhandled rejection.
156
+ await new Promise(resolve => setTimeout(resolve, 10))
157
+ })
158
+ })
159
+
160
+ describe('generateRunId', () => {
161
+ it('returns a UUID string', () => {
162
+ const id = generateRunId()
163
+ expect(id).toMatch(/^[0-9a-f-]{36}$/)
164
+ })
165
+
166
+ it('returns unique IDs', () => {
167
+ const ids = new Set(Array.from({ length: 100 }, generateRunId))
168
+ expect(ids.size).toBe(100)
169
+ })
170
+ })
171
+
172
+ // ---------------------------------------------------------------------------
173
+ // AgentRunner trace events
174
+ // ---------------------------------------------------------------------------
175
+
176
+ describe('AgentRunner trace events', () => {
177
+ it('emits llm_call trace for each LLM turn', async () => {
178
+ const traces: TraceEvent[] = []
179
+ const registry = new ToolRegistry()
180
+ const executor = new ToolExecutor(registry)
181
+ const adapter = mockAdapter([textResponse('Hello!')])
182
+
183
+ const runner = new AgentRunner(adapter, registry, executor, {
184
+ model: 'test-model',
185
+ agentName: 'test-agent',
186
+ })
187
+
188
+ const runOptions: RunOptions = {
189
+ onTrace: (e) => traces.push(e),
190
+ runId: 'run-1',
191
+ traceAgent: 'test-agent',
192
+ }
193
+
194
+ await runner.run(
195
+ [{ role: 'user', content: [{ type: 'text', text: 'hi' }] }],
196
+ runOptions,
197
+ )
198
+
199
+ const llmTraces = traces.filter(t => t.type === 'llm_call')
200
+ expect(llmTraces).toHaveLength(1)
201
+
202
+ const llm = llmTraces[0]!
203
+ expect(llm.type).toBe('llm_call')
204
+ expect(llm.runId).toBe('run-1')
205
+ expect(llm.agent).toBe('test-agent')
206
+ expect(llm.model).toBe('test-model')
207
+ expect(llm.turn).toBe(1)
208
+ expect(llm.tokens).toEqual({ input_tokens: 10, output_tokens: 20 })
209
+ expect(llm.durationMs).toBeGreaterThanOrEqual(0)
210
+ expect(llm.startMs).toBeLessThanOrEqual(llm.endMs)
211
+ })
212
+
213
+ it('emits tool_call trace with correct fields', async () => {
214
+ const traces: TraceEvent[] = []
215
+ const registry = new ToolRegistry()
216
+ registry.register(
217
+ defineTool({
218
+ name: 'echo',
219
+ description: 'echoes',
220
+ inputSchema: z.object({ msg: z.string() }),
221
+ execute: async ({ msg }) => ({ data: msg }),
222
+ }),
223
+ )
224
+ const executor = new ToolExecutor(registry)
225
+ const adapter = mockAdapter([
226
+ toolUseResponse('echo', { msg: 'hello' }),
227
+ textResponse('Done'),
228
+ ])
229
+
230
+ const runner = new AgentRunner(adapter, registry, executor, {
231
+ model: 'test-model',
232
+ agentName: 'tooler',
233
+ })
234
+
235
+ await runner.run(
236
+ [{ role: 'user', content: [{ type: 'text', text: 'test' }] }],
237
+ { onTrace: (e) => traces.push(e), runId: 'run-2', traceAgent: 'tooler' },
238
+ )
239
+
240
+ const toolTraces = traces.filter(t => t.type === 'tool_call')
241
+ expect(toolTraces).toHaveLength(1)
242
+
243
+ const tool = toolTraces[0]!
244
+ expect(tool.type).toBe('tool_call')
245
+ expect(tool.runId).toBe('run-2')
246
+ expect(tool.agent).toBe('tooler')
247
+ expect(tool.tool).toBe('echo')
248
+ expect(tool.isError).toBe(false)
249
+ expect(tool.durationMs).toBeGreaterThanOrEqual(0)
250
+ })
251
+
252
+ it('tool_call trace has isError: true on tool failure', async () => {
253
+ const traces: TraceEvent[] = []
254
+ const registry = new ToolRegistry()
255
+ registry.register(
256
+ defineTool({
257
+ name: 'boom',
258
+ description: 'fails',
259
+ inputSchema: z.object({}),
260
+ execute: async () => { throw new Error('fail') },
261
+ }),
262
+ )
263
+ const executor = new ToolExecutor(registry)
264
+ const adapter = mockAdapter([
265
+ toolUseResponse('boom', {}),
266
+ textResponse('Handled'),
267
+ ])
268
+
269
+ const runner = new AgentRunner(adapter, registry, executor, {
270
+ model: 'test-model',
271
+ agentName: 'err-agent',
272
+ })
273
+
274
+ await runner.run(
275
+ [{ role: 'user', content: [{ type: 'text', text: 'test' }] }],
276
+ { onTrace: (e) => traces.push(e), runId: 'run-3', traceAgent: 'err-agent' },
277
+ )
278
+
279
+ const toolTraces = traces.filter(t => t.type === 'tool_call')
280
+ expect(toolTraces).toHaveLength(1)
281
+ expect(toolTraces[0]!.isError).toBe(true)
282
+ })
283
+
284
+ it('does not call Date.now for LLM timing when onTrace is absent', async () => {
285
+ // This test just verifies no errors occur when onTrace is not provided
286
+ const registry = new ToolRegistry()
287
+ const executor = new ToolExecutor(registry)
288
+ const adapter = mockAdapter([textResponse('hi')])
289
+
290
+ const runner = new AgentRunner(adapter, registry, executor, {
291
+ model: 'test-model',
292
+ })
293
+
294
+ const result = await runner.run(
295
+ [{ role: 'user', content: [{ type: 'text', text: 'test' }] }],
296
+ {},
297
+ )
298
+
299
+ expect(result.output).toBe('hi')
300
+ })
301
+ })
302
+
303
+ // ---------------------------------------------------------------------------
304
+ // Agent-level trace events
305
+ // ---------------------------------------------------------------------------
306
+
307
+ describe('Agent trace events', () => {
308
+ it('emits agent trace with turns, tokens, and toolCalls', async () => {
309
+ const traces: TraceEvent[] = []
310
+ const config: AgentConfig = {
311
+ name: 'my-agent',
312
+ model: 'mock-model',
313
+ systemPrompt: 'You are a test.',
314
+ }
315
+
316
+ const agent = buildMockAgent(config, [textResponse('Hello world')])
317
+
318
+ const runOptions: Partial<RunOptions> = {
319
+ onTrace: (e) => traces.push(e),
320
+ runId: 'run-agent-1',
321
+ traceAgent: 'my-agent',
322
+ }
323
+
324
+ const result = await agent.run('Say hello', runOptions)
325
+ expect(result.success).toBe(true)
326
+
327
+ const agentTraces = traces.filter(t => t.type === 'agent')
328
+ expect(agentTraces).toHaveLength(1)
329
+
330
+ const at = agentTraces[0]!
331
+ expect(at.type).toBe('agent')
332
+ expect(at.runId).toBe('run-agent-1')
333
+ expect(at.agent).toBe('my-agent')
334
+ expect(at.turns).toBe(1) // one assistant message
335
+ expect(at.tokens).toEqual({ input_tokens: 10, output_tokens: 20 })
336
+ expect(at.toolCalls).toBe(0)
337
+ expect(at.durationMs).toBeGreaterThanOrEqual(0)
338
+ })
339
+
340
+ it('all traces share the same runId', async () => {
341
+ const traces: TraceEvent[] = []
342
+ const registry = new ToolRegistry()
343
+ registry.register(
344
+ defineTool({
345
+ name: 'greet',
346
+ description: 'greets',
347
+ inputSchema: z.object({ name: z.string() }),
348
+ execute: async ({ name }) => ({ data: `Hi ${name}` }),
349
+ }),
350
+ )
351
+ const executor = new ToolExecutor(registry)
352
+ const config: AgentConfig = {
353
+ name: 'multi-trace-agent',
354
+ model: 'mock-model',
355
+ tools: ['greet'],
356
+ }
357
+
358
+ const agent = buildMockAgent(
359
+ config,
360
+ [
361
+ toolUseResponse('greet', { name: 'world' }),
362
+ textResponse('Done'),
363
+ ],
364
+ registry,
365
+ executor,
366
+ )
367
+
368
+ const runId = 'shared-run-id'
369
+ await agent.run('test', {
370
+ onTrace: (e) => traces.push(e),
371
+ runId,
372
+ traceAgent: 'multi-trace-agent',
373
+ })
374
+
375
+ // Should have: 2 llm_call, 1 tool_call, 1 agent
376
+ expect(traces.length).toBeGreaterThanOrEqual(4)
377
+
378
+ for (const trace of traces) {
379
+ expect(trace.runId).toBe(runId)
380
+ }
381
+ })
382
+
383
+ it('onTrace error does not break agent execution', async () => {
384
+ const config: AgentConfig = {
385
+ name: 'resilient-agent',
386
+ model: 'mock-model',
387
+ }
388
+
389
+ const agent = buildMockAgent(config, [textResponse('OK')])
390
+
391
+ const result = await agent.run('test', {
392
+ onTrace: () => { throw new Error('callback exploded') },
393
+ runId: 'run-err',
394
+ traceAgent: 'resilient-agent',
395
+ })
396
+
397
+ // The run should still succeed despite the broken callback
398
+ expect(result.success).toBe(true)
399
+ expect(result.output).toBe('OK')
400
+ })
401
+
402
+ it('per-turn token usage in llm_call traces', async () => {
403
+ const traces: TraceEvent[] = []
404
+ const registry = new ToolRegistry()
405
+ registry.register(
406
+ defineTool({
407
+ name: 'noop',
408
+ description: 'noop',
409
+ inputSchema: z.object({}),
410
+ execute: async () => ({ data: 'ok' }),
411
+ }),
412
+ )
413
+ const executor = new ToolExecutor(registry)
414
+
415
+ // Two LLM calls: first triggers a tool, second is the final response
416
+ const resp1: LLMResponse = {
417
+ id: 'r1',
418
+ content: [{ type: 'tool_use', id: 'tu1', name: 'noop', input: {} }],
419
+ model: 'mock-model',
420
+ stop_reason: 'tool_use',
421
+ usage: { input_tokens: 100, output_tokens: 50 },
422
+ }
423
+ const resp2: LLMResponse = {
424
+ id: 'r2',
425
+ content: [{ type: 'text', text: 'Final answer' }],
426
+ model: 'mock-model',
427
+ stop_reason: 'end_turn',
428
+ usage: { input_tokens: 200, output_tokens: 100 },
429
+ }
430
+
431
+ const adapter = mockAdapter([resp1, resp2])
432
+ const runner = new AgentRunner(adapter, registry, executor, {
433
+ model: 'mock-model',
434
+ agentName: 'token-agent',
435
+ })
436
+
437
+ await runner.run(
438
+ [{ role: 'user', content: [{ type: 'text', text: 'go' }] }],
439
+ { onTrace: (e) => traces.push(e), runId: 'run-tok', traceAgent: 'token-agent' },
440
+ )
441
+
442
+ const llmTraces = traces.filter(t => t.type === 'llm_call')
443
+ expect(llmTraces).toHaveLength(2)
444
+
445
+ // Each trace carries its own turn's token usage, not the aggregate
446
+ expect(llmTraces[0]!.tokens).toEqual({ input_tokens: 100, output_tokens: 50 })
447
+ expect(llmTraces[1]!.tokens).toEqual({ input_tokens: 200, output_tokens: 100 })
448
+
449
+ // Turn numbers should be sequential
450
+ expect(llmTraces[0]!.turn).toBe(1)
451
+ expect(llmTraces[1]!.turn).toBe(2)
452
+ })
453
+ })
@@ -0,0 +1,9 @@
1
+ import { defineConfig } from 'vitest/config'
2
+
3
+ export default defineConfig({
4
+ test: {
5
+ coverage: {
6
+ include: ['src/**'],
7
+ },
8
+ },
9
+ })
@@ -1,162 +0,0 @@
1
- /**
2
- * Example 09 — Gemma 4 Auto-Orchestration (runTeam, 100% Local)
3
- *
4
- * Demonstrates the framework's key feature — automatic task decomposition —
5
- * powered entirely by a local Gemma 4 model. No cloud API needed.
6
- *
7
- * What happens:
8
- * 1. A Gemma 4 "coordinator" receives the goal + agent roster
9
- * 2. It outputs a structured JSON task array (title, description, assignee, dependsOn)
10
- * 3. The framework resolves dependencies, schedules tasks, and runs agents
11
- * 4. The coordinator synthesises all task results into a final answer
12
- *
13
- * This is the hardest test for a local model — it must produce valid JSON
14
- * for task decomposition AND do tool-calling for actual task execution.
15
- * Gemma 4 e2b (5.1B params) handles both reliably.
16
- *
17
- * Run:
18
- * no_proxy=localhost npx tsx examples/09-gemma4-auto-orchestration.ts
19
- *
20
- * Prerequisites:
21
- * 1. Ollama >= 0.20.0 installed and running: https://ollama.com
22
- * 2. Pull the model: ollama pull gemma4:e2b
23
- * 3. No API keys needed!
24
- *
25
- * Note: The no_proxy=localhost prefix is needed if you have an HTTP proxy
26
- * configured, since the OpenAI SDK would otherwise route Ollama requests
27
- * through the proxy.
28
- */
29
-
30
- import { OpenMultiAgent } from '../src/index.js'
31
- import type { AgentConfig, OrchestratorEvent, Task } from '../src/types.js'
32
-
33
- // ---------------------------------------------------------------------------
34
- // Configuration
35
- // ---------------------------------------------------------------------------
36
-
37
- // See available tags at https://ollama.com/library/gemma4
38
- const OLLAMA_MODEL = 'gemma4:e2b' // or 'gemma4:e4b', 'gemma4:26b'
39
- const OLLAMA_BASE_URL = 'http://localhost:11434/v1'
40
-
41
- // ---------------------------------------------------------------------------
42
- // Agents — the coordinator is created automatically by runTeam()
43
- // ---------------------------------------------------------------------------
44
-
45
- const researcher: AgentConfig = {
46
- name: 'researcher',
47
- model: OLLAMA_MODEL,
48
- provider: 'openai',
49
- baseURL: OLLAMA_BASE_URL,
50
- apiKey: 'ollama',
51
- systemPrompt: `You are a system researcher. Use bash to run non-destructive,
52
- read-only commands and report the results concisely.`,
53
- tools: ['bash'],
54
- maxTurns: 4,
55
- }
56
-
57
- const writer: AgentConfig = {
58
- name: 'writer',
59
- model: OLLAMA_MODEL,
60
- provider: 'openai',
61
- baseURL: OLLAMA_BASE_URL,
62
- apiKey: 'ollama',
63
- systemPrompt: `You are a technical writer. Use file_write to create clear,
64
- structured Markdown reports based on the information provided.`,
65
- tools: ['file_write'],
66
- maxTurns: 4,
67
- }
68
-
69
- // ---------------------------------------------------------------------------
70
- // Progress handler
71
- // ---------------------------------------------------------------------------
72
-
73
- function handleProgress(event: OrchestratorEvent): void {
74
- const ts = new Date().toISOString().slice(11, 23)
75
- switch (event.type) {
76
- case 'task_start': {
77
- const task = event.data as Task | undefined
78
- console.log(`[${ts}] TASK START "${task?.title ?? event.task}" → ${task?.assignee ?? '?'}`)
79
- break
80
- }
81
- case 'task_complete':
82
- console.log(`[${ts}] TASK DONE "${event.task}"`)
83
- break
84
- case 'agent_start':
85
- console.log(`[${ts}] AGENT START ${event.agent}`)
86
- break
87
- case 'agent_complete':
88
- console.log(`[${ts}] AGENT DONE ${event.agent}`)
89
- break
90
- case 'error':
91
- console.error(`[${ts}] ERROR ${event.agent ?? ''} task=${event.task ?? '?'}`)
92
- break
93
- }
94
- }
95
-
96
- // ---------------------------------------------------------------------------
97
- // Orchestrator — defaultModel is used for the coordinator agent
98
- // ---------------------------------------------------------------------------
99
-
100
- const orchestrator = new OpenMultiAgent({
101
- defaultModel: OLLAMA_MODEL,
102
- defaultProvider: 'openai',
103
- defaultBaseURL: OLLAMA_BASE_URL,
104
- defaultApiKey: 'ollama',
105
- maxConcurrency: 1, // local model serves one request at a time
106
- onProgress: handleProgress,
107
- })
108
-
109
- const team = orchestrator.createTeam('gemma4-auto', {
110
- name: 'gemma4-auto',
111
- agents: [researcher, writer],
112
- sharedMemory: true,
113
- })
114
-
115
- // ---------------------------------------------------------------------------
116
- // Give a goal — the framework handles the rest
117
- // ---------------------------------------------------------------------------
118
-
119
- const goal = `Check this machine's Node.js version, npm version, and OS info,
120
- then write a short Markdown summary report to /tmp/gemma4-auto/report.md`
121
-
122
- console.log('Gemma 4 Auto-Orchestration — Zero API Cost')
123
- console.log('='.repeat(60))
124
- console.log(` model → ${OLLAMA_MODEL} via Ollama (all agents + coordinator)`)
125
- console.log(` researcher → bash`)
126
- console.log(` writer → file_write`)
127
- console.log(` coordinator → auto-created by runTeam()`)
128
- console.log()
129
- console.log(`Goal: ${goal.replace(/\n/g, ' ').trim()}`)
130
- console.log('='.repeat(60))
131
-
132
- const start = Date.now()
133
- const result = await orchestrator.runTeam(team, goal)
134
- const totalTime = Date.now() - start
135
-
136
- // ---------------------------------------------------------------------------
137
- // Results
138
- // ---------------------------------------------------------------------------
139
-
140
- console.log('\n' + '='.repeat(60))
141
- console.log('Pipeline complete.\n')
142
- console.log(`Overall success: ${result.success}`)
143
- console.log(`Total time: ${(totalTime / 1000).toFixed(1)}s`)
144
- console.log(`Tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
145
-
146
- console.log('\nPer-agent results:')
147
- for (const [name, r] of result.agentResults) {
148
- const icon = r.success ? 'OK ' : 'FAIL'
149
- const tools = r.toolCalls.length > 0 ? r.toolCalls.map(c => c.toolName).join(', ') : '(none)'
150
- console.log(` [${icon}] ${name.padEnd(24)} tools: ${tools}`)
151
- }
152
-
153
- // Print the coordinator's final synthesis
154
- const coordResult = result.agentResults.get('coordinator')
155
- if (coordResult?.success) {
156
- console.log('\nFinal synthesis (from local Gemma 4 coordinator):')
157
- console.log('-'.repeat(60))
158
- console.log(coordResult.output)
159
- console.log('-'.repeat(60))
160
- }
161
-
162
- console.log('\nAll processing done locally. $0 API cost.')