@jackchen_me/open-multi-agent 0.2.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. package/README.md +87 -20
  2. package/dist/agent/agent.d.ts +15 -1
  3. package/dist/agent/agent.d.ts.map +1 -1
  4. package/dist/agent/agent.js +144 -10
  5. package/dist/agent/agent.js.map +1 -1
  6. package/dist/agent/loop-detector.d.ts +39 -0
  7. package/dist/agent/loop-detector.d.ts.map +1 -0
  8. package/dist/agent/loop-detector.js +122 -0
  9. package/dist/agent/loop-detector.js.map +1 -0
  10. package/dist/agent/pool.d.ts +2 -1
  11. package/dist/agent/pool.d.ts.map +1 -1
  12. package/dist/agent/pool.js +4 -2
  13. package/dist/agent/pool.js.map +1 -1
  14. package/dist/agent/runner.d.ts +23 -1
  15. package/dist/agent/runner.d.ts.map +1 -1
  16. package/dist/agent/runner.js +113 -12
  17. package/dist/agent/runner.js.map +1 -1
  18. package/dist/index.d.ts +3 -1
  19. package/dist/index.d.ts.map +1 -1
  20. package/dist/index.js +2 -0
  21. package/dist/index.js.map +1 -1
  22. package/dist/llm/adapter.d.ts +4 -1
  23. package/dist/llm/adapter.d.ts.map +1 -1
  24. package/dist/llm/adapter.js +11 -0
  25. package/dist/llm/adapter.js.map +1 -1
  26. package/dist/llm/copilot.d.ts.map +1 -1
  27. package/dist/llm/copilot.js +2 -1
  28. package/dist/llm/copilot.js.map +1 -1
  29. package/dist/llm/gemini.d.ts +65 -0
  30. package/dist/llm/gemini.d.ts.map +1 -0
  31. package/dist/llm/gemini.js +317 -0
  32. package/dist/llm/gemini.js.map +1 -0
  33. package/dist/llm/grok.d.ts +21 -0
  34. package/dist/llm/grok.d.ts.map +1 -0
  35. package/dist/llm/grok.js +24 -0
  36. package/dist/llm/grok.js.map +1 -0
  37. package/dist/llm/openai-common.d.ts +8 -1
  38. package/dist/llm/openai-common.d.ts.map +1 -1
  39. package/dist/llm/openai-common.js +35 -2
  40. package/dist/llm/openai-common.js.map +1 -1
  41. package/dist/llm/openai.d.ts +1 -1
  42. package/dist/llm/openai.d.ts.map +1 -1
  43. package/dist/llm/openai.js +20 -2
  44. package/dist/llm/openai.js.map +1 -1
  45. package/dist/orchestrator/orchestrator.d.ts.map +1 -1
  46. package/dist/orchestrator/orchestrator.js +89 -9
  47. package/dist/orchestrator/orchestrator.js.map +1 -1
  48. package/dist/task/queue.d.ts +31 -2
  49. package/dist/task/queue.d.ts.map +1 -1
  50. package/dist/task/queue.js +69 -2
  51. package/dist/task/queue.js.map +1 -1
  52. package/dist/tool/text-tool-extractor.d.ts +32 -0
  53. package/dist/tool/text-tool-extractor.d.ts.map +1 -0
  54. package/dist/tool/text-tool-extractor.js +187 -0
  55. package/dist/tool/text-tool-extractor.js.map +1 -0
  56. package/dist/types.d.ts +139 -7
  57. package/dist/types.d.ts.map +1 -1
  58. package/dist/utils/trace.d.ts +12 -0
  59. package/dist/utils/trace.d.ts.map +1 -0
  60. package/dist/utils/trace.js +30 -0
  61. package/dist/utils/trace.js.map +1 -0
  62. package/package.json +18 -2
  63. package/.github/ISSUE_TEMPLATE/bug_report.md +0 -40
  64. package/.github/ISSUE_TEMPLATE/feature_request.md +0 -23
  65. package/.github/pull_request_template.md +0 -14
  66. package/.github/workflows/ci.yml +0 -23
  67. package/CLAUDE.md +0 -72
  68. package/CODE_OF_CONDUCT.md +0 -48
  69. package/CONTRIBUTING.md +0 -72
  70. package/DECISIONS.md +0 -43
  71. package/README_zh.md +0 -217
  72. package/SECURITY.md +0 -17
  73. package/examples/01-single-agent.ts +0 -131
  74. package/examples/02-team-collaboration.ts +0 -167
  75. package/examples/03-task-pipeline.ts +0 -201
  76. package/examples/04-multi-model-team.ts +0 -261
  77. package/examples/05-copilot-test.ts +0 -49
  78. package/examples/06-local-model.ts +0 -199
  79. package/examples/07-fan-out-aggregate.ts +0 -209
  80. package/examples/08-gemma4-local.ts +0 -203
  81. package/examples/09-gemma4-auto-orchestration.ts +0 -162
  82. package/src/agent/agent.ts +0 -473
  83. package/src/agent/pool.ts +0 -278
  84. package/src/agent/runner.ts +0 -413
  85. package/src/agent/structured-output.ts +0 -126
  86. package/src/index.ts +0 -167
  87. package/src/llm/adapter.ts +0 -87
  88. package/src/llm/anthropic.ts +0 -389
  89. package/src/llm/copilot.ts +0 -551
  90. package/src/llm/openai-common.ts +0 -255
  91. package/src/llm/openai.ts +0 -272
  92. package/src/memory/shared.ts +0 -181
  93. package/src/memory/store.ts +0 -124
  94. package/src/orchestrator/orchestrator.ts +0 -977
  95. package/src/orchestrator/scheduler.ts +0 -352
  96. package/src/task/queue.ts +0 -394
  97. package/src/task/task.ts +0 -239
  98. package/src/team/messaging.ts +0 -232
  99. package/src/team/team.ts +0 -334
  100. package/src/tool/built-in/bash.ts +0 -187
  101. package/src/tool/built-in/file-edit.ts +0 -154
  102. package/src/tool/built-in/file-read.ts +0 -105
  103. package/src/tool/built-in/file-write.ts +0 -81
  104. package/src/tool/built-in/grep.ts +0 -362
  105. package/src/tool/built-in/index.ts +0 -50
  106. package/src/tool/executor.ts +0 -178
  107. package/src/tool/framework.ts +0 -557
  108. package/src/types.ts +0 -391
  109. package/src/utils/semaphore.ts +0 -89
  110. package/tests/semaphore.test.ts +0 -57
  111. package/tests/shared-memory.test.ts +0 -122
  112. package/tests/structured-output.test.ts +0 -331
  113. package/tests/task-queue.test.ts +0 -244
  114. package/tests/task-retry.test.ts +0 -368
  115. package/tests/task-utils.test.ts +0 -155
  116. package/tests/tool-executor.test.ts +0 -193
  117. package/tsconfig.json +0 -25
@@ -1,473 +0,0 @@
1
- /**
2
- * @fileoverview High-level Agent class for open-multi-agent.
3
- *
4
- * {@link Agent} is the primary interface most consumers interact with.
5
- * It wraps {@link AgentRunner} with:
6
- * - Persistent conversation history (`prompt()`)
7
- * - Fresh-conversation semantics (`run()`)
8
- * - Streaming support (`stream()`)
9
- * - Dynamic tool registration at runtime
10
- * - Full lifecycle state tracking (`idle → running → completed | error`)
11
- *
12
- * @example
13
- * ```ts
14
- * const agent = new Agent({
15
- * name: 'researcher',
16
- * model: 'claude-opus-4-6',
17
- * systemPrompt: 'You are a rigorous research assistant.',
18
- * tools: ['web_search', 'read_file'],
19
- * })
20
- *
21
- * const result = await agent.run('Summarise the last 3 IPCC reports.')
22
- * console.log(result.output)
23
- * ```
24
- */
25
-
26
- import type {
27
- AgentConfig,
28
- AgentState,
29
- AgentRunResult,
30
- LLMMessage,
31
- StreamEvent,
32
- TokenUsage,
33
- ToolUseContext,
34
- } from '../types.js'
35
- import type { ToolDefinition as FrameworkToolDefinition, ToolRegistry } from '../tool/framework.js'
36
- import type { ToolExecutor } from '../tool/executor.js'
37
- import { createAdapter } from '../llm/adapter.js'
38
- import { AgentRunner, type RunnerOptions, type RunOptions, type RunResult } from './runner.js'
39
- import {
40
- buildStructuredOutputInstruction,
41
- extractJSON,
42
- validateOutput,
43
- } from './structured-output.js'
44
-
45
- // ---------------------------------------------------------------------------
46
- // Internal helpers
47
- // ---------------------------------------------------------------------------
48
-
49
- const ZERO_USAGE: TokenUsage = { input_tokens: 0, output_tokens: 0 }
50
-
51
- function addUsage(a: TokenUsage, b: TokenUsage): TokenUsage {
52
- return {
53
- input_tokens: a.input_tokens + b.input_tokens,
54
- output_tokens: a.output_tokens + b.output_tokens,
55
- }
56
- }
57
-
58
- // ---------------------------------------------------------------------------
59
- // Agent
60
- // ---------------------------------------------------------------------------
61
-
62
- /**
63
- * High-level wrapper around {@link AgentRunner} that manages conversation
64
- * history, state transitions, and tool lifecycle.
65
- */
66
- export class Agent {
67
- readonly name: string
68
- readonly config: AgentConfig
69
-
70
- private runner: AgentRunner | null = null
71
- private state: AgentState
72
- private readonly _toolRegistry: ToolRegistry
73
- private readonly _toolExecutor: ToolExecutor
74
- private messageHistory: LLMMessage[] = []
75
-
76
- /**
77
- * @param config - Static configuration for this agent.
78
- * @param toolRegistry - Registry used to resolve and manage tools.
79
- * @param toolExecutor - Executor that dispatches tool calls.
80
- *
81
- * `toolRegistry` and `toolExecutor` are injected rather than instantiated
82
- * internally so that teams of agents can share a single registry.
83
- */
84
- constructor(
85
- config: AgentConfig,
86
- toolRegistry: ToolRegistry,
87
- toolExecutor: ToolExecutor,
88
- ) {
89
- this.name = config.name
90
- this.config = config
91
- this._toolRegistry = toolRegistry
92
- this._toolExecutor = toolExecutor
93
-
94
- this.state = {
95
- status: 'idle',
96
- messages: [],
97
- tokenUsage: ZERO_USAGE,
98
- }
99
- }
100
-
101
- // -------------------------------------------------------------------------
102
- // Initialisation (async, called lazily)
103
- // -------------------------------------------------------------------------
104
-
105
- /**
106
- * Lazily create the {@link AgentRunner}.
107
- *
108
- * The adapter is created asynchronously (it may lazy-import provider SDKs),
109
- * so we defer construction until the first `run` / `prompt` / `stream` call.
110
- */
111
- private async getRunner(): Promise<AgentRunner> {
112
- if (this.runner !== null) {
113
- return this.runner
114
- }
115
-
116
- const provider = this.config.provider ?? 'anthropic'
117
- const adapter = await createAdapter(provider, this.config.apiKey, this.config.baseURL)
118
-
119
- // Append structured-output instructions when an outputSchema is configured.
120
- let effectiveSystemPrompt = this.config.systemPrompt
121
- if (this.config.outputSchema) {
122
- const instruction = buildStructuredOutputInstruction(this.config.outputSchema)
123
- effectiveSystemPrompt = effectiveSystemPrompt
124
- ? effectiveSystemPrompt + '\n' + instruction
125
- : instruction
126
- }
127
-
128
- const runnerOptions: RunnerOptions = {
129
- model: this.config.model,
130
- systemPrompt: effectiveSystemPrompt,
131
- maxTurns: this.config.maxTurns,
132
- maxTokens: this.config.maxTokens,
133
- temperature: this.config.temperature,
134
- allowedTools: this.config.tools,
135
- agentName: this.name,
136
- agentRole: this.config.systemPrompt?.slice(0, 50) ?? 'assistant',
137
- }
138
-
139
- this.runner = new AgentRunner(
140
- adapter,
141
- this._toolRegistry,
142
- this._toolExecutor,
143
- runnerOptions,
144
- )
145
-
146
- return this.runner
147
- }
148
-
149
- // -------------------------------------------------------------------------
150
- // Primary execution methods
151
- // -------------------------------------------------------------------------
152
-
153
- /**
154
- * Run `prompt` in a fresh conversation (history is NOT used).
155
- *
156
- * Equivalent to constructing a brand-new messages array `[{ role:'user', … }]`
157
- * and calling the runner once. The agent's persistent history is not modified.
158
- *
159
- * Use this for one-shot queries where past context is irrelevant.
160
- */
161
- async run(prompt: string): Promise<AgentRunResult> {
162
- const messages: LLMMessage[] = [
163
- { role: 'user', content: [{ type: 'text', text: prompt }] },
164
- ]
165
-
166
- return this.executeRun(messages)
167
- }
168
-
169
- /**
170
- * Run `prompt` as part of the ongoing conversation.
171
- *
172
- * Appends the user message to the persistent history, runs the agent, then
173
- * appends the resulting messages to the history for the next call.
174
- *
175
- * Use this for multi-turn interactions.
176
- */
177
- async prompt(message: string): Promise<AgentRunResult> {
178
- const userMessage: LLMMessage = {
179
- role: 'user',
180
- content: [{ type: 'text', text: message }],
181
- }
182
-
183
- this.messageHistory.push(userMessage)
184
-
185
- const result = await this.executeRun([...this.messageHistory])
186
-
187
- // Persist the new messages into history so the next `prompt` sees them.
188
- for (const msg of result.messages) {
189
- this.messageHistory.push(msg)
190
- }
191
-
192
- return result
193
- }
194
-
195
- /**
196
- * Stream a fresh-conversation response, yielding {@link StreamEvent}s.
197
- *
198
- * Like {@link run}, this does not use or update the persistent history.
199
- */
200
- async *stream(prompt: string): AsyncGenerator<StreamEvent> {
201
- const messages: LLMMessage[] = [
202
- { role: 'user', content: [{ type: 'text', text: prompt }] },
203
- ]
204
-
205
- yield* this.executeStream(messages)
206
- }
207
-
208
- // -------------------------------------------------------------------------
209
- // State management
210
- // -------------------------------------------------------------------------
211
-
212
- /** Return a snapshot of the current agent state (does not clone nested objects). */
213
- getState(): AgentState {
214
- return { ...this.state, messages: [...this.state.messages] }
215
- }
216
-
217
- /** Return a copy of the persistent message history. */
218
- getHistory(): LLMMessage[] {
219
- return [...this.messageHistory]
220
- }
221
-
222
- /**
223
- * Clear the persistent conversation history and reset state to `idle`.
224
- * Does NOT discard the runner instance — the adapter connection is reused.
225
- */
226
- reset(): void {
227
- this.messageHistory = []
228
- this.state = {
229
- status: 'idle',
230
- messages: [],
231
- tokenUsage: ZERO_USAGE,
232
- }
233
- }
234
-
235
- // -------------------------------------------------------------------------
236
- // Dynamic tool management
237
- // -------------------------------------------------------------------------
238
-
239
- /**
240
- * Register a new tool with this agent's tool registry at runtime.
241
- *
242
- * The tool becomes available to the next LLM call — no restart required.
243
- */
244
- addTool(tool: FrameworkToolDefinition): void {
245
- this._toolRegistry.register(tool)
246
- }
247
-
248
- /**
249
- * Deregister a tool by name.
250
- * If the tool is not registered this is a no-op (no error is thrown).
251
- */
252
- removeTool(name: string): void {
253
- this._toolRegistry.deregister(name)
254
- }
255
-
256
- /** Return the names of all currently registered tools. */
257
- getTools(): string[] {
258
- return this._toolRegistry.list().map((t) => t.name)
259
- }
260
-
261
- // -------------------------------------------------------------------------
262
- // Private execution core
263
- // -------------------------------------------------------------------------
264
-
265
- /**
266
- * Shared execution path used by both `run` and `prompt`.
267
- * Handles state transitions and error wrapping.
268
- */
269
- private async executeRun(messages: LLMMessage[]): Promise<AgentRunResult> {
270
- this.transitionTo('running')
271
-
272
- try {
273
- const runner = await this.getRunner()
274
- const runOptions: RunOptions = {
275
- onMessage: msg => {
276
- this.state.messages.push(msg)
277
- },
278
- }
279
-
280
- const result = await runner.run(messages, runOptions)
281
- this.state.tokenUsage = addUsage(this.state.tokenUsage, result.tokenUsage)
282
-
283
- // --- Structured output validation ---
284
- if (this.config.outputSchema) {
285
- return this.validateStructuredOutput(
286
- messages,
287
- result,
288
- runner,
289
- runOptions,
290
- )
291
- }
292
-
293
- this.transitionTo('completed')
294
- return this.toAgentRunResult(result, true)
295
- } catch (err) {
296
- const error = err instanceof Error ? err : new Error(String(err))
297
- this.transitionToError(error)
298
-
299
- return {
300
- success: false,
301
- output: error.message,
302
- messages: [],
303
- tokenUsage: ZERO_USAGE,
304
- toolCalls: [],
305
- structured: undefined,
306
- }
307
- }
308
- }
309
-
310
- /**
311
- * Validate agent output against the configured `outputSchema`.
312
- * On first validation failure, retry once with error feedback.
313
- */
314
- private async validateStructuredOutput(
315
- originalMessages: LLMMessage[],
316
- result: RunResult,
317
- runner: AgentRunner,
318
- runOptions: RunOptions,
319
- ): Promise<AgentRunResult> {
320
- const schema = this.config.outputSchema!
321
-
322
- // First attempt
323
- let firstAttemptError: unknown
324
- try {
325
- const parsed = extractJSON(result.output)
326
- const validated = validateOutput(schema, parsed)
327
- this.transitionTo('completed')
328
- return this.toAgentRunResult(result, true, validated)
329
- } catch (e) {
330
- firstAttemptError = e
331
- }
332
-
333
- // Retry: send full context + error feedback
334
- const errorMsg = firstAttemptError instanceof Error
335
- ? firstAttemptError.message
336
- : String(firstAttemptError)
337
-
338
- const errorFeedbackMessage: LLMMessage = {
339
- role: 'user' as const,
340
- content: [{
341
- type: 'text' as const,
342
- text: [
343
- 'Your previous response did not produce valid JSON matching the required schema.',
344
- '',
345
- `Error: ${errorMsg}`,
346
- '',
347
- 'Please try again. Respond with ONLY valid JSON, no other text.',
348
- ].join('\n'),
349
- }],
350
- }
351
-
352
- const retryMessages: LLMMessage[] = [
353
- ...originalMessages,
354
- ...result.messages,
355
- errorFeedbackMessage,
356
- ]
357
-
358
- const retryResult = await runner.run(retryMessages, runOptions)
359
- this.state.tokenUsage = addUsage(this.state.tokenUsage, retryResult.tokenUsage)
360
-
361
- const mergedTokenUsage = addUsage(result.tokenUsage, retryResult.tokenUsage)
362
- // Include the error feedback turn to maintain alternating user/assistant roles,
363
- // which is required by Anthropic's API for subsequent prompt() calls.
364
- const mergedMessages = [...result.messages, errorFeedbackMessage, ...retryResult.messages]
365
- const mergedToolCalls = [...result.toolCalls, ...retryResult.toolCalls]
366
-
367
- try {
368
- const parsed = extractJSON(retryResult.output)
369
- const validated = validateOutput(schema, parsed)
370
- this.transitionTo('completed')
371
- return {
372
- success: true,
373
- output: retryResult.output,
374
- messages: mergedMessages,
375
- tokenUsage: mergedTokenUsage,
376
- toolCalls: mergedToolCalls,
377
- structured: validated,
378
- }
379
- } catch {
380
- // Retry also failed
381
- this.transitionTo('completed')
382
- return {
383
- success: false,
384
- output: retryResult.output,
385
- messages: mergedMessages,
386
- tokenUsage: mergedTokenUsage,
387
- toolCalls: mergedToolCalls,
388
- structured: undefined,
389
- }
390
- }
391
- }
392
-
393
- /**
394
- * Shared streaming path used by `stream`.
395
- * Handles state transitions and error wrapping.
396
- */
397
- private async *executeStream(messages: LLMMessage[]): AsyncGenerator<StreamEvent> {
398
- this.transitionTo('running')
399
-
400
- try {
401
- const runner = await this.getRunner()
402
-
403
- for await (const event of runner.stream(messages)) {
404
- if (event.type === 'done') {
405
- const result = event.data as import('./runner.js').RunResult
406
- this.state.tokenUsage = addUsage(this.state.tokenUsage, result.tokenUsage)
407
- this.transitionTo('completed')
408
- } else if (event.type === 'error') {
409
- const error = event.data instanceof Error
410
- ? event.data
411
- : new Error(String(event.data))
412
- this.transitionToError(error)
413
- }
414
-
415
- yield event
416
- }
417
- } catch (err) {
418
- const error = err instanceof Error ? err : new Error(String(err))
419
- this.transitionToError(error)
420
- yield { type: 'error', data: error } satisfies StreamEvent
421
- }
422
- }
423
-
424
- // -------------------------------------------------------------------------
425
- // State transition helpers
426
- // -------------------------------------------------------------------------
427
-
428
- private transitionTo(status: 'idle' | 'running' | 'completed' | 'error'): void {
429
- this.state = { ...this.state, status }
430
- }
431
-
432
- private transitionToError(error: Error): void {
433
- this.state = { ...this.state, status: 'error', error }
434
- }
435
-
436
- // -------------------------------------------------------------------------
437
- // Result mapping
438
- // -------------------------------------------------------------------------
439
-
440
- private toAgentRunResult(
441
- result: RunResult,
442
- success: boolean,
443
- structured?: unknown,
444
- ): AgentRunResult {
445
- return {
446
- success,
447
- output: result.output,
448
- messages: result.messages,
449
- tokenUsage: result.tokenUsage,
450
- toolCalls: result.toolCalls,
451
- structured,
452
- }
453
- }
454
-
455
- // -------------------------------------------------------------------------
456
- // ToolUseContext builder (for direct use by subclasses or advanced callers)
457
- // -------------------------------------------------------------------------
458
-
459
- /**
460
- * Build a {@link ToolUseContext} that identifies this agent.
461
- * Exposed so team orchestrators can inject richer context (e.g. `TeamInfo`).
462
- */
463
- buildToolContext(abortSignal?: AbortSignal): ToolUseContext {
464
- return {
465
- agent: {
466
- name: this.name,
467
- role: this.config.systemPrompt?.slice(0, 60) ?? 'assistant',
468
- model: this.config.model,
469
- },
470
- abortSignal,
471
- }
472
- }
473
- }