@jackchen_me/open-multi-agent 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +280 -0
- package/dist/agent/agent.d.ts +121 -0
- package/dist/agent/agent.d.ts.map +1 -0
- package/dist/agent/agent.js +294 -0
- package/dist/agent/agent.js.map +1 -0
- package/dist/agent/pool.d.ts +128 -0
- package/dist/agent/pool.d.ts.map +1 -0
- package/dist/agent/pool.js +236 -0
- package/dist/agent/pool.js.map +1 -0
- package/dist/agent/runner.d.ts +120 -0
- package/dist/agent/runner.d.ts.map +1 -0
- package/dist/agent/runner.js +274 -0
- package/dist/agent/runner.js.map +1 -0
- package/dist/index.d.ts +73 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +87 -0
- package/dist/index.js.map +1 -0
- package/dist/llm/adapter.d.ts +38 -0
- package/dist/llm/adapter.d.ts.map +1 -0
- package/dist/llm/adapter.js +46 -0
- package/dist/llm/adapter.js.map +1 -0
- package/dist/llm/anthropic.d.ts +56 -0
- package/dist/llm/anthropic.d.ts.map +1 -0
- package/dist/llm/anthropic.js +307 -0
- package/dist/llm/anthropic.js.map +1 -0
- package/dist/llm/openai.d.ts +62 -0
- package/dist/llm/openai.d.ts.map +1 -0
- package/dist/llm/openai.js +424 -0
- package/dist/llm/openai.js.map +1 -0
- package/dist/memory/shared.d.ts +86 -0
- package/dist/memory/shared.d.ts.map +1 -0
- package/dist/memory/shared.js +155 -0
- package/dist/memory/shared.js.map +1 -0
- package/dist/memory/store.d.ts +64 -0
- package/dist/memory/store.d.ts.map +1 -0
- package/dist/memory/store.js +103 -0
- package/dist/memory/store.js.map +1 -0
- package/dist/orchestrator/orchestrator.d.ts +173 -0
- package/dist/orchestrator/orchestrator.d.ts.map +1 -0
- package/dist/orchestrator/orchestrator.js +698 -0
- package/dist/orchestrator/orchestrator.js.map +1 -0
- package/dist/orchestrator/scheduler.d.ts +112 -0
- package/dist/orchestrator/scheduler.d.ts.map +1 -0
- package/dist/orchestrator/scheduler.js +282 -0
- package/dist/orchestrator/scheduler.js.map +1 -0
- package/dist/task/queue.d.ts +160 -0
- package/dist/task/queue.d.ts.map +1 -0
- package/dist/task/queue.js +337 -0
- package/dist/task/queue.js.map +1 -0
- package/dist/task/task.d.ts +86 -0
- package/dist/task/task.d.ts.map +1 -0
- package/dist/task/task.js +201 -0
- package/dist/task/task.js.map +1 -0
- package/dist/team/messaging.d.ts +106 -0
- package/dist/team/messaging.d.ts.map +1 -0
- package/dist/team/messaging.js +182 -0
- package/dist/team/messaging.js.map +1 -0
- package/dist/team/team.d.ts +141 -0
- package/dist/team/team.d.ts.map +1 -0
- package/dist/team/team.js +282 -0
- package/dist/team/team.js.map +1 -0
- package/dist/tool/built-in/bash.d.ts +12 -0
- package/dist/tool/built-in/bash.d.ts.map +1 -0
- package/dist/tool/built-in/bash.js +133 -0
- package/dist/tool/built-in/bash.js.map +1 -0
- package/dist/tool/built-in/file-edit.d.ts +14 -0
- package/dist/tool/built-in/file-edit.d.ts.map +1 -0
- package/dist/tool/built-in/file-edit.js +130 -0
- package/dist/tool/built-in/file-edit.js.map +1 -0
- package/dist/tool/built-in/file-read.d.ts +12 -0
- package/dist/tool/built-in/file-read.d.ts.map +1 -0
- package/dist/tool/built-in/file-read.js +82 -0
- package/dist/tool/built-in/file-read.js.map +1 -0
- package/dist/tool/built-in/file-write.d.ts +11 -0
- package/dist/tool/built-in/file-write.d.ts.map +1 -0
- package/dist/tool/built-in/file-write.js +70 -0
- package/dist/tool/built-in/file-write.js.map +1 -0
- package/dist/tool/built-in/grep.d.ts +15 -0
- package/dist/tool/built-in/grep.d.ts.map +1 -0
- package/dist/tool/built-in/grep.js +287 -0
- package/dist/tool/built-in/grep.js.map +1 -0
- package/dist/tool/built-in/index.d.ts +36 -0
- package/dist/tool/built-in/index.d.ts.map +1 -0
- package/dist/tool/built-in/index.js +45 -0
- package/dist/tool/built-in/index.js.map +1 -0
- package/dist/tool/executor.d.ts +71 -0
- package/dist/tool/executor.d.ts.map +1 -0
- package/dist/tool/executor.js +116 -0
- package/dist/tool/executor.js.map +1 -0
- package/dist/tool/framework.d.ts +143 -0
- package/dist/tool/framework.d.ts.map +1 -0
- package/dist/tool/framework.js +371 -0
- package/dist/tool/framework.js.map +1 -0
- package/dist/types.d.ts +285 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +8 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/semaphore.d.ts +47 -0
- package/dist/utils/semaphore.d.ts.map +1 -0
- package/dist/utils/semaphore.js +85 -0
- package/dist/utils/semaphore.js.map +1 -0
- package/examples/01-single-agent.ts +131 -0
- package/examples/02-team-collaboration.ts +167 -0
- package/examples/03-task-pipeline.ts +201 -0
- package/examples/04-multi-model-team.ts +261 -0
- package/package.json +49 -0
- package/src/agent/agent.ts +364 -0
- package/src/agent/pool.ts +278 -0
- package/src/agent/runner.ts +413 -0
- package/src/index.ts +166 -0
- package/src/llm/adapter.ts +74 -0
- package/src/llm/anthropic.ts +388 -0
- package/src/llm/openai.ts +522 -0
- package/src/memory/shared.ts +181 -0
- package/src/memory/store.ts +124 -0
- package/src/orchestrator/orchestrator.ts +851 -0
- package/src/orchestrator/scheduler.ts +352 -0
- package/src/task/queue.ts +394 -0
- package/src/task/task.ts +232 -0
- package/src/team/messaging.ts +230 -0
- package/src/team/team.ts +334 -0
- package/src/tool/built-in/bash.ts +187 -0
- package/src/tool/built-in/file-edit.ts +154 -0
- package/src/tool/built-in/file-read.ts +105 -0
- package/src/tool/built-in/file-write.ts +81 -0
- package/src/tool/built-in/grep.ts +362 -0
- package/src/tool/built-in/index.ts +50 -0
- package/src/tool/executor.ts +178 -0
- package/src/tool/framework.ts +557 -0
- package/src/types.ts +362 -0
- package/src/utils/semaphore.ts +89 -0
- package/tsconfig.json +25 -0
|
@@ -0,0 +1,413 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Core conversation loop engine for open-multi-agent.
|
|
3
|
+
*
|
|
4
|
+
* {@link AgentRunner} is the heart of the framework. It handles:
|
|
5
|
+
* - Sending messages to the LLM adapter
|
|
6
|
+
* - Extracting tool-use blocks from the response
|
|
7
|
+
* - Executing tool calls in parallel via {@link ToolExecutor}
|
|
8
|
+
* - Appending tool results and looping back until `end_turn`
|
|
9
|
+
* - Accumulating token usage and timing data across all turns
|
|
10
|
+
*
|
|
11
|
+
* The loop follows a standard agentic conversation pattern:
|
|
12
|
+
* one outer `while (true)` that breaks on `end_turn` or maxTurns exhaustion.
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
import type {
|
|
16
|
+
LLMMessage,
|
|
17
|
+
ContentBlock,
|
|
18
|
+
TextBlock,
|
|
19
|
+
ToolUseBlock,
|
|
20
|
+
ToolResultBlock,
|
|
21
|
+
ToolCallRecord,
|
|
22
|
+
TokenUsage,
|
|
23
|
+
StreamEvent,
|
|
24
|
+
ToolResult,
|
|
25
|
+
ToolUseContext,
|
|
26
|
+
LLMAdapter,
|
|
27
|
+
LLMChatOptions,
|
|
28
|
+
} from '../types.js'
|
|
29
|
+
import type { ToolRegistry } from '../tool/framework.js'
|
|
30
|
+
import type { ToolExecutor } from '../tool/executor.js'
|
|
31
|
+
|
|
32
|
+
// ---------------------------------------------------------------------------
|
|
33
|
+
// Public interfaces
|
|
34
|
+
// ---------------------------------------------------------------------------
|
|
35
|
+
|
|
36
|
+
/**
 * Static configuration for an {@link AgentRunner} instance.
 * These values are constant across every `run` / `stream` call.
 */
export interface RunnerOptions {
  /** LLM model identifier, e.g. `'claude-opus-4-6'`. Also reported in the tool context. */
  readonly model: string
  /** Optional system prompt prepended to every conversation. */
  readonly systemPrompt?: string
  /**
   * Maximum number of tool-call round-trips before the runner stops.
   * Prevents unbounded loops. Defaults to `10`.
   */
  readonly maxTurns?: number
  /** Maximum output tokens per LLM response. */
  readonly maxTokens?: number
  /** Sampling temperature passed through unchanged to the adapter. */
  readonly temperature?: number
  /**
   * AbortSignal that cancels any in-flight adapter call and stops the loop.
   * Also forwarded to every tool execution via the tool context.
   */
  readonly abortSignal?: AbortSignal
  /**
   * Whitelist of tool names this runner is allowed to use.
   * When provided, only tools whose name appears in this list are sent to the
   * LLM. When omitted, all registered tools are available.
   */
  readonly allowedTools?: readonly string[]
  /**
   * Display name of the agent driving this runner (used in tool context).
   * Defaults to `'runner'` when omitted.
   */
  readonly agentName?: string
  /**
   * Short role description of the agent (used in tool context).
   * Defaults to `'assistant'` when omitted.
   */
  readonly agentRole?: string
}
|
|
67
|
+
|
|
68
|
+
/**
 * Per-call callbacks for observing tool execution in real time.
 * All callbacks are optional; unused ones are simply skipped.
 */
export interface RunOptions {
  /**
   * Fired just before each tool is dispatched, with the tool's name and
   * raw input object as requested by the model.
   */
  readonly onToolCall?: (name: string, input: Record<string, unknown>) => void
  /**
   * Fired after each tool result is received. Tools in one turn run in
   * parallel, so results may arrive in any completion order.
   */
  readonly onToolResult?: (name: string, result: ToolResult) => void
  /**
   * Fired after each complete {@link LLMMessage} is appended to the
   * conversation (both assistant turns and tool-result user messages).
   */
  readonly onMessage?: (message: LLMMessage) => void
}
|
|
80
|
+
|
|
81
|
+
/** The aggregated result returned when a full run completes. */
export interface RunResult {
  /**
   * All messages accumulated during this run (assistant + tool results).
   * The initial seed messages passed to `run`/`stream` are NOT included.
   */
  readonly messages: LLMMessage[]
  /** The final text output from the last assistant turn. */
  readonly output: string
  /** All tool calls made during this run, in execution order. */
  readonly toolCalls: ToolCallRecord[]
  /** Aggregated token counts across every LLM call in this run. */
  readonly tokenUsage: TokenUsage
  /** Total number of LLM turns (including tool-call follow-ups). */
  readonly turns: number
}
|
|
94
|
+
|
|
95
|
+
// ---------------------------------------------------------------------------
|
|
96
|
+
// Internal helpers
|
|
97
|
+
// ---------------------------------------------------------------------------
|
|
98
|
+
|
|
99
|
+
/** Extract every TextBlock from a content array and join them. */
|
|
100
|
+
function extractText(content: readonly ContentBlock[]): string {
|
|
101
|
+
return content
|
|
102
|
+
.filter((b): b is TextBlock => b.type === 'text')
|
|
103
|
+
.map(b => b.text)
|
|
104
|
+
.join('')
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
/** Extract every ToolUseBlock from a content array. */
|
|
108
|
+
function extractToolUseBlocks(content: readonly ContentBlock[]): ToolUseBlock[] {
|
|
109
|
+
return content.filter((b): b is ToolUseBlock => b.type === 'tool_use')
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
/** Add two {@link TokenUsage} values together, returning a new object. */
|
|
113
|
+
function addTokenUsage(a: TokenUsage, b: TokenUsage): TokenUsage {
|
|
114
|
+
return {
|
|
115
|
+
input_tokens: a.input_tokens + b.input_tokens,
|
|
116
|
+
output_tokens: a.output_tokens + b.output_tokens,
|
|
117
|
+
}
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
// Additive identity used to seed usage accumulators. Never mutate this
// object: `run()` and `stream()` alias it directly as an initial value,
// relying on addTokenUsage always returning a fresh object.
const ZERO_USAGE: TokenUsage = { input_tokens: 0, output_tokens: 0 }
|
|
121
|
+
|
|
122
|
+
// ---------------------------------------------------------------------------
|
|
123
|
+
// AgentRunner
|
|
124
|
+
// ---------------------------------------------------------------------------
|
|
125
|
+
|
|
126
|
+
/**
 * Drives a full agentic conversation: LLM calls, tool execution, and looping.
 *
 * @example
 * ```ts
 * const runner = new AgentRunner(adapter, registry, executor, {
 *   model: 'claude-opus-4-6',
 *   maxTurns: 10,
 * })
 * const result = await runner.run(messages)
 * console.log(result.output)
 * ```
 */
export class AgentRunner {
  // Effective turn cap, resolved once in the constructor (default 10).
  private readonly maxTurns: number

  /**
   * @param adapter - LLM backend performing each chat round-trip.
   * @param toolRegistry - Supplies the tool definitions advertised to the LLM.
   * @param toolExecutor - Dispatches individual tool invocations.
   * @param options - Static configuration shared by every run of this runner.
   */
  constructor(
    private readonly adapter: LLMAdapter,
    private readonly toolRegistry: ToolRegistry,
    private readonly toolExecutor: ToolExecutor,
    private readonly options: RunnerOptions,
  ) {
    this.maxTurns = options.maxTurns ?? 10
  }

  // -------------------------------------------------------------------------
  // Public API
  // -------------------------------------------------------------------------

  /**
   * Run a complete conversation starting from `messages`.
   *
   * The call may internally make multiple LLM requests (one per tool-call
   * round-trip). It returns only when:
   * - The LLM emits `end_turn` with no tool-use blocks, or
   * - `maxTurns` is exceeded, or
   * - The abort signal is triggered.
   *
   * @param messages - Seed conversation; not mutated by this method.
   * @param options - Optional per-call observation callbacks.
   * @returns The aggregated {@link RunResult} for the whole conversation.
   */
  async run(
    messages: LLMMessage[],
    options: RunOptions = {},
  ): Promise<RunResult> {
    // Collect everything yielded by the internal streaming loop. Only the
    // terminal 'done' event carries data we keep here; text / tool events
    // are consumed and dropped.
    const accumulated: {
      messages: LLMMessage[]
      output: string
      toolCalls: ToolCallRecord[]
      tokenUsage: TokenUsage
      turns: number
    } = {
      messages: [],
      output: '',
      toolCalls: [],
      tokenUsage: ZERO_USAGE,
      turns: 0,
    }

    for await (const event of this.stream(messages, options)) {
      if (event.type === 'done') {
        // stream() always puts a RunResult in the 'done' event; the cast
        // re-narrows StreamEvent's generic `data` payload.
        const result = event.data as RunResult
        accumulated.messages = result.messages
        accumulated.output = result.output
        accumulated.toolCalls = result.toolCalls
        accumulated.tokenUsage = result.tokenUsage
        accumulated.turns = result.turns
      }
    }

    // NOTE(review): an 'error' event from stream() is ignored here, so a
    // failed run resolves with the zero-valued defaults above instead of
    // rejecting. Confirm callers expect best-effort semantics.
    return accumulated
  }

  /**
   * Run the conversation and yield {@link StreamEvent}s incrementally.
   *
   * Callers receive:
   * - `{ type: 'text', data: string }` for each assistant turn's text
   * - `{ type: 'tool_use', data: ToolUseBlock }` when the model requests a tool
   * - `{ type: 'tool_result', data: ToolResultBlock }` after each execution
   * - `{ type: 'done', data: RunResult }` at the very end
   * - `{ type: 'error', data: Error }` on unrecoverable failure (the
   *   generator then returns without a 'done' event)
   */
  async *stream(
    initialMessages: LLMMessage[],
    options: RunOptions = {},
  ): AsyncGenerator<StreamEvent> {
    // Working copy of the conversation — mutated as turns progress; the
    // caller's array is never touched.
    const conversationMessages: LLMMessage[] = [...initialMessages]

    // Accumulated state across all turns.
    let totalUsage: TokenUsage = ZERO_USAGE
    const allToolCalls: ToolCallRecord[] = []
    let finalOutput = ''
    let turns = 0

    // Build the stable LLM options once; model / tokens / temp don't change.
    // toToolDefs() returns LLMToolDef[] (inputSchema, camelCase) — matches
    // LLMChatOptions.tools from types.ts directly.
    const allDefs = this.toolRegistry.toToolDefs()
    // The non-null assertion is safe: the branch only runs when
    // allowedTools is truthy.
    const toolDefs = this.options.allowedTools
      ? allDefs.filter(d => this.options.allowedTools!.includes(d.name))
      : allDefs

    const baseChatOptions: LLMChatOptions = {
      model: this.options.model,
      // Omit `tools` entirely (undefined) when none are available rather
      // than sending an empty array.
      tools: toolDefs.length > 0 ? toolDefs : undefined,
      maxTokens: this.options.maxTokens,
      temperature: this.options.temperature,
      systemPrompt: this.options.systemPrompt,
      abortSignal: this.options.abortSignal,
    }

    try {
      // -----------------------------------------------------------------
      // Main agentic loop — `while (true)` until end_turn or maxTurns
      // -----------------------------------------------------------------
      while (true) {
        // Respect abort before each LLM call.
        if (this.options.abortSignal?.aborted) {
          break
        }

        // Guard against unbounded loops.
        if (turns >= this.maxTurns) {
          break
        }

        turns++

        // ------------------------------------------------------------------
        // Step 1: Call the LLM and collect the full response for this turn.
        // ------------------------------------------------------------------
        const response = await this.adapter.chat(conversationMessages, baseChatOptions)

        totalUsage = addTokenUsage(totalUsage, response.usage)

        // ------------------------------------------------------------------
        // Step 2: Build the assistant message from the response content.
        // ------------------------------------------------------------------
        const assistantMessage: LLMMessage = {
          role: 'assistant',
          content: response.content,
        }

        conversationMessages.push(assistantMessage)
        options.onMessage?.(assistantMessage)

        // Yield text deltas so streaming callers can display them promptly.
        // (Note: one event per turn with the turn's full text, not true
        // token-level deltas.)
        const turnText = extractText(response.content)
        if (turnText.length > 0) {
          yield { type: 'text', data: turnText } satisfies StreamEvent
        }

        // Announce each tool-use block the model requested.
        const toolUseBlocks = extractToolUseBlocks(response.content)
        for (const block of toolUseBlocks) {
          yield { type: 'tool_use', data: block } satisfies StreamEvent
        }

        // ------------------------------------------------------------------
        // Step 3: Decide whether to continue looping.
        // ------------------------------------------------------------------
        if (toolUseBlocks.length === 0) {
          // No tools requested — this is the terminal assistant turn.
          finalOutput = turnText
          break
        }

        // ------------------------------------------------------------------
        // Step 4: Execute all tool calls in PARALLEL.
        //
        // Parallel execution is critical for multi-tool responses where the
        // tools are independent (e.g. reading several files at once).
        // ------------------------------------------------------------------
        const toolContext: ToolUseContext = this.buildToolContext()

        const executionPromises = toolUseBlocks.map(async (block): Promise<{
          resultBlock: ToolResultBlock
          record: ToolCallRecord
        }> => {
          options.onToolCall?.(block.name, block.input)

          const startTime = Date.now()
          let result: ToolResult

          try {
            result = await this.toolExecutor.execute(
              block.name,
              block.input,
              toolContext,
            )
          } catch (err) {
            // Tool executor errors become error results — the loop continues.
            const message = err instanceof Error ? err.message : String(err)
            result = { data: message, isError: true }
          }

          // Wall-clock duration of this tool call, error path included.
          const duration = Date.now() - startTime

          options.onToolResult?.(block.name, result)

          const record: ToolCallRecord = {
            toolName: block.name,
            input: block.input,
            output: result.data,
            duration,
          }

          const resultBlock: ToolResultBlock = {
            type: 'tool_result',
            tool_use_id: block.id,
            content: result.data,
            is_error: result.isError,
          }

          return { resultBlock, record }
        })

        // Wait for every tool in this turn to finish.
        const executions = await Promise.all(executionPromises)

        // ------------------------------------------------------------------
        // Step 5: Accumulate results and build the user message that carries
        // them back to the LLM in the next turn.
        // ------------------------------------------------------------------
        const toolResultBlocks: ContentBlock[] = executions.map(e => e.resultBlock)

        for (const { record, resultBlock } of executions) {
          allToolCalls.push(record)
          yield { type: 'tool_result', data: resultBlock } satisfies StreamEvent
        }

        const toolResultMessage: LLMMessage = {
          role: 'user',
          content: toolResultBlocks,
        }

        conversationMessages.push(toolResultMessage)
        options.onMessage?.(toolResultMessage)

        // Loop back to Step 1 — send updated conversation to the LLM.
      }
    } catch (err) {
      // Adapter failures (and anything else thrown inside the loop) become
      // a terminal 'error' event; no 'done' event follows.
      const error = err instanceof Error ? err : new Error(String(err))
      yield { type: 'error', data: error } satisfies StreamEvent
      return
    }

    // If the loop exited due to maxTurns (or abort), use whatever text the
    // most recent assistant turn emitted.
    if (finalOutput === '' && conversationMessages.length > 0) {
      const lastAssistant = [...conversationMessages]
        .reverse()
        .find(m => m.role === 'assistant')
      if (lastAssistant !== undefined) {
        finalOutput = extractText(lastAssistant.content)
      }
    }

    const runResult: RunResult = {
      // Return only the messages added during this run (not the initial seed).
      messages: conversationMessages.slice(initialMessages.length),
      output: finalOutput,
      toolCalls: allToolCalls,
      tokenUsage: totalUsage,
      turns,
    }

    yield { type: 'done', data: runResult } satisfies StreamEvent
  }

  // -------------------------------------------------------------------------
  // Private helpers
  // -------------------------------------------------------------------------

  /**
   * Build the {@link ToolUseContext} passed to every tool execution.
   * Identifies this runner as the invoking agent and forwards the runner's
   * abort signal so tools can cancel cooperatively.
   */
  private buildToolContext(): ToolUseContext {
    return {
      agent: {
        name: this.options.agentName ?? 'runner',
        role: this.options.agentRole ?? 'assistant',
        model: this.options.model,
      },
      abortSignal: this.options.abortSignal,
    }
  }
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview open-multi-agent — public API surface.
|
|
3
|
+
*
|
|
4
|
+
* Import from `'open-multi-agent'` to access everything you need:
|
|
5
|
+
*
|
|
6
|
+
* ```ts
|
|
7
|
+
* import { OpenMultiAgent, Agent, Team, defineTool } from 'open-multi-agent'
|
|
8
|
+
* ```
|
|
9
|
+
*
|
|
10
|
+
* ## Quickstart
|
|
11
|
+
*
|
|
12
|
+
* ### Single agent
|
|
13
|
+
* ```ts
|
|
14
|
+
* const orchestrator = new OpenMultiAgent({ defaultModel: 'claude-opus-4-6' })
|
|
15
|
+
* const result = await orchestrator.runAgent(
|
|
16
|
+
* { name: 'assistant', model: 'claude-opus-4-6' },
|
|
17
|
+
* 'Explain monads in one paragraph.',
|
|
18
|
+
* )
|
|
19
|
+
* console.log(result.output)
|
|
20
|
+
* ```
|
|
21
|
+
*
|
|
22
|
+
* ### Multi-agent team (auto-orchestrated)
|
|
23
|
+
* ```ts
|
|
24
|
+
* const orchestrator = new OpenMultiAgent()
|
|
25
|
+
* const team = orchestrator.createTeam('writers', {
|
|
26
|
+
* name: 'writers',
|
|
27
|
+
* agents: [
|
|
28
|
+
* { name: 'researcher', model: 'claude-opus-4-6', systemPrompt: 'You research topics thoroughly.' },
|
|
29
|
+
* { name: 'writer', model: 'claude-opus-4-6', systemPrompt: 'You write clear documentation.' },
|
|
30
|
+
* ],
|
|
31
|
+
* sharedMemory: true,
|
|
32
|
+
* })
|
|
33
|
+
* const result = await orchestrator.runTeam(team, 'Write a guide on TypeScript generics.')
|
|
34
|
+
* console.log(result.agentResults.get('coordinator')?.output)
|
|
35
|
+
* ```
|
|
36
|
+
*
|
|
37
|
+
* ### Custom tools
|
|
38
|
+
* ```ts
|
|
39
|
+
* import { z } from 'zod'
|
|
40
|
+
*
|
|
41
|
+
* const myTool = defineTool({
|
|
42
|
+
* name: 'fetch_data',
|
|
43
|
+
* description: 'Fetch JSON data from a URL.',
|
|
44
|
+
* inputSchema: z.object({ url: z.string().url() }),
|
|
45
|
+
* execute: async ({ url }) => {
|
|
46
|
+
* const res = await fetch(url)
|
|
47
|
+
* return { data: await res.text() }
|
|
48
|
+
* },
|
|
49
|
+
* })
|
|
50
|
+
* ```
|
|
51
|
+
*/
|
|
52
|
+
|
|
53
|
+
// ---------------------------------------------------------------------------
|
|
54
|
+
// Orchestrator (primary entry point)
|
|
55
|
+
// ---------------------------------------------------------------------------
|
|
56
|
+
|
|
57
|
+
export { OpenMultiAgent } from './orchestrator/orchestrator.js'
|
|
58
|
+
export { Scheduler } from './orchestrator/scheduler.js'
|
|
59
|
+
export type { SchedulingStrategy } from './orchestrator/scheduler.js'
|
|
60
|
+
|
|
61
|
+
// ---------------------------------------------------------------------------
|
|
62
|
+
// Agent layer
|
|
63
|
+
// ---------------------------------------------------------------------------
|
|
64
|
+
|
|
65
|
+
export { Agent } from './agent/agent.js'
|
|
66
|
+
export { AgentPool, Semaphore } from './agent/pool.js'
|
|
67
|
+
export type { PoolStatus } from './agent/pool.js'
|
|
68
|
+
|
|
69
|
+
// ---------------------------------------------------------------------------
|
|
70
|
+
// Team layer
|
|
71
|
+
// ---------------------------------------------------------------------------
|
|
72
|
+
|
|
73
|
+
export { Team } from './team/team.js'
|
|
74
|
+
export { MessageBus } from './team/messaging.js'
|
|
75
|
+
export type { Message } from './team/messaging.js'
|
|
76
|
+
|
|
77
|
+
// ---------------------------------------------------------------------------
|
|
78
|
+
// Task layer
|
|
79
|
+
// ---------------------------------------------------------------------------
|
|
80
|
+
|
|
81
|
+
export { TaskQueue } from './task/queue.js'
|
|
82
|
+
export { createTask, isTaskReady, getTaskDependencyOrder, validateTaskDependencies } from './task/task.js'
|
|
83
|
+
export type { TaskQueueEvent } from './task/queue.js'
|
|
84
|
+
|
|
85
|
+
// ---------------------------------------------------------------------------
|
|
86
|
+
// Tool system
|
|
87
|
+
// ---------------------------------------------------------------------------
|
|
88
|
+
|
|
89
|
+
export { defineTool, ToolRegistry, zodToJsonSchema } from './tool/framework.js'
|
|
90
|
+
export { ToolExecutor } from './tool/executor.js'
|
|
91
|
+
export type { ToolExecutorOptions, BatchToolCall } from './tool/executor.js'
|
|
92
|
+
export {
|
|
93
|
+
registerBuiltInTools,
|
|
94
|
+
BUILT_IN_TOOLS,
|
|
95
|
+
bashTool,
|
|
96
|
+
fileReadTool,
|
|
97
|
+
fileWriteTool,
|
|
98
|
+
fileEditTool,
|
|
99
|
+
grepTool,
|
|
100
|
+
} from './tool/built-in/index.js'
|
|
101
|
+
|
|
102
|
+
// ---------------------------------------------------------------------------
|
|
103
|
+
// LLM adapters
|
|
104
|
+
// ---------------------------------------------------------------------------
|
|
105
|
+
|
|
106
|
+
export { createAdapter } from './llm/adapter.js'
|
|
107
|
+
export type { SupportedProvider } from './llm/adapter.js'
|
|
108
|
+
|
|
109
|
+
// ---------------------------------------------------------------------------
|
|
110
|
+
// Memory
|
|
111
|
+
// ---------------------------------------------------------------------------
|
|
112
|
+
|
|
113
|
+
export { InMemoryStore } from './memory/store.js'
|
|
114
|
+
export { SharedMemory } from './memory/shared.js'
|
|
115
|
+
|
|
116
|
+
// ---------------------------------------------------------------------------
|
|
117
|
+
// Types — all public interfaces re-exported for consumer type-checking
|
|
118
|
+
// ---------------------------------------------------------------------------
|
|
119
|
+
|
|
120
|
+
export type {
|
|
121
|
+
// Content blocks
|
|
122
|
+
TextBlock,
|
|
123
|
+
ToolUseBlock,
|
|
124
|
+
ToolResultBlock,
|
|
125
|
+
ImageBlock,
|
|
126
|
+
ContentBlock,
|
|
127
|
+
|
|
128
|
+
// LLM
|
|
129
|
+
LLMMessage,
|
|
130
|
+
LLMResponse,
|
|
131
|
+
LLMAdapter,
|
|
132
|
+
LLMChatOptions,
|
|
133
|
+
LLMStreamOptions,
|
|
134
|
+
LLMToolDef,
|
|
135
|
+
TokenUsage,
|
|
136
|
+
StreamEvent,
|
|
137
|
+
|
|
138
|
+
// Tools
|
|
139
|
+
ToolDefinition,
|
|
140
|
+
ToolResult,
|
|
141
|
+
ToolUseContext,
|
|
142
|
+
AgentInfo,
|
|
143
|
+
TeamInfo,
|
|
144
|
+
|
|
145
|
+
// Agent
|
|
146
|
+
AgentConfig,
|
|
147
|
+
AgentState,
|
|
148
|
+
AgentRunResult,
|
|
149
|
+
ToolCallRecord,
|
|
150
|
+
|
|
151
|
+
// Team
|
|
152
|
+
TeamConfig,
|
|
153
|
+
TeamRunResult,
|
|
154
|
+
|
|
155
|
+
// Task
|
|
156
|
+
Task,
|
|
157
|
+
TaskStatus,
|
|
158
|
+
|
|
159
|
+
// Orchestrator
|
|
160
|
+
OrchestratorConfig,
|
|
161
|
+
OrchestratorEvent,
|
|
162
|
+
|
|
163
|
+
// Memory
|
|
164
|
+
MemoryEntry,
|
|
165
|
+
MemoryStore,
|
|
166
|
+
} from './types.js'
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview LLM adapter factory.
|
|
3
|
+
*
|
|
4
|
+
* Re-exports the {@link LLMAdapter} interface and provides a
|
|
5
|
+
* {@link createAdapter} factory that returns the correct concrete
|
|
6
|
+
* implementation based on the requested provider.
|
|
7
|
+
*
|
|
8
|
+
* @example
|
|
9
|
+
* ```ts
|
|
10
|
+
* import { createAdapter } from './adapter.js'
|
|
11
|
+
*
|
|
12
|
+
* const anthropic = createAdapter('anthropic')
|
|
13
|
+
* const openai = createAdapter('openai', process.env.OPENAI_API_KEY)
|
|
14
|
+
* ```
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
export type {
|
|
18
|
+
LLMAdapter,
|
|
19
|
+
LLMChatOptions,
|
|
20
|
+
LLMStreamOptions,
|
|
21
|
+
LLMToolDef,
|
|
22
|
+
LLMMessage,
|
|
23
|
+
LLMResponse,
|
|
24
|
+
StreamEvent,
|
|
25
|
+
TokenUsage,
|
|
26
|
+
ContentBlock,
|
|
27
|
+
TextBlock,
|
|
28
|
+
ToolUseBlock,
|
|
29
|
+
ToolResultBlock,
|
|
30
|
+
ImageBlock,
|
|
31
|
+
} from '../types.js'
|
|
32
|
+
|
|
33
|
+
import type { LLMAdapter } from '../types.js'
|
|
34
|
+
|
|
35
|
+
/**
 * The set of LLM providers supported out of the box by {@link createAdapter}.
 * Additional providers can be integrated by implementing {@link LLMAdapter}
 * directly and bypassing this factory.
 */
export type SupportedProvider = 'anthropic' | 'openai'
|
|
41
|
+
|
|
42
|
+
/**
|
|
43
|
+
* Instantiate the appropriate {@link LLMAdapter} for the given provider.
|
|
44
|
+
*
|
|
45
|
+
* API keys fall back to the standard environment variables
|
|
46
|
+
* (`ANTHROPIC_API_KEY` / `OPENAI_API_KEY`) when not supplied explicitly.
|
|
47
|
+
*
|
|
48
|
+
* Adapters are imported lazily so that projects using only one provider
|
|
49
|
+
* are not forced to install the SDK for the other.
|
|
50
|
+
*
|
|
51
|
+
* @param provider - Which LLM provider to target.
|
|
52
|
+
* @param apiKey - Optional API key override; falls back to env var.
|
|
53
|
+
* @throws {Error} When the provider string is not recognised.
|
|
54
|
+
*/
|
|
55
|
+
export async function createAdapter(
|
|
56
|
+
provider: SupportedProvider,
|
|
57
|
+
apiKey?: string,
|
|
58
|
+
): Promise<LLMAdapter> {
|
|
59
|
+
switch (provider) {
|
|
60
|
+
case 'anthropic': {
|
|
61
|
+
const { AnthropicAdapter } = await import('./anthropic.js')
|
|
62
|
+
return new AnthropicAdapter(apiKey)
|
|
63
|
+
}
|
|
64
|
+
case 'openai': {
|
|
65
|
+
const { OpenAIAdapter } = await import('./openai.js')
|
|
66
|
+
return new OpenAIAdapter(apiKey)
|
|
67
|
+
}
|
|
68
|
+
default: {
|
|
69
|
+
// The `never` cast here makes TypeScript enforce exhaustiveness.
|
|
70
|
+
const _exhaustive: never = provider
|
|
71
|
+
throw new Error(`Unsupported LLM provider: ${String(_exhaustive)}`)
|
|
72
|
+
}
|
|
73
|
+
}
|
|
74
|
+
}
|