@jackchen_me/open-multi-agent 0.2.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. package/README.md +87 -20
  2. package/dist/agent/agent.d.ts +15 -1
  3. package/dist/agent/agent.d.ts.map +1 -1
  4. package/dist/agent/agent.js +144 -10
  5. package/dist/agent/agent.js.map +1 -1
  6. package/dist/agent/loop-detector.d.ts +39 -0
  7. package/dist/agent/loop-detector.d.ts.map +1 -0
  8. package/dist/agent/loop-detector.js +122 -0
  9. package/dist/agent/loop-detector.js.map +1 -0
  10. package/dist/agent/pool.d.ts +2 -1
  11. package/dist/agent/pool.d.ts.map +1 -1
  12. package/dist/agent/pool.js +4 -2
  13. package/dist/agent/pool.js.map +1 -1
  14. package/dist/agent/runner.d.ts +23 -1
  15. package/dist/agent/runner.d.ts.map +1 -1
  16. package/dist/agent/runner.js +113 -12
  17. package/dist/agent/runner.js.map +1 -1
  18. package/dist/index.d.ts +3 -1
  19. package/dist/index.d.ts.map +1 -1
  20. package/dist/index.js +2 -0
  21. package/dist/index.js.map +1 -1
  22. package/dist/llm/adapter.d.ts +4 -1
  23. package/dist/llm/adapter.d.ts.map +1 -1
  24. package/dist/llm/adapter.js +11 -0
  25. package/dist/llm/adapter.js.map +1 -1
  26. package/dist/llm/copilot.d.ts.map +1 -1
  27. package/dist/llm/copilot.js +2 -1
  28. package/dist/llm/copilot.js.map +1 -1
  29. package/dist/llm/gemini.d.ts +65 -0
  30. package/dist/llm/gemini.d.ts.map +1 -0
  31. package/dist/llm/gemini.js +317 -0
  32. package/dist/llm/gemini.js.map +1 -0
  33. package/dist/llm/grok.d.ts +21 -0
  34. package/dist/llm/grok.d.ts.map +1 -0
  35. package/dist/llm/grok.js +24 -0
  36. package/dist/llm/grok.js.map +1 -0
  37. package/dist/llm/openai-common.d.ts +8 -1
  38. package/dist/llm/openai-common.d.ts.map +1 -1
  39. package/dist/llm/openai-common.js +35 -2
  40. package/dist/llm/openai-common.js.map +1 -1
  41. package/dist/llm/openai.d.ts +1 -1
  42. package/dist/llm/openai.d.ts.map +1 -1
  43. package/dist/llm/openai.js +20 -2
  44. package/dist/llm/openai.js.map +1 -1
  45. package/dist/orchestrator/orchestrator.d.ts.map +1 -1
  46. package/dist/orchestrator/orchestrator.js +89 -9
  47. package/dist/orchestrator/orchestrator.js.map +1 -1
  48. package/dist/task/queue.d.ts +31 -2
  49. package/dist/task/queue.d.ts.map +1 -1
  50. package/dist/task/queue.js +69 -2
  51. package/dist/task/queue.js.map +1 -1
  52. package/dist/tool/text-tool-extractor.d.ts +32 -0
  53. package/dist/tool/text-tool-extractor.d.ts.map +1 -0
  54. package/dist/tool/text-tool-extractor.js +187 -0
  55. package/dist/tool/text-tool-extractor.js.map +1 -0
  56. package/dist/types.d.ts +139 -7
  57. package/dist/types.d.ts.map +1 -1
  58. package/dist/utils/trace.d.ts +12 -0
  59. package/dist/utils/trace.d.ts.map +1 -0
  60. package/dist/utils/trace.js +30 -0
  61. package/dist/utils/trace.js.map +1 -0
  62. package/package.json +18 -2
  63. package/.github/ISSUE_TEMPLATE/bug_report.md +0 -40
  64. package/.github/ISSUE_TEMPLATE/feature_request.md +0 -23
  65. package/.github/pull_request_template.md +0 -14
  66. package/.github/workflows/ci.yml +0 -23
  67. package/CLAUDE.md +0 -72
  68. package/CODE_OF_CONDUCT.md +0 -48
  69. package/CONTRIBUTING.md +0 -72
  70. package/DECISIONS.md +0 -43
  71. package/README_zh.md +0 -217
  72. package/SECURITY.md +0 -17
  73. package/examples/01-single-agent.ts +0 -131
  74. package/examples/02-team-collaboration.ts +0 -167
  75. package/examples/03-task-pipeline.ts +0 -201
  76. package/examples/04-multi-model-team.ts +0 -261
  77. package/examples/05-copilot-test.ts +0 -49
  78. package/examples/06-local-model.ts +0 -199
  79. package/examples/07-fan-out-aggregate.ts +0 -209
  80. package/examples/08-gemma4-local.ts +0 -203
  81. package/examples/09-gemma4-auto-orchestration.ts +0 -162
  82. package/src/agent/agent.ts +0 -473
  83. package/src/agent/pool.ts +0 -278
  84. package/src/agent/runner.ts +0 -413
  85. package/src/agent/structured-output.ts +0 -126
  86. package/src/index.ts +0 -167
  87. package/src/llm/adapter.ts +0 -87
  88. package/src/llm/anthropic.ts +0 -389
  89. package/src/llm/copilot.ts +0 -551
  90. package/src/llm/openai-common.ts +0 -255
  91. package/src/llm/openai.ts +0 -272
  92. package/src/memory/shared.ts +0 -181
  93. package/src/memory/store.ts +0 -124
  94. package/src/orchestrator/orchestrator.ts +0 -977
  95. package/src/orchestrator/scheduler.ts +0 -352
  96. package/src/task/queue.ts +0 -394
  97. package/src/task/task.ts +0 -239
  98. package/src/team/messaging.ts +0 -232
  99. package/src/team/team.ts +0 -334
  100. package/src/tool/built-in/bash.ts +0 -187
  101. package/src/tool/built-in/file-edit.ts +0 -154
  102. package/src/tool/built-in/file-read.ts +0 -105
  103. package/src/tool/built-in/file-write.ts +0 -81
  104. package/src/tool/built-in/grep.ts +0 -362
  105. package/src/tool/built-in/index.ts +0 -50
  106. package/src/tool/executor.ts +0 -178
  107. package/src/tool/framework.ts +0 -557
  108. package/src/types.ts +0 -391
  109. package/src/utils/semaphore.ts +0 -89
  110. package/tests/semaphore.test.ts +0 -57
  111. package/tests/shared-memory.test.ts +0 -122
  112. package/tests/structured-output.test.ts +0 -331
  113. package/tests/task-queue.test.ts +0 -244
  114. package/tests/task-retry.test.ts +0 -368
  115. package/tests/task-utils.test.ts +0 -155
  116. package/tests/tool-executor.test.ts +0 -193
  117. package/tsconfig.json +0 -25
@@ -1,389 +0,0 @@
1
- /**
2
- * @fileoverview Anthropic Claude adapter implementing {@link LLMAdapter}.
3
- *
4
- * Converts between the framework's internal {@link ContentBlock} types and the
5
- * Anthropic SDK's wire format, handling tool definitions, system prompts, and
6
- * both batch and streaming response paths.
7
- *
8
- * API key resolution order:
9
- * 1. `apiKey` constructor argument
10
- * 2. `ANTHROPIC_API_KEY` environment variable
11
- *
12
- * @example
13
- * ```ts
14
- * import { AnthropicAdapter } from './anthropic.js'
15
- *
16
- * const adapter = new AnthropicAdapter()
17
- * const response = await adapter.chat(messages, {
18
- * model: 'claude-opus-4-6',
19
- * maxTokens: 1024,
20
- * })
21
- * ```
22
- */
23
-
24
- import Anthropic from '@anthropic-ai/sdk'
25
- import type {
26
- ContentBlockParam,
27
- ImageBlockParam,
28
- MessageParam,
29
- TextBlockParam,
30
- ToolResultBlockParam,
31
- ToolUseBlockParam,
32
- Tool as AnthropicTool,
33
- } from '@anthropic-ai/sdk/resources/messages/messages.js'
34
-
35
- import type {
36
- ContentBlock,
37
- ImageBlock,
38
- LLMAdapter,
39
- LLMChatOptions,
40
- LLMMessage,
41
- LLMResponse,
42
- LLMStreamOptions,
43
- LLMToolDef,
44
- StreamEvent,
45
- TextBlock,
46
- ToolResultBlock,
47
- ToolUseBlock,
48
- } from '../types.js'
49
-
50
- // ---------------------------------------------------------------------------
51
- // Internal helpers
52
- // ---------------------------------------------------------------------------
53
-
54
- /**
55
- * Convert a single framework {@link ContentBlock} into an Anthropic
56
- * {@link ContentBlockParam} suitable for the `messages` array.
57
- *
58
- * `tool_result` blocks are only valid inside `user`-role messages, which is
59
- * handled by {@link toAnthropicMessages} based on role context.
60
- */
61
- function toAnthropicContentBlockParam(block: ContentBlock): ContentBlockParam {
62
- switch (block.type) {
63
- case 'text': {
64
- const param: TextBlockParam = { type: 'text', text: block.text }
65
- return param
66
- }
67
- case 'tool_use': {
68
- const param: ToolUseBlockParam = {
69
- type: 'tool_use',
70
- id: block.id,
71
- name: block.name,
72
- input: block.input,
73
- }
74
- return param
75
- }
76
- case 'tool_result': {
77
- const param: ToolResultBlockParam = {
78
- type: 'tool_result',
79
- tool_use_id: block.tool_use_id,
80
- content: block.content,
81
- is_error: block.is_error,
82
- }
83
- return param
84
- }
85
- case 'image': {
86
- // Anthropic only accepts a subset of MIME types; we pass them through
87
- // trusting the caller to supply a valid media_type value.
88
- const param: ImageBlockParam = {
89
- type: 'image',
90
- source: {
91
- type: 'base64',
92
- media_type: block.source.media_type as
93
- | 'image/jpeg'
94
- | 'image/png'
95
- | 'image/gif'
96
- | 'image/webp',
97
- data: block.source.data,
98
- },
99
- }
100
- return param
101
- }
102
- default: {
103
- // Exhaustiveness guard — TypeScript will flag this at compile time if a
104
- // new variant is added to ContentBlock without updating this switch.
105
- const _exhaustive: never = block
106
- throw new Error(`Unhandled content block type: ${JSON.stringify(_exhaustive)}`)
107
- }
108
- }
109
- }
110
-
111
- /**
112
- * Convert framework messages into Anthropic's `MessageParam[]` format.
113
- *
114
- * The Anthropic API requires strict user/assistant alternation. We do not
115
- * enforce that here — the caller is responsible for producing a valid
116
- * conversation history.
117
- */
118
- function toAnthropicMessages(messages: LLMMessage[]): MessageParam[] {
119
- return messages.map((msg): MessageParam => ({
120
- role: msg.role,
121
- content: msg.content.map(toAnthropicContentBlockParam),
122
- }))
123
- }
124
-
125
- /**
126
- * Convert framework {@link LLMToolDef}s into Anthropic's `Tool` objects.
127
- *
128
- * The `inputSchema` on {@link LLMToolDef} is already a plain JSON Schema
129
- * object, so we just need to reshape the wrapper.
130
- */
131
- function toAnthropicTools(tools: readonly LLMToolDef[]): AnthropicTool[] {
132
- return tools.map((t): AnthropicTool => ({
133
- name: t.name,
134
- description: t.description,
135
- input_schema: {
136
- type: 'object',
137
- ...(t.inputSchema as Record<string, unknown>),
138
- },
139
- }))
140
- }
141
-
142
- /**
143
- * Convert an Anthropic SDK `ContentBlock` into a framework {@link ContentBlock}.
144
- *
145
- * We only map the subset of SDK types that the framework exposes. Unknown
146
- * variants (thinking, server_tool_use, etc.) are converted to a text block
147
- * carrying a stringified representation so data is never silently dropped.
148
- */
149
- function fromAnthropicContentBlock(
150
- block: Anthropic.Messages.ContentBlock,
151
- ): ContentBlock {
152
- switch (block.type) {
153
- case 'text': {
154
- const text: TextBlock = { type: 'text', text: block.text }
155
- return text
156
- }
157
- case 'tool_use': {
158
- const toolUse: ToolUseBlock = {
159
- type: 'tool_use',
160
- id: block.id,
161
- name: block.name,
162
- input: block.input as Record<string, unknown>,
163
- }
164
- return toolUse
165
- }
166
- default: {
167
- // Graceful degradation for SDK types we don't model (thinking, etc.).
168
- const fallback: TextBlock = {
169
- type: 'text',
170
- text: `[unsupported block type: ${(block as { type: string }).type}]`,
171
- }
172
- return fallback
173
- }
174
- }
175
- }
176
-
177
- // ---------------------------------------------------------------------------
178
- // Adapter implementation
179
- // ---------------------------------------------------------------------------
180
-
181
- /**
182
- * LLM adapter backed by the Anthropic Claude API.
183
- *
184
- * Thread-safe — a single instance may be shared across concurrent agent runs.
185
- * The underlying SDK client is stateless across requests.
186
- */
187
- export class AnthropicAdapter implements LLMAdapter {
188
- readonly name = 'anthropic'
189
-
190
- readonly #client: Anthropic
191
-
192
- constructor(apiKey?: string, baseURL?: string) {
193
- this.#client = new Anthropic({
194
- apiKey: apiKey ?? process.env['ANTHROPIC_API_KEY'],
195
- baseURL,
196
- })
197
- }
198
-
199
- // -------------------------------------------------------------------------
200
- // chat()
201
- // -------------------------------------------------------------------------
202
-
203
- /**
204
- * Send a synchronous (non-streaming) chat request and return the complete
205
- * {@link LLMResponse}.
206
- *
207
- * Throws an `Anthropic.APIError` on non-2xx responses. Callers should catch
208
- * and handle these (e.g. rate limits, context window exceeded).
209
- */
210
- async chat(messages: LLMMessage[], options: LLMChatOptions): Promise<LLMResponse> {
211
- const anthropicMessages = toAnthropicMessages(messages)
212
-
213
- const response = await this.#client.messages.create(
214
- {
215
- model: options.model,
216
- max_tokens: options.maxTokens ?? 4096,
217
- messages: anthropicMessages,
218
- system: options.systemPrompt,
219
- tools: options.tools ? toAnthropicTools(options.tools) : undefined,
220
- temperature: options.temperature,
221
- },
222
- {
223
- signal: options.abortSignal,
224
- },
225
- )
226
-
227
- const content = response.content.map(fromAnthropicContentBlock)
228
-
229
- return {
230
- id: response.id,
231
- content,
232
- model: response.model,
233
- stop_reason: response.stop_reason ?? 'end_turn',
234
- usage: {
235
- input_tokens: response.usage.input_tokens,
236
- output_tokens: response.usage.output_tokens,
237
- },
238
- }
239
- }
240
-
241
- // -------------------------------------------------------------------------
242
- // stream()
243
- // -------------------------------------------------------------------------
244
-
245
- /**
246
- * Send a streaming chat request and yield {@link StreamEvent}s as they
247
- * arrive from the API.
248
- *
249
- * Sequence guarantees:
250
- * - Zero or more `text` events containing incremental deltas
251
- * - Zero or more `tool_use` events when the model calls a tool (emitted once
252
- * per tool use, after input JSON has been fully assembled)
253
- * - Exactly one terminal event: `done` (with the complete {@link LLMResponse}
254
- * as `data`) or `error` (with an `Error` as `data`)
255
- */
256
- async *stream(
257
- messages: LLMMessage[],
258
- options: LLMStreamOptions,
259
- ): AsyncIterable<StreamEvent> {
260
- const anthropicMessages = toAnthropicMessages(messages)
261
-
262
- // MessageStream gives us typed events and handles SSE reconnect internally.
263
- const stream = this.#client.messages.stream(
264
- {
265
- model: options.model,
266
- max_tokens: options.maxTokens ?? 4096,
267
- messages: anthropicMessages,
268
- system: options.systemPrompt,
269
- tools: options.tools ? toAnthropicTools(options.tools) : undefined,
270
- temperature: options.temperature,
271
- },
272
- {
273
- signal: options.abortSignal,
274
- },
275
- )
276
-
277
- // Accumulate tool-use input JSON as it streams in.
278
- // key = content block index, value = partially assembled input JSON string
279
- const toolInputBuffers = new Map<number, { id: string; name: string; json: string }>()
280
-
281
- try {
282
- for await (const event of stream) {
283
- switch (event.type) {
284
- case 'content_block_start': {
285
- const block = event.content_block
286
- if (block.type === 'tool_use') {
287
- toolInputBuffers.set(event.index, {
288
- id: block.id,
289
- name: block.name,
290
- json: '',
291
- })
292
- }
293
- break
294
- }
295
-
296
- case 'content_block_delta': {
297
- const delta = event.delta
298
-
299
- if (delta.type === 'text_delta') {
300
- const textEvent: StreamEvent = { type: 'text', data: delta.text }
301
- yield textEvent
302
- } else if (delta.type === 'input_json_delta') {
303
- const buf = toolInputBuffers.get(event.index)
304
- if (buf !== undefined) {
305
- buf.json += delta.partial_json
306
- }
307
- }
308
- break
309
- }
310
-
311
- case 'content_block_stop': {
312
- const buf = toolInputBuffers.get(event.index)
313
- if (buf !== undefined) {
314
- // Parse the accumulated JSON and emit a tool_use event.
315
- let parsedInput: Record<string, unknown> = {}
316
- try {
317
- const parsed: unknown = JSON.parse(buf.json)
318
- if (
319
- parsed !== null &&
320
- typeof parsed === 'object' &&
321
- !Array.isArray(parsed)
322
- ) {
323
- parsedInput = parsed as Record<string, unknown>
324
- }
325
- } catch {
326
- // Malformed JSON from the model — surface as an empty object
327
- // rather than crashing the stream.
328
- }
329
-
330
- const toolUseBlock: ToolUseBlock = {
331
- type: 'tool_use',
332
- id: buf.id,
333
- name: buf.name,
334
- input: parsedInput,
335
- }
336
- const toolUseEvent: StreamEvent = { type: 'tool_use', data: toolUseBlock }
337
- yield toolUseEvent
338
- toolInputBuffers.delete(event.index)
339
- }
340
- break
341
- }
342
-
343
- // message_start, message_delta, message_stop — we handle the final
344
- // response via stream.finalMessage() below rather than piecemeal.
345
- default:
346
- break
347
- }
348
- }
349
-
350
- // Await the fully assembled final message (token counts, stop_reason, etc.)
351
- const finalMessage = await stream.finalMessage()
352
- const content = finalMessage.content.map(fromAnthropicContentBlock)
353
-
354
- const finalResponse: LLMResponse = {
355
- id: finalMessage.id,
356
- content,
357
- model: finalMessage.model,
358
- stop_reason: finalMessage.stop_reason ?? 'end_turn',
359
- usage: {
360
- input_tokens: finalMessage.usage.input_tokens,
361
- output_tokens: finalMessage.usage.output_tokens,
362
- },
363
- }
364
-
365
- const doneEvent: StreamEvent = { type: 'done', data: finalResponse }
366
- yield doneEvent
367
- } catch (err) {
368
- const error = err instanceof Error ? err : new Error(String(err))
369
- const errorEvent: StreamEvent = { type: 'error', data: error }
370
- yield errorEvent
371
- }
372
- }
373
- }
374
-
375
- // Re-export types that consumers of this module commonly need alongside the adapter.
376
- export type {
377
- ContentBlock,
378
- ImageBlock,
379
- LLMAdapter,
380
- LLMChatOptions,
381
- LLMMessage,
382
- LLMResponse,
383
- LLMStreamOptions,
384
- LLMToolDef,
385
- StreamEvent,
386
- TextBlock,
387
- ToolResultBlock,
388
- ToolUseBlock,
389
- }