@jackchen_me/open-multi-agent 0.2.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. package/README.md +87 -20
  2. package/dist/agent/agent.d.ts +15 -1
  3. package/dist/agent/agent.d.ts.map +1 -1
  4. package/dist/agent/agent.js +144 -10
  5. package/dist/agent/agent.js.map +1 -1
  6. package/dist/agent/loop-detector.d.ts +39 -0
  7. package/dist/agent/loop-detector.d.ts.map +1 -0
  8. package/dist/agent/loop-detector.js +122 -0
  9. package/dist/agent/loop-detector.js.map +1 -0
  10. package/dist/agent/pool.d.ts +2 -1
  11. package/dist/agent/pool.d.ts.map +1 -1
  12. package/dist/agent/pool.js +4 -2
  13. package/dist/agent/pool.js.map +1 -1
  14. package/dist/agent/runner.d.ts +23 -1
  15. package/dist/agent/runner.d.ts.map +1 -1
  16. package/dist/agent/runner.js +113 -12
  17. package/dist/agent/runner.js.map +1 -1
  18. package/dist/index.d.ts +3 -1
  19. package/dist/index.d.ts.map +1 -1
  20. package/dist/index.js +2 -0
  21. package/dist/index.js.map +1 -1
  22. package/dist/llm/adapter.d.ts +4 -1
  23. package/dist/llm/adapter.d.ts.map +1 -1
  24. package/dist/llm/adapter.js +11 -0
  25. package/dist/llm/adapter.js.map +1 -1
  26. package/dist/llm/copilot.d.ts.map +1 -1
  27. package/dist/llm/copilot.js +2 -1
  28. package/dist/llm/copilot.js.map +1 -1
  29. package/dist/llm/gemini.d.ts +65 -0
  30. package/dist/llm/gemini.d.ts.map +1 -0
  31. package/dist/llm/gemini.js +317 -0
  32. package/dist/llm/gemini.js.map +1 -0
  33. package/dist/llm/grok.d.ts +21 -0
  34. package/dist/llm/grok.d.ts.map +1 -0
  35. package/dist/llm/grok.js +24 -0
  36. package/dist/llm/grok.js.map +1 -0
  37. package/dist/llm/openai-common.d.ts +8 -1
  38. package/dist/llm/openai-common.d.ts.map +1 -1
  39. package/dist/llm/openai-common.js +35 -2
  40. package/dist/llm/openai-common.js.map +1 -1
  41. package/dist/llm/openai.d.ts +1 -1
  42. package/dist/llm/openai.d.ts.map +1 -1
  43. package/dist/llm/openai.js +20 -2
  44. package/dist/llm/openai.js.map +1 -1
  45. package/dist/orchestrator/orchestrator.d.ts.map +1 -1
  46. package/dist/orchestrator/orchestrator.js +89 -9
  47. package/dist/orchestrator/orchestrator.js.map +1 -1
  48. package/dist/task/queue.d.ts +31 -2
  49. package/dist/task/queue.d.ts.map +1 -1
  50. package/dist/task/queue.js +69 -2
  51. package/dist/task/queue.js.map +1 -1
  52. package/dist/tool/text-tool-extractor.d.ts +32 -0
  53. package/dist/tool/text-tool-extractor.d.ts.map +1 -0
  54. package/dist/tool/text-tool-extractor.js +187 -0
  55. package/dist/tool/text-tool-extractor.js.map +1 -0
  56. package/dist/types.d.ts +139 -7
  57. package/dist/types.d.ts.map +1 -1
  58. package/dist/utils/trace.d.ts +12 -0
  59. package/dist/utils/trace.d.ts.map +1 -0
  60. package/dist/utils/trace.js +30 -0
  61. package/dist/utils/trace.js.map +1 -0
  62. package/package.json +18 -2
  63. package/.github/ISSUE_TEMPLATE/bug_report.md +0 -40
  64. package/.github/ISSUE_TEMPLATE/feature_request.md +0 -23
  65. package/.github/pull_request_template.md +0 -14
  66. package/.github/workflows/ci.yml +0 -23
  67. package/CLAUDE.md +0 -72
  68. package/CODE_OF_CONDUCT.md +0 -48
  69. package/CONTRIBUTING.md +0 -72
  70. package/DECISIONS.md +0 -43
  71. package/README_zh.md +0 -217
  72. package/SECURITY.md +0 -17
  73. package/examples/01-single-agent.ts +0 -131
  74. package/examples/02-team-collaboration.ts +0 -167
  75. package/examples/03-task-pipeline.ts +0 -201
  76. package/examples/04-multi-model-team.ts +0 -261
  77. package/examples/05-copilot-test.ts +0 -49
  78. package/examples/06-local-model.ts +0 -199
  79. package/examples/07-fan-out-aggregate.ts +0 -209
  80. package/examples/08-gemma4-local.ts +0 -203
  81. package/examples/09-gemma4-auto-orchestration.ts +0 -162
  82. package/src/agent/agent.ts +0 -473
  83. package/src/agent/pool.ts +0 -278
  84. package/src/agent/runner.ts +0 -413
  85. package/src/agent/structured-output.ts +0 -126
  86. package/src/index.ts +0 -167
  87. package/src/llm/adapter.ts +0 -87
  88. package/src/llm/anthropic.ts +0 -389
  89. package/src/llm/copilot.ts +0 -551
  90. package/src/llm/openai-common.ts +0 -255
  91. package/src/llm/openai.ts +0 -272
  92. package/src/memory/shared.ts +0 -181
  93. package/src/memory/store.ts +0 -124
  94. package/src/orchestrator/orchestrator.ts +0 -977
  95. package/src/orchestrator/scheduler.ts +0 -352
  96. package/src/task/queue.ts +0 -394
  97. package/src/task/task.ts +0 -239
  98. package/src/team/messaging.ts +0 -232
  99. package/src/team/team.ts +0 -334
  100. package/src/tool/built-in/bash.ts +0 -187
  101. package/src/tool/built-in/file-edit.ts +0 -154
  102. package/src/tool/built-in/file-read.ts +0 -105
  103. package/src/tool/built-in/file-write.ts +0 -81
  104. package/src/tool/built-in/grep.ts +0 -362
  105. package/src/tool/built-in/index.ts +0 -50
  106. package/src/tool/executor.ts +0 -178
  107. package/src/tool/framework.ts +0 -557
  108. package/src/types.ts +0 -391
  109. package/src/utils/semaphore.ts +0 -89
  110. package/tests/semaphore.test.ts +0 -57
  111. package/tests/shared-memory.test.ts +0 -122
  112. package/tests/structured-output.test.ts +0 -331
  113. package/tests/task-queue.test.ts +0 -244
  114. package/tests/task-retry.test.ts +0 -368
  115. package/tests/task-utils.test.ts +0 -155
  116. package/tests/tool-executor.test.ts +0 -193
  117. package/tsconfig.json +0 -25
@@ -1,255 +0,0 @@
1
- /**
2
- * @fileoverview Shared OpenAI wire-format conversion helpers.
3
- *
4
- * Both the OpenAI and Copilot adapters use the OpenAI Chat Completions API
5
- * format. This module contains the common conversion logic so it isn't
6
- * duplicated across adapters.
7
- */
8
-
9
- import OpenAI from 'openai'
10
- import type {
11
- ChatCompletion,
12
- ChatCompletionAssistantMessageParam,
13
- ChatCompletionMessageParam,
14
- ChatCompletionMessageToolCall,
15
- ChatCompletionTool,
16
- ChatCompletionToolMessageParam,
17
- ChatCompletionUserMessageParam,
18
- } from 'openai/resources/chat/completions/index.js'
19
-
20
- import type {
21
- ContentBlock,
22
- LLMMessage,
23
- LLMResponse,
24
- LLMToolDef,
25
- TextBlock,
26
- ToolUseBlock,
27
- } from '../types.js'
28
-
29
- // ---------------------------------------------------------------------------
30
- // Framework → OpenAI
31
- // ---------------------------------------------------------------------------
32
-
33
- /**
34
- * Convert a framework {@link LLMToolDef} to an OpenAI {@link ChatCompletionTool}.
35
- */
36
- export function toOpenAITool(tool: LLMToolDef): ChatCompletionTool {
37
- return {
38
- type: 'function',
39
- function: {
40
- name: tool.name,
41
- description: tool.description,
42
- parameters: tool.inputSchema as Record<string, unknown>,
43
- },
44
- }
45
- }
46
-
47
- /**
48
- * Determine whether a framework message contains any `tool_result` content
49
- * blocks, which must be serialised as separate OpenAI `tool`-role messages.
50
- */
51
- function hasToolResults(msg: LLMMessage): boolean {
52
- return msg.content.some((b) => b.type === 'tool_result')
53
- }
54
-
55
- /**
56
- * Convert framework {@link LLMMessage}s into OpenAI
57
- * {@link ChatCompletionMessageParam} entries.
58
- *
59
- * `tool_result` blocks are expanded into top-level `tool`-role messages
60
- * because OpenAI uses a dedicated role for tool results rather than embedding
61
- * them inside user-content arrays.
62
- */
63
- export function toOpenAIMessages(messages: LLMMessage[]): ChatCompletionMessageParam[] {
64
- const result: ChatCompletionMessageParam[] = []
65
-
66
- for (const msg of messages) {
67
- if (msg.role === 'assistant') {
68
- result.push(toOpenAIAssistantMessage(msg))
69
- } else {
70
- // user role
71
- if (!hasToolResults(msg)) {
72
- result.push(toOpenAIUserMessage(msg))
73
- } else {
74
- const nonToolBlocks = msg.content.filter((b) => b.type !== 'tool_result')
75
- if (nonToolBlocks.length > 0) {
76
- result.push(toOpenAIUserMessage({ role: 'user', content: nonToolBlocks }))
77
- }
78
-
79
- for (const block of msg.content) {
80
- if (block.type === 'tool_result') {
81
- const toolMsg: ChatCompletionToolMessageParam = {
82
- role: 'tool',
83
- tool_call_id: block.tool_use_id,
84
- content: block.content,
85
- }
86
- result.push(toolMsg)
87
- }
88
- }
89
- }
90
- }
91
- }
92
-
93
- return result
94
- }
95
-
96
- /**
97
- * Convert a `user`-role framework message into an OpenAI user message.
98
- * Image blocks are converted to the OpenAI image_url content part format.
99
- */
100
- function toOpenAIUserMessage(msg: LLMMessage): ChatCompletionUserMessageParam {
101
- if (msg.content.length === 1 && msg.content[0]?.type === 'text') {
102
- return { role: 'user', content: msg.content[0].text }
103
- }
104
-
105
- type ContentPart = OpenAI.Chat.ChatCompletionContentPartText | OpenAI.Chat.ChatCompletionContentPartImage
106
- const parts: ContentPart[] = []
107
-
108
- for (const block of msg.content) {
109
- if (block.type === 'text') {
110
- parts.push({ type: 'text', text: block.text })
111
- } else if (block.type === 'image') {
112
- parts.push({
113
- type: 'image_url',
114
- image_url: {
115
- url: `data:${block.source.media_type};base64,${block.source.data}`,
116
- },
117
- })
118
- }
119
- // tool_result blocks are handled by the caller (toOpenAIMessages); skip here.
120
- }
121
-
122
- return { role: 'user', content: parts }
123
- }
124
-
125
- /**
126
- * Convert an `assistant`-role framework message into an OpenAI assistant message.
127
- * `tool_use` blocks become `tool_calls`; `text` blocks become message content.
128
- */
129
- function toOpenAIAssistantMessage(msg: LLMMessage): ChatCompletionAssistantMessageParam {
130
- const toolCalls: ChatCompletionMessageToolCall[] = []
131
- const textParts: string[] = []
132
-
133
- for (const block of msg.content) {
134
- if (block.type === 'tool_use') {
135
- toolCalls.push({
136
- id: block.id,
137
- type: 'function',
138
- function: {
139
- name: block.name,
140
- arguments: JSON.stringify(block.input),
141
- },
142
- })
143
- } else if (block.type === 'text') {
144
- textParts.push(block.text)
145
- }
146
- }
147
-
148
- const assistantMsg: ChatCompletionAssistantMessageParam = {
149
- role: 'assistant',
150
- content: textParts.length > 0 ? textParts.join('') : null,
151
- }
152
-
153
- if (toolCalls.length > 0) {
154
- assistantMsg.tool_calls = toolCalls
155
- }
156
-
157
- return assistantMsg
158
- }
159
-
160
- // ---------------------------------------------------------------------------
161
- // OpenAI → Framework
162
- // ---------------------------------------------------------------------------
163
-
164
- /**
165
- * Convert an OpenAI {@link ChatCompletion} into a framework {@link LLMResponse}.
166
- *
167
- * Takes only the first choice (index 0), consistent with how the framework
168
- * is designed for single-output agents.
169
- */
170
- export function fromOpenAICompletion(completion: ChatCompletion): LLMResponse {
171
- const choice = completion.choices[0]
172
- if (choice === undefined) {
173
- throw new Error('OpenAI returned a completion with no choices')
174
- }
175
-
176
- const content: ContentBlock[] = []
177
- const message = choice.message
178
-
179
- if (message.content !== null && message.content !== undefined) {
180
- const textBlock: TextBlock = { type: 'text', text: message.content }
181
- content.push(textBlock)
182
- }
183
-
184
- for (const toolCall of message.tool_calls ?? []) {
185
- let parsedInput: Record<string, unknown> = {}
186
- try {
187
- const parsed: unknown = JSON.parse(toolCall.function.arguments)
188
- if (parsed !== null && typeof parsed === 'object' && !Array.isArray(parsed)) {
189
- parsedInput = parsed as Record<string, unknown>
190
- }
191
- } catch {
192
- // Malformed arguments from the model — surface as empty object.
193
- }
194
-
195
- const toolUseBlock: ToolUseBlock = {
196
- type: 'tool_use',
197
- id: toolCall.id,
198
- name: toolCall.function.name,
199
- input: parsedInput,
200
- }
201
- content.push(toolUseBlock)
202
- }
203
-
204
- const stopReason = normalizeFinishReason(choice.finish_reason ?? 'stop')
205
-
206
- return {
207
- id: completion.id,
208
- content,
209
- model: completion.model,
210
- stop_reason: stopReason,
211
- usage: {
212
- input_tokens: completion.usage?.prompt_tokens ?? 0,
213
- output_tokens: completion.usage?.completion_tokens ?? 0,
214
- },
215
- }
216
- }
217
-
218
- /**
219
- * Normalize an OpenAI `finish_reason` string to the framework's canonical
220
- * stop-reason vocabulary.
221
- *
222
- * Mapping:
223
- * - `'stop'` → `'end_turn'`
224
- * - `'tool_calls'` → `'tool_use'`
225
- * - `'length'` → `'max_tokens'`
226
- * - `'content_filter'` → `'content_filter'`
227
- * - anything else → passed through unchanged
228
- */
229
- export function normalizeFinishReason(reason: string): string {
230
- switch (reason) {
231
- case 'stop': return 'end_turn'
232
- case 'tool_calls': return 'tool_use'
233
- case 'length': return 'max_tokens'
234
- case 'content_filter': return 'content_filter'
235
- default: return reason
236
- }
237
- }
238
-
239
- /**
240
- * Prepend a system message when `systemPrompt` is provided, then append the
241
- * converted conversation messages.
242
- */
243
- export function buildOpenAIMessageList(
244
- messages: LLMMessage[],
245
- systemPrompt: string | undefined,
246
- ): ChatCompletionMessageParam[] {
247
- const result: ChatCompletionMessageParam[] = []
248
-
249
- if (systemPrompt !== undefined && systemPrompt.length > 0) {
250
- result.push({ role: 'system', content: systemPrompt })
251
- }
252
-
253
- result.push(...toOpenAIMessages(messages))
254
- return result
255
- }
package/src/llm/openai.ts DELETED
@@ -1,272 +0,0 @@
1
- /**
2
- * @fileoverview OpenAI adapter implementing {@link LLMAdapter}.
3
- *
4
- * Converts between the framework's internal {@link ContentBlock} types and the
5
- * OpenAI Chat Completions wire format. Key mapping decisions:
6
- *
7
- * - Framework `tool_use` blocks in assistant messages → OpenAI `tool_calls`
8
- * - Framework `tool_result` blocks in user messages → OpenAI `tool` role messages
9
- * - Framework `image` blocks in user messages → OpenAI image content parts
10
- * - System prompt in {@link LLMChatOptions} → prepended `system` message
11
- *
12
- * Because OpenAI and Anthropic use fundamentally different role-based structures
13
- * for tool calling (Anthropic embeds tool results in user-role content arrays;
14
- * OpenAI uses a dedicated `tool` role), the conversion necessarily splits
15
- * `tool_result` blocks out into separate top-level messages.
16
- *
17
- * API key resolution order:
18
- * 1. `apiKey` constructor argument
19
- * 2. `OPENAI_API_KEY` environment variable
20
- *
21
- * @example
22
- * ```ts
23
- * import { OpenAIAdapter } from './openai.js'
24
- *
25
- * const adapter = new OpenAIAdapter()
26
- * const response = await adapter.chat(messages, {
27
- * model: 'gpt-5.4',
28
- * maxTokens: 1024,
29
- * })
30
- * ```
31
- */
32
-
33
- import OpenAI from 'openai'
34
- import type {
35
- ChatCompletionChunk,
36
- } from 'openai/resources/chat/completions/index.js'
37
-
38
- import type {
39
- ContentBlock,
40
- LLMAdapter,
41
- LLMChatOptions,
42
- LLMMessage,
43
- LLMResponse,
44
- LLMStreamOptions,
45
- LLMToolDef,
46
- StreamEvent,
47
- TextBlock,
48
- ToolUseBlock,
49
- } from '../types.js'
50
-
51
- import {
52
- toOpenAITool,
53
- fromOpenAICompletion,
54
- normalizeFinishReason,
55
- buildOpenAIMessageList,
56
- } from './openai-common.js'
57
-
58
- // ---------------------------------------------------------------------------
59
- // Adapter implementation
60
- // ---------------------------------------------------------------------------
61
-
62
- /**
63
- * LLM adapter backed by the OpenAI Chat Completions API.
64
- *
65
- * Thread-safe — a single instance may be shared across concurrent agent runs.
66
- */
67
- export class OpenAIAdapter implements LLMAdapter {
68
- readonly name = 'openai'
69
-
70
- readonly #client: OpenAI
71
-
72
- constructor(apiKey?: string, baseURL?: string) {
73
- this.#client = new OpenAI({
74
- apiKey: apiKey ?? process.env['OPENAI_API_KEY'],
75
- baseURL,
76
- })
77
- }
78
-
79
- // -------------------------------------------------------------------------
80
- // chat()
81
- // -------------------------------------------------------------------------
82
-
83
- /**
84
- * Send a synchronous (non-streaming) chat request and return the complete
85
- * {@link LLMResponse}.
86
- *
87
- * Throws an `OpenAI.APIError` on non-2xx responses. Callers should catch and
88
- * handle these (e.g. rate limits, context length exceeded).
89
- */
90
- async chat(messages: LLMMessage[], options: LLMChatOptions): Promise<LLMResponse> {
91
- const openAIMessages = buildOpenAIMessageList(messages, options.systemPrompt)
92
-
93
- const completion = await this.#client.chat.completions.create(
94
- {
95
- model: options.model,
96
- messages: openAIMessages,
97
- max_tokens: options.maxTokens,
98
- temperature: options.temperature,
99
- tools: options.tools ? options.tools.map(toOpenAITool) : undefined,
100
- stream: false,
101
- },
102
- {
103
- signal: options.abortSignal,
104
- },
105
- )
106
-
107
- return fromOpenAICompletion(completion)
108
- }
109
-
110
- // -------------------------------------------------------------------------
111
- // stream()
112
- // -------------------------------------------------------------------------
113
-
114
- /**
115
- * Send a streaming chat request and yield {@link StreamEvent}s incrementally.
116
- *
117
- * Sequence guarantees match {@link AnthropicAdapter.stream}:
118
- * - Zero or more `text` events
119
- * - Zero or more `tool_use` events (emitted once per tool call, after
120
- * arguments have been fully assembled)
121
- * - Exactly one terminal event: `done` or `error`
122
- */
123
- async *stream(
124
- messages: LLMMessage[],
125
- options: LLMStreamOptions,
126
- ): AsyncIterable<StreamEvent> {
127
- const openAIMessages = buildOpenAIMessageList(messages, options.systemPrompt)
128
-
129
- // We request usage in the final chunk so we can include it in the `done` event.
130
- const streamResponse = await this.#client.chat.completions.create(
131
- {
132
- model: options.model,
133
- messages: openAIMessages,
134
- max_tokens: options.maxTokens,
135
- temperature: options.temperature,
136
- tools: options.tools ? options.tools.map(toOpenAITool) : undefined,
137
- stream: true,
138
- stream_options: { include_usage: true },
139
- },
140
- {
141
- signal: options.abortSignal,
142
- },
143
- )
144
-
145
- // Accumulate state across chunks.
146
- let completionId = ''
147
- let completionModel = ''
148
- let finalFinishReason: string = 'stop'
149
- let inputTokens = 0
150
- let outputTokens = 0
151
-
152
- // tool_calls are streamed piecemeal; key = tool call index
153
- const toolCallBuffers = new Map<
154
- number,
155
- { id: string; name: string; argsJson: string }
156
- >()
157
-
158
- // Full text accumulator for the `done` response.
159
- let fullText = ''
160
-
161
- try {
162
- for await (const chunk of streamResponse) {
163
- completionId = chunk.id
164
- completionModel = chunk.model
165
-
166
- // Usage is only populated in the final chunk when stream_options.include_usage is set.
167
- if (chunk.usage !== null && chunk.usage !== undefined) {
168
- inputTokens = chunk.usage.prompt_tokens
169
- outputTokens = chunk.usage.completion_tokens
170
- }
171
-
172
- const choice: ChatCompletionChunk.Choice | undefined = chunk.choices[0]
173
- if (choice === undefined) continue
174
-
175
- const delta = choice.delta
176
-
177
- // --- text delta ---
178
- if (delta.content !== null && delta.content !== undefined) {
179
- fullText += delta.content
180
- const textEvent: StreamEvent = { type: 'text', data: delta.content }
181
- yield textEvent
182
- }
183
-
184
- // --- tool call delta ---
185
- for (const toolCallDelta of delta.tool_calls ?? []) {
186
- const idx = toolCallDelta.index
187
-
188
- if (!toolCallBuffers.has(idx)) {
189
- toolCallBuffers.set(idx, {
190
- id: toolCallDelta.id ?? '',
191
- name: toolCallDelta.function?.name ?? '',
192
- argsJson: '',
193
- })
194
- }
195
-
196
- const buf = toolCallBuffers.get(idx)
197
- // buf is guaranteed to exist: we just set it above.
198
- if (buf !== undefined) {
199
- if (toolCallDelta.id) buf.id = toolCallDelta.id
200
- if (toolCallDelta.function?.name) buf.name = toolCallDelta.function.name
201
- if (toolCallDelta.function?.arguments) {
202
- buf.argsJson += toolCallDelta.function.arguments
203
- }
204
- }
205
- }
206
-
207
- if (choice.finish_reason !== null && choice.finish_reason !== undefined) {
208
- finalFinishReason = choice.finish_reason
209
- }
210
- }
211
-
212
- // Emit accumulated tool_use events after the stream ends.
213
- const finalToolUseBlocks: ToolUseBlock[] = []
214
- for (const buf of toolCallBuffers.values()) {
215
- let parsedInput: Record<string, unknown> = {}
216
- try {
217
- const parsed: unknown = JSON.parse(buf.argsJson)
218
- if (parsed !== null && typeof parsed === 'object' && !Array.isArray(parsed)) {
219
- parsedInput = parsed as Record<string, unknown>
220
- }
221
- } catch {
222
- // Malformed JSON — surface as empty object.
223
- }
224
-
225
- const toolUseBlock: ToolUseBlock = {
226
- type: 'tool_use',
227
- id: buf.id,
228
- name: buf.name,
229
- input: parsedInput,
230
- }
231
- finalToolUseBlocks.push(toolUseBlock)
232
- const toolUseEvent: StreamEvent = { type: 'tool_use', data: toolUseBlock }
233
- yield toolUseEvent
234
- }
235
-
236
- // Build the complete content array for the done response.
237
- const doneContent: ContentBlock[] = []
238
- if (fullText.length > 0) {
239
- const textBlock: TextBlock = { type: 'text', text: fullText }
240
- doneContent.push(textBlock)
241
- }
242
- doneContent.push(...finalToolUseBlocks)
243
-
244
- const finalResponse: LLMResponse = {
245
- id: completionId,
246
- content: doneContent,
247
- model: completionModel,
248
- stop_reason: normalizeFinishReason(finalFinishReason),
249
- usage: { input_tokens: inputTokens, output_tokens: outputTokens },
250
- }
251
-
252
- const doneEvent: StreamEvent = { type: 'done', data: finalResponse }
253
- yield doneEvent
254
- } catch (err) {
255
- const error = err instanceof Error ? err : new Error(String(err))
256
- const errorEvent: StreamEvent = { type: 'error', data: error }
257
- yield errorEvent
258
- }
259
- }
260
- }
261
-
262
- // Re-export types that consumers of this module commonly need alongside the adapter.
263
- export type {
264
- ContentBlock,
265
- LLMAdapter,
266
- LLMChatOptions,
267
- LLMMessage,
268
- LLMResponse,
269
- LLMStreamOptions,
270
- LLMToolDef,
271
- StreamEvent,
272
- }
@@ -1,181 +0,0 @@
1
- /**
2
- * @fileoverview Shared memory layer for teams of cooperating agents.
3
- *
4
- * Each agent writes under its own namespace (`<agentName>/<key>`) so entries
5
- * remain attributable, while any agent may read any entry. The
6
- * {@link SharedMemory.getSummary} method produces a human-readable digest
7
- * suitable for injecting into an agent's context window.
8
- */
9
-
10
- import type { MemoryEntry, MemoryStore } from '../types.js'
11
- import { InMemoryStore } from './store.js'
12
-
13
- // ---------------------------------------------------------------------------
14
- // SharedMemory
15
- // ---------------------------------------------------------------------------
16
-
17
- /**
18
- * Namespaced shared memory for a team of agents.
19
- *
20
- * Writes are namespaced as `<agentName>/<key>` so that entries from different
21
- * agents never collide and are always attributable. Reads are namespace-aware
22
- * but also accept fully-qualified keys, making cross-agent reads straightforward.
23
- *
24
- * @example
25
- * ```ts
26
- * const mem = new SharedMemory()
27
- *
28
- * await mem.write('researcher', 'findings', 'TypeScript 5.5 ships const type params')
29
- * await mem.write('coder', 'plan', 'Implement feature X using const type params')
30
- *
31
- * const entry = await mem.read('researcher/findings')
32
- * const all = await mem.listByAgent('researcher')
33
- * const summary = await mem.getSummary()
34
- * ```
35
- */
36
- export class SharedMemory {
37
- private readonly store: InMemoryStore
38
-
39
- constructor() {
40
- this.store = new InMemoryStore()
41
- }
42
-
43
- // ---------------------------------------------------------------------------
44
- // Write
45
- // ---------------------------------------------------------------------------
46
-
47
- /**
48
- * Write `value` under the namespaced key `<agentName>/<key>`.
49
- *
50
- * Metadata is merged with a `{ agent: agentName }` marker so consumers can
51
- * identify provenance when iterating all entries.
52
- *
53
- * @param agentName - The writing agent's name (used as a namespace prefix).
54
- * @param key - Logical key within the agent's namespace.
55
- * @param value - String value to store (serialise objects before writing).
56
- * @param metadata - Optional extra metadata stored alongside the entry.
57
- */
58
- async write(
59
- agentName: string,
60
- key: string,
61
- value: string,
62
- metadata?: Record<string, unknown>,
63
- ): Promise<void> {
64
- const namespacedKey = SharedMemory.namespaceKey(agentName, key)
65
- await this.store.set(namespacedKey, value, {
66
- ...metadata,
67
- agent: agentName,
68
- })
69
- }
70
-
71
- // ---------------------------------------------------------------------------
72
- // Read
73
- // ---------------------------------------------------------------------------
74
-
75
- /**
76
- * Read an entry by its fully-qualified key (`<agentName>/<key>`).
77
- *
78
- * Returns `null` when the key is absent.
79
- */
80
- async read(key: string): Promise<MemoryEntry | null> {
81
- return this.store.get(key)
82
- }
83
-
84
- // ---------------------------------------------------------------------------
85
- // List
86
- // ---------------------------------------------------------------------------
87
-
88
- /** Returns every entry in the shared store, regardless of agent. */
89
- async listAll(): Promise<MemoryEntry[]> {
90
- return this.store.list()
91
- }
92
-
93
- /**
94
- * Returns all entries written by `agentName` (i.e. those whose key starts
95
- * with `<agentName>/`).
96
- */
97
- async listByAgent(agentName: string): Promise<MemoryEntry[]> {
98
- const prefix = SharedMemory.namespaceKey(agentName, '')
99
- const all = await this.store.list()
100
- return all.filter((entry) => entry.key.startsWith(prefix))
101
- }
102
-
103
- // ---------------------------------------------------------------------------
104
- // Summary
105
- // ---------------------------------------------------------------------------
106
-
107
- /**
108
- * Produces a human-readable summary of all entries in the store.
109
- *
110
- * The output is structured as a markdown-style block, grouped by agent, and
111
- * is designed to be prepended to an agent's system prompt or injected as a
112
- * user turn so the agent has context about what its teammates know.
113
- *
114
- * Returns an empty string when the store is empty.
115
- *
116
- * @example
117
- * ```
118
- * ## Shared Team Memory
119
- *
120
- * ### researcher
121
- * - findings: TypeScript 5.5 ships const type params
122
- *
123
- * ### coder
124
- * - plan: Implement feature X using const type params
125
- * ```
126
- */
127
- async getSummary(): Promise<string> {
128
- const all = await this.store.list()
129
- if (all.length === 0) return ''
130
-
131
- // Group entries by agent name.
132
- const byAgent = new Map<string, Array<{ localKey: string; value: string }>>()
133
- for (const entry of all) {
134
- const slashIdx = entry.key.indexOf('/')
135
- const agent = slashIdx === -1 ? '_unknown' : entry.key.slice(0, slashIdx)
136
- const localKey = slashIdx === -1 ? entry.key : entry.key.slice(slashIdx + 1)
137
-
138
- let group = byAgent.get(agent)
139
- if (!group) {
140
- group = []
141
- byAgent.set(agent, group)
142
- }
143
- group.push({ localKey, value: entry.value })
144
- }
145
-
146
- const lines: string[] = ['## Shared Team Memory', '']
147
- for (const [agent, entries] of byAgent) {
148
- lines.push(`### ${agent}`)
149
- for (const { localKey, value } of entries) {
150
- // Truncate long values so the summary stays readable in a context window.
151
- const displayValue =
152
- value.length > 200 ? `${value.slice(0, 197)}…` : value
153
- lines.push(`- ${localKey}: ${displayValue}`)
154
- }
155
- lines.push('')
156
- }
157
-
158
- return lines.join('\n').trimEnd()
159
- }
160
-
161
- // ---------------------------------------------------------------------------
162
- // Store access
163
- // ---------------------------------------------------------------------------
164
-
165
- /**
166
- * Returns the underlying {@link MemoryStore} so callers that only need the
167
- * raw key-value interface can receive a properly typed reference without
168
- * accessing private fields via bracket notation.
169
- */
170
- getStore(): MemoryStore {
171
- return this.store
172
- }
173
-
174
- // ---------------------------------------------------------------------------
175
- // Private helpers
176
- // ---------------------------------------------------------------------------
177
-
178
- private static namespaceKey(agentName: string, key: string): string {
179
- return `${agentName}/${key}`
180
- }
181
- }