@jackchen_me/open-multi-agent 1.0.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/package.json +8 -2
  2. package/.github/ISSUE_TEMPLATE/bug_report.md +0 -40
  3. package/.github/ISSUE_TEMPLATE/feature_request.md +0 -23
  4. package/.github/pull_request_template.md +0 -14
  5. package/.github/workflows/ci.yml +0 -23
  6. package/CLAUDE.md +0 -80
  7. package/CODE_OF_CONDUCT.md +0 -48
  8. package/CONTRIBUTING.md +0 -72
  9. package/DECISIONS.md +0 -43
  10. package/README_zh.md +0 -277
  11. package/SECURITY.md +0 -17
  12. package/examples/01-single-agent.ts +0 -131
  13. package/examples/02-team-collaboration.ts +0 -167
  14. package/examples/03-task-pipeline.ts +0 -201
  15. package/examples/04-multi-model-team.ts +0 -261
  16. package/examples/05-copilot-test.ts +0 -49
  17. package/examples/06-local-model.ts +0 -200
  18. package/examples/07-fan-out-aggregate.ts +0 -209
  19. package/examples/08-gemma4-local.ts +0 -192
  20. package/examples/09-structured-output.ts +0 -73
  21. package/examples/10-task-retry.ts +0 -132
  22. package/examples/11-trace-observability.ts +0 -133
  23. package/examples/12-grok.ts +0 -154
  24. package/examples/13-gemini.ts +0 -48
  25. package/src/agent/agent.ts +0 -622
  26. package/src/agent/loop-detector.ts +0 -137
  27. package/src/agent/pool.ts +0 -285
  28. package/src/agent/runner.ts +0 -542
  29. package/src/agent/structured-output.ts +0 -126
  30. package/src/index.ts +0 -182
  31. package/src/llm/adapter.ts +0 -98
  32. package/src/llm/anthropic.ts +0 -389
  33. package/src/llm/copilot.ts +0 -552
  34. package/src/llm/gemini.ts +0 -378
  35. package/src/llm/grok.ts +0 -29
  36. package/src/llm/openai-common.ts +0 -294
  37. package/src/llm/openai.ts +0 -292
  38. package/src/memory/shared.ts +0 -181
  39. package/src/memory/store.ts +0 -124
  40. package/src/orchestrator/orchestrator.ts +0 -1071
  41. package/src/orchestrator/scheduler.ts +0 -352
  42. package/src/task/queue.ts +0 -464
  43. package/src/task/task.ts +0 -239
  44. package/src/team/messaging.ts +0 -232
  45. package/src/team/team.ts +0 -334
  46. package/src/tool/built-in/bash.ts +0 -187
  47. package/src/tool/built-in/file-edit.ts +0 -154
  48. package/src/tool/built-in/file-read.ts +0 -105
  49. package/src/tool/built-in/file-write.ts +0 -81
  50. package/src/tool/built-in/grep.ts +0 -362
  51. package/src/tool/built-in/index.ts +0 -50
  52. package/src/tool/executor.ts +0 -178
  53. package/src/tool/framework.ts +0 -557
  54. package/src/tool/text-tool-extractor.ts +0 -219
  55. package/src/types.ts +0 -542
  56. package/src/utils/semaphore.ts +0 -89
  57. package/src/utils/trace.ts +0 -34
  58. package/tests/agent-hooks.test.ts +0 -473
  59. package/tests/agent-pool.test.ts +0 -212
  60. package/tests/approval.test.ts +0 -464
  61. package/tests/built-in-tools.test.ts +0 -393
  62. package/tests/gemini-adapter.test.ts +0 -97
  63. package/tests/grok-adapter.test.ts +0 -74
  64. package/tests/llm-adapters.test.ts +0 -357
  65. package/tests/loop-detection.test.ts +0 -456
  66. package/tests/openai-fallback.test.ts +0 -159
  67. package/tests/orchestrator.test.ts +0 -281
  68. package/tests/scheduler.test.ts +0 -221
  69. package/tests/semaphore.test.ts +0 -57
  70. package/tests/shared-memory.test.ts +0 -122
  71. package/tests/structured-output.test.ts +0 -331
  72. package/tests/task-queue.test.ts +0 -244
  73. package/tests/task-retry.test.ts +0 -368
  74. package/tests/task-utils.test.ts +0 -155
  75. package/tests/team-messaging.test.ts +0 -329
  76. package/tests/text-tool-extractor.test.ts +0 -170
  77. package/tests/tool-executor.test.ts +0 -193
  78. package/tests/trace.test.ts +0 -453
  79. package/tsconfig.json +0 -25
  80. package/vitest.config.ts +0 -9
package/src/llm/openai.ts DELETED
@@ -1,292 +0,0 @@
1
- /**
2
- * @fileoverview OpenAI adapter implementing {@link LLMAdapter}.
3
- *
4
- * Converts between the framework's internal {@link ContentBlock} types and the
5
- * OpenAI Chat Completions wire format. Key mapping decisions:
6
- *
7
- * - Framework `tool_use` blocks in assistant messages → OpenAI `tool_calls`
8
- * - Framework `tool_result` blocks in user messages → OpenAI `tool` role messages
9
- * - Framework `image` blocks in user messages → OpenAI image content parts
10
- * - System prompt in {@link LLMChatOptions} → prepended `system` message
11
- *
12
- * Because OpenAI and Anthropic use fundamentally different role-based structures
13
- * for tool calling (Anthropic embeds tool results in user-role content arrays;
14
- * OpenAI uses a dedicated `tool` role), the conversion necessarily splits
15
- * `tool_result` blocks out into separate top-level messages.
16
- *
17
- * API key resolution order:
18
- * 1. `apiKey` constructor argument
19
- * 2. `OPENAI_API_KEY` environment variable
20
- *
21
- * @example
22
- * ```ts
23
- * import { OpenAIAdapter } from './openai.js'
24
- *
25
- * const adapter = new OpenAIAdapter()
26
- * const response = await adapter.chat(messages, {
27
- * model: 'gpt-5.4',
28
- * maxTokens: 1024,
29
- * })
30
- * ```
31
- */
32
-
33
- import OpenAI from 'openai'
34
- import type {
35
- ChatCompletionChunk,
36
- } from 'openai/resources/chat/completions/index.js'
37
-
38
- import type {
39
- ContentBlock,
40
- LLMAdapter,
41
- LLMChatOptions,
42
- LLMMessage,
43
- LLMResponse,
44
- LLMStreamOptions,
45
- LLMToolDef,
46
- StreamEvent,
47
- TextBlock,
48
- ToolUseBlock,
49
- } from '../types.js'
50
-
51
- import {
52
- toOpenAITool,
53
- fromOpenAICompletion,
54
- normalizeFinishReason,
55
- buildOpenAIMessageList,
56
- } from './openai-common.js'
57
- import { extractToolCallsFromText } from '../tool/text-tool-extractor.js'
58
-
59
- // ---------------------------------------------------------------------------
60
- // Adapter implementation
61
- // ---------------------------------------------------------------------------
62
-
63
- /**
64
- * LLM adapter backed by the OpenAI Chat Completions API.
65
- *
66
- * Thread-safe — a single instance may be shared across concurrent agent runs.
67
- */
68
- export class OpenAIAdapter implements LLMAdapter {
69
- readonly name: string = 'openai'
70
-
71
- readonly #client: OpenAI
72
-
73
- constructor(apiKey?: string, baseURL?: string) {
74
- this.#client = new OpenAI({
75
- apiKey: apiKey ?? process.env['OPENAI_API_KEY'],
76
- baseURL,
77
- })
78
- }
79
-
80
- // -------------------------------------------------------------------------
81
- // chat()
82
- // -------------------------------------------------------------------------
83
-
84
- /**
85
- * Send a synchronous (non-streaming) chat request and return the complete
86
- * {@link LLMResponse}.
87
- *
88
- * Throws an `OpenAI.APIError` on non-2xx responses. Callers should catch and
89
- * handle these (e.g. rate limits, context length exceeded).
90
- */
91
- async chat(messages: LLMMessage[], options: LLMChatOptions): Promise<LLMResponse> {
92
- const openAIMessages = buildOpenAIMessageList(messages, options.systemPrompt)
93
-
94
- const completion = await this.#client.chat.completions.create(
95
- {
96
- model: options.model,
97
- messages: openAIMessages,
98
- max_tokens: options.maxTokens,
99
- temperature: options.temperature,
100
- tools: options.tools ? options.tools.map(toOpenAITool) : undefined,
101
- stream: false,
102
- },
103
- {
104
- signal: options.abortSignal,
105
- },
106
- )
107
-
108
- const toolNames = options.tools?.map(t => t.name)
109
- return fromOpenAICompletion(completion, toolNames)
110
- }
111
-
112
- // -------------------------------------------------------------------------
113
- // stream()
114
- // -------------------------------------------------------------------------
115
-
116
- /**
117
- * Send a streaming chat request and yield {@link StreamEvent}s incrementally.
118
- *
119
- * Sequence guarantees match {@link AnthropicAdapter.stream}:
120
- * - Zero or more `text` events
121
- * - Zero or more `tool_use` events (emitted once per tool call, after
122
- * arguments have been fully assembled)
123
- * - Exactly one terminal event: `done` or `error`
124
- */
125
- async *stream(
126
- messages: LLMMessage[],
127
- options: LLMStreamOptions,
128
- ): AsyncIterable<StreamEvent> {
129
- const openAIMessages = buildOpenAIMessageList(messages, options.systemPrompt)
130
-
131
- // We request usage in the final chunk so we can include it in the `done` event.
132
- const streamResponse = await this.#client.chat.completions.create(
133
- {
134
- model: options.model,
135
- messages: openAIMessages,
136
- max_tokens: options.maxTokens,
137
- temperature: options.temperature,
138
- tools: options.tools ? options.tools.map(toOpenAITool) : undefined,
139
- stream: true,
140
- stream_options: { include_usage: true },
141
- },
142
- {
143
- signal: options.abortSignal,
144
- },
145
- )
146
-
147
- // Accumulate state across chunks.
148
- let completionId = ''
149
- let completionModel = ''
150
- let finalFinishReason: string = 'stop'
151
- let inputTokens = 0
152
- let outputTokens = 0
153
-
154
- // tool_calls are streamed piecemeal; key = tool call index
155
- const toolCallBuffers = new Map<
156
- number,
157
- { id: string; name: string; argsJson: string }
158
- >()
159
-
160
- // Full text accumulator for the `done` response.
161
- let fullText = ''
162
-
163
- try {
164
- for await (const chunk of streamResponse) {
165
- completionId = chunk.id
166
- completionModel = chunk.model
167
-
168
- // Usage is only populated in the final chunk when stream_options.include_usage is set.
169
- if (chunk.usage !== null && chunk.usage !== undefined) {
170
- inputTokens = chunk.usage.prompt_tokens
171
- outputTokens = chunk.usage.completion_tokens
172
- }
173
-
174
- const choice: ChatCompletionChunk.Choice | undefined = chunk.choices[0]
175
- if (choice === undefined) continue
176
-
177
- const delta = choice.delta
178
-
179
- // --- text delta ---
180
- if (delta.content !== null && delta.content !== undefined) {
181
- fullText += delta.content
182
- const textEvent: StreamEvent = { type: 'text', data: delta.content }
183
- yield textEvent
184
- }
185
-
186
- // --- tool call delta ---
187
- for (const toolCallDelta of delta.tool_calls ?? []) {
188
- const idx = toolCallDelta.index
189
-
190
- if (!toolCallBuffers.has(idx)) {
191
- toolCallBuffers.set(idx, {
192
- id: toolCallDelta.id ?? '',
193
- name: toolCallDelta.function?.name ?? '',
194
- argsJson: '',
195
- })
196
- }
197
-
198
- const buf = toolCallBuffers.get(idx)
199
- // buf is guaranteed to exist: we just set it above.
200
- if (buf !== undefined) {
201
- if (toolCallDelta.id) buf.id = toolCallDelta.id
202
- if (toolCallDelta.function?.name) buf.name = toolCallDelta.function.name
203
- if (toolCallDelta.function?.arguments) {
204
- buf.argsJson += toolCallDelta.function.arguments
205
- }
206
- }
207
- }
208
-
209
- if (choice.finish_reason !== null && choice.finish_reason !== undefined) {
210
- finalFinishReason = choice.finish_reason
211
- }
212
- }
213
-
214
- // Emit accumulated tool_use events after the stream ends.
215
- const finalToolUseBlocks: ToolUseBlock[] = []
216
- for (const buf of toolCallBuffers.values()) {
217
- let parsedInput: Record<string, unknown> = {}
218
- try {
219
- const parsed: unknown = JSON.parse(buf.argsJson)
220
- if (parsed !== null && typeof parsed === 'object' && !Array.isArray(parsed)) {
221
- parsedInput = parsed as Record<string, unknown>
222
- }
223
- } catch {
224
- // Malformed JSON — surface as empty object.
225
- }
226
-
227
- const toolUseBlock: ToolUseBlock = {
228
- type: 'tool_use',
229
- id: buf.id,
230
- name: buf.name,
231
- input: parsedInput,
232
- }
233
- finalToolUseBlocks.push(toolUseBlock)
234
- const toolUseEvent: StreamEvent = { type: 'tool_use', data: toolUseBlock }
235
- yield toolUseEvent
236
- }
237
-
238
- // Build the complete content array for the done response.
239
- const doneContent: ContentBlock[] = []
240
- if (fullText.length > 0) {
241
- const textBlock: TextBlock = { type: 'text', text: fullText }
242
- doneContent.push(textBlock)
243
- }
244
- doneContent.push(...finalToolUseBlocks)
245
-
246
- // Fallback: extract tool calls from text when streaming produced no
247
- // native tool_calls (same logic as fromOpenAICompletion).
248
- if (finalToolUseBlocks.length === 0 && fullText.length > 0 && options.tools) {
249
- const toolNames = options.tools.map(t => t.name)
250
- const extracted = extractToolCallsFromText(fullText, toolNames)
251
- if (extracted.length > 0) {
252
- doneContent.push(...extracted)
253
- for (const block of extracted) {
254
- yield { type: 'tool_use', data: block } satisfies StreamEvent
255
- }
256
- }
257
- }
258
-
259
- const hasToolUseBlocks = doneContent.some(b => b.type === 'tool_use')
260
- const resolvedStopReason = hasToolUseBlocks && finalFinishReason === 'stop'
261
- ? 'tool_use'
262
- : normalizeFinishReason(finalFinishReason)
263
-
264
- const finalResponse: LLMResponse = {
265
- id: completionId,
266
- content: doneContent,
267
- model: completionModel,
268
- stop_reason: resolvedStopReason,
269
- usage: { input_tokens: inputTokens, output_tokens: outputTokens },
270
- }
271
-
272
- const doneEvent: StreamEvent = { type: 'done', data: finalResponse }
273
- yield doneEvent
274
- } catch (err) {
275
- const error = err instanceof Error ? err : new Error(String(err))
276
- const errorEvent: StreamEvent = { type: 'error', data: error }
277
- yield errorEvent
278
- }
279
- }
280
- }
281
-
282
- // Re-export types that consumers of this module commonly need alongside the adapter.
283
- export type {
284
- ContentBlock,
285
- LLMAdapter,
286
- LLMChatOptions,
287
- LLMMessage,
288
- LLMResponse,
289
- LLMStreamOptions,
290
- LLMToolDef,
291
- StreamEvent,
292
- }
package/src/memory/shared.ts DELETED
@@ -1,181 +0,0 @@
1
- /**
2
- * @fileoverview Shared memory layer for teams of cooperating agents.
3
- *
4
- * Each agent writes under its own namespace (`<agentName>/<key>`) so entries
5
- * remain attributable, while any agent may read any entry. The
6
- * {@link SharedMemory.getSummary} method produces a human-readable digest
7
- * suitable for injecting into an agent's context window.
8
- */
9
-
10
- import type { MemoryEntry, MemoryStore } from '../types.js'
11
- import { InMemoryStore } from './store.js'
12
-
13
- // ---------------------------------------------------------------------------
14
- // SharedMemory
15
- // ---------------------------------------------------------------------------
16
-
17
- /**
18
- * Namespaced shared memory for a team of agents.
19
- *
20
- * Writes are namespaced as `<agentName>/<key>` so that entries from different
21
- * agents never collide and are always attributable. Reads are namespace-aware
22
- * but also accept fully-qualified keys, making cross-agent reads straightforward.
23
- *
24
- * @example
25
- * ```ts
26
- * const mem = new SharedMemory()
27
- *
28
- * await mem.write('researcher', 'findings', 'TypeScript 5.5 ships const type params')
29
- * await mem.write('coder', 'plan', 'Implement feature X using const type params')
30
- *
31
- * const entry = await mem.read('researcher/findings')
32
- * const all = await mem.listByAgent('researcher')
33
- * const summary = await mem.getSummary()
34
- * ```
35
- */
36
- export class SharedMemory {
37
- private readonly store: InMemoryStore
38
-
39
- constructor() {
40
- this.store = new InMemoryStore()
41
- }
42
-
43
- // ---------------------------------------------------------------------------
44
- // Write
45
- // ---------------------------------------------------------------------------
46
-
47
- /**
48
- * Write `value` under the namespaced key `<agentName>/<key>`.
49
- *
50
- * Metadata is merged with a `{ agent: agentName }` marker so consumers can
51
- * identify provenance when iterating all entries.
52
- *
53
- * @param agentName - The writing agent's name (used as a namespace prefix).
54
- * @param key - Logical key within the agent's namespace.
55
- * @param value - String value to store (serialise objects before writing).
56
- * @param metadata - Optional extra metadata stored alongside the entry.
57
- */
58
- async write(
59
- agentName: string,
60
- key: string,
61
- value: string,
62
- metadata?: Record<string, unknown>,
63
- ): Promise<void> {
64
- const namespacedKey = SharedMemory.namespaceKey(agentName, key)
65
- await this.store.set(namespacedKey, value, {
66
- ...metadata,
67
- agent: agentName,
68
- })
69
- }
70
-
71
- // ---------------------------------------------------------------------------
72
- // Read
73
- // ---------------------------------------------------------------------------
74
-
75
- /**
76
- * Read an entry by its fully-qualified key (`<agentName>/<key>`).
77
- *
78
- * Returns `null` when the key is absent.
79
- */
80
- async read(key: string): Promise<MemoryEntry | null> {
81
- return this.store.get(key)
82
- }
83
-
84
- // ---------------------------------------------------------------------------
85
- // List
86
- // ---------------------------------------------------------------------------
87
-
88
- /** Returns every entry in the shared store, regardless of agent. */
89
- async listAll(): Promise<MemoryEntry[]> {
90
- return this.store.list()
91
- }
92
-
93
- /**
94
- * Returns all entries written by `agentName` (i.e. those whose key starts
95
- * with `<agentName>/`).
96
- */
97
- async listByAgent(agentName: string): Promise<MemoryEntry[]> {
98
- const prefix = SharedMemory.namespaceKey(agentName, '')
99
- const all = await this.store.list()
100
- return all.filter((entry) => entry.key.startsWith(prefix))
101
- }
102
-
103
- // ---------------------------------------------------------------------------
104
- // Summary
105
- // ---------------------------------------------------------------------------
106
-
107
- /**
108
- * Produces a human-readable summary of all entries in the store.
109
- *
110
- * The output is structured as a markdown-style block, grouped by agent, and
111
- * is designed to be prepended to an agent's system prompt or injected as a
112
- * user turn so the agent has context about what its teammates know.
113
- *
114
- * Returns an empty string when the store is empty.
115
- *
116
- * @example
117
- * ```
118
- * ## Shared Team Memory
119
- *
120
- * ### researcher
121
- * - findings: TypeScript 5.5 ships const type params
122
- *
123
- * ### coder
124
- * - plan: Implement feature X using const type params
125
- * ```
126
- */
127
- async getSummary(): Promise<string> {
128
- const all = await this.store.list()
129
- if (all.length === 0) return ''
130
-
131
- // Group entries by agent name.
132
- const byAgent = new Map<string, Array<{ localKey: string; value: string }>>()
133
- for (const entry of all) {
134
- const slashIdx = entry.key.indexOf('/')
135
- const agent = slashIdx === -1 ? '_unknown' : entry.key.slice(0, slashIdx)
136
- const localKey = slashIdx === -1 ? entry.key : entry.key.slice(slashIdx + 1)
137
-
138
- let group = byAgent.get(agent)
139
- if (!group) {
140
- group = []
141
- byAgent.set(agent, group)
142
- }
143
- group.push({ localKey, value: entry.value })
144
- }
145
-
146
- const lines: string[] = ['## Shared Team Memory', '']
147
- for (const [agent, entries] of byAgent) {
148
- lines.push(`### ${agent}`)
149
- for (const { localKey, value } of entries) {
150
- // Truncate long values so the summary stays readable in a context window.
151
- const displayValue =
152
- value.length > 200 ? `${value.slice(0, 197)}…` : value
153
- lines.push(`- ${localKey}: ${displayValue}`)
154
- }
155
- lines.push('')
156
- }
157
-
158
- return lines.join('\n').trimEnd()
159
- }
160
-
161
- // ---------------------------------------------------------------------------
162
- // Store access
163
- // ---------------------------------------------------------------------------
164
-
165
- /**
166
- * Returns the underlying {@link MemoryStore} so callers that only need the
167
- * raw key-value interface can receive a properly typed reference without
168
- * accessing private fields via bracket notation.
169
- */
170
- getStore(): MemoryStore {
171
- return this.store
172
- }
173
-
174
- // ---------------------------------------------------------------------------
175
- // Private helpers
176
- // ---------------------------------------------------------------------------
177
-
178
- private static namespaceKey(agentName: string, key: string): string {
179
- return `${agentName}/${key}`
180
- }
181
- }
package/src/memory/store.ts DELETED
@@ -1,124 +0,0 @@
1
- /**
2
- * @fileoverview In-memory implementation of {@link MemoryStore}.
3
- *
4
- * All data lives in a plain `Map` and is never persisted to disk. This is the
5
- * default store used by {@link SharedMemory} and is suitable for testing and
6
- * single-process use-cases. Swap it for a Redis or SQLite-backed implementation
7
- * in production by satisfying the same {@link MemoryStore} interface.
8
- */
9
-
10
- import type { MemoryEntry, MemoryStore } from '../types.js'
11
-
12
- // ---------------------------------------------------------------------------
13
- // InMemoryStore
14
- // ---------------------------------------------------------------------------
15
-
16
- /**
17
- * Synchronous-under-the-hood key/value store that exposes an `async` surface
18
- * so implementations can be swapped for async-native backends without changing
19
- * callers.
20
- *
21
- * All keys are treated as opaque strings. Values are always strings; structured
22
- * data must be serialised by the caller (e.g. `JSON.stringify`).
23
- *
24
- * @example
25
- * ```ts
26
- * const store = new InMemoryStore()
27
- * await store.set('config', JSON.stringify({ model: 'claude-opus-4-6' }))
28
- * const entry = await store.get('config')
29
- * ```
30
- */
31
- export class InMemoryStore implements MemoryStore {
32
- private readonly data = new Map<string, MemoryEntry>()
33
-
34
- // ---------------------------------------------------------------------------
35
- // MemoryStore interface
36
- // ---------------------------------------------------------------------------
37
-
38
- /** Returns the entry for `key`, or `null` if not present. */
39
- async get(key: string): Promise<MemoryEntry | null> {
40
- return this.data.get(key) ?? null
41
- }
42
-
43
- /**
44
- * Upserts `key` with `value` and optional `metadata`.
45
- *
46
- * If the key already exists its `createdAt` is **preserved** so callers can
47
- * detect when a value was first written.
48
- */
49
- async set(
50
- key: string,
51
- value: string,
52
- metadata?: Record<string, unknown>,
53
- ): Promise<void> {
54
- const existing = this.data.get(key)
55
- const entry: MemoryEntry = {
56
- key,
57
- value,
58
- metadata: metadata !== undefined ? { ...metadata } : undefined,
59
- createdAt: existing?.createdAt ?? new Date(),
60
- }
61
- this.data.set(key, entry)
62
- }
63
-
64
- /** Returns a snapshot of all entries in insertion order. */
65
- async list(): Promise<MemoryEntry[]> {
66
- return Array.from(this.data.values())
67
- }
68
-
69
- /**
70
- * Removes the entry for `key`.
71
- * Deleting a non-existent key is a no-op.
72
- */
73
- async delete(key: string): Promise<void> {
74
- this.data.delete(key)
75
- }
76
-
77
- /** Removes **all** entries from the store. */
78
- async clear(): Promise<void> {
79
- this.data.clear()
80
- }
81
-
82
- // ---------------------------------------------------------------------------
83
- // Extensions beyond the base MemoryStore interface
84
- // ---------------------------------------------------------------------------
85
-
86
- /**
87
- * Returns entries whose `key` starts with `query` **or** whose `value`
88
- * contains `query` (case-insensitive substring match).
89
- *
90
- * This is a simple linear scan; it is not suitable for very large stores
91
- * without an index layer on top.
92
- *
93
- * @example
94
- * ```ts
95
- * // Find all entries related to "research"
96
- * const hits = await store.search('research')
97
- * ```
98
- */
99
- async search(query: string): Promise<MemoryEntry[]> {
100
- if (query.length === 0) {
101
- return this.list()
102
- }
103
- const lower = query.toLowerCase()
104
- return Array.from(this.data.values()).filter(
105
- (entry) =>
106
- entry.key.toLowerCase().includes(lower) ||
107
- entry.value.toLowerCase().includes(lower),
108
- )
109
- }
110
-
111
- // ---------------------------------------------------------------------------
112
- // Convenience helpers (not part of MemoryStore)
113
- // ---------------------------------------------------------------------------
114
-
115
- /** Returns the number of entries currently held in the store. */
116
- get size(): number {
117
- return this.data.size
118
- }
119
-
120
- /** Returns `true` if `key` exists in the store. */
121
- has(key: string): boolean {
122
- return this.data.has(key)
123
- }
124
- }