@strav/brain 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +32 -0
- package/README.md +82 -0
- package/package.json +28 -0
- package/src/agent.ts +73 -0
- package/src/brain_manager.ts +136 -0
- package/src/brain_provider.ts +16 -0
- package/src/helpers.ts +903 -0
- package/src/index.ts +42 -0
- package/src/memory/context_budget.ts +120 -0
- package/src/memory/index.ts +17 -0
- package/src/memory/memory_manager.ts +168 -0
- package/src/memory/semantic_memory.ts +89 -0
- package/src/memory/strategies/sliding_window.ts +20 -0
- package/src/memory/strategies/summarize.ts +157 -0
- package/src/memory/thread_store.ts +56 -0
- package/src/memory/token_counter.ts +101 -0
- package/src/memory/types.ts +68 -0
- package/src/providers/anthropic_provider.ts +276 -0
- package/src/providers/openai_provider.ts +509 -0
- package/src/providers/openai_responses_provider.ts +319 -0
- package/src/tool.ts +50 -0
- package/src/types.ts +182 -0
- package/src/utils/retry.ts +100 -0
- package/src/utils/schema.ts +27 -0
- package/src/utils/sse_parser.ts +62 -0
- package/src/workflow.ts +180 -0
- package/tsconfig.json +5 -0
package/src/helpers.ts
ADDED
|
@@ -0,0 +1,903 @@
|
|
|
1
|
+
import BrainManager from './brain_manager.ts'
|
|
2
|
+
import { Agent } from './agent.ts'
|
|
3
|
+
import { Workflow } from './workflow.ts'
|
|
4
|
+
import { zodToJsonSchema } from './utils/schema.ts'
|
|
5
|
+
import { MemoryManager } from './memory/memory_manager.ts'
|
|
6
|
+
import { ContextBudget } from './memory/context_budget.ts'
|
|
7
|
+
import type { MemoryConfig, SerializedMemoryThread, Fact } from './memory/types.ts'
|
|
8
|
+
import type { SemanticMemory } from './memory/semantic_memory.ts'
|
|
9
|
+
import type {
|
|
10
|
+
AIProvider,
|
|
11
|
+
CompletionRequest,
|
|
12
|
+
CompletionResponse,
|
|
13
|
+
StreamChunk,
|
|
14
|
+
Message,
|
|
15
|
+
ToolCall,
|
|
16
|
+
ToolCallRecord,
|
|
17
|
+
ToolDefinition,
|
|
18
|
+
AgentResult,
|
|
19
|
+
AgentEvent,
|
|
20
|
+
Usage,
|
|
21
|
+
JsonSchema,
|
|
22
|
+
SerializedThread,
|
|
23
|
+
} from './types.ts'
|
|
24
|
+
|
|
25
|
+
// ── Shared tool executor ─────────────────────────────────────────────────────
|
|
26
|
+
|
|
27
|
+
/** Execute a single tool call, returning the result and the tool message. */
|
|
28
|
+
async function executeTool(
|
|
29
|
+
tools: ToolDefinition[] | undefined,
|
|
30
|
+
toolCall: ToolCall
|
|
31
|
+
): Promise<{ result: unknown; message: Message }> {
|
|
32
|
+
const toolDef = tools?.find(t => t.name === toolCall.name)
|
|
33
|
+
let result: unknown
|
|
34
|
+
|
|
35
|
+
if (!toolDef) {
|
|
36
|
+
result = `Error: Tool "${toolCall.name}" not found`
|
|
37
|
+
} else {
|
|
38
|
+
try {
|
|
39
|
+
result = await toolDef.execute(toolCall.arguments)
|
|
40
|
+
} catch (err) {
|
|
41
|
+
result = `Error: ${err instanceof Error ? err.message : String(err)}`
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
return {
|
|
46
|
+
result,
|
|
47
|
+
message: {
|
|
48
|
+
role: 'tool',
|
|
49
|
+
toolCallId: toolCall.id,
|
|
50
|
+
content: typeof result === 'string' ? result : JSON.stringify(result),
|
|
51
|
+
},
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
// ── Helper Options ───────────────────────────────────────────────────────────

/** Options for one-shot `brain.chat()` / `brain.stream()` calls. */
export interface ChatOptions {
  /** Provider name; defaults to `BrainManager.config.default`. */
  provider?: string
  /** Model ID; defaults to the provider's configured model. */
  model?: string
  /** System prompt for the request. */
  system?: string
  /** Max output tokens; defaults to `BrainManager.config.maxTokens`. */
  maxTokens?: number
  /** Sampling temperature; defaults to `BrainManager.config.temperature`. */
  temperature?: number
}

/** Options for structured-output `brain.generate()` calls. */
export interface GenerateOptions<T = any> {
  /** The user prompt to complete against. */
  prompt: string
  /**
   * Schema describing the expected output. Passed to `zodToJsonSchema()` and,
   * when it exposes `.parse`, used to validate the parsed response — typed as
   * `any` presumably to avoid a hard zod dependency (TODO confirm).
   */
  schema: any
  /** Provider name; defaults to `BrainManager.config.default`. */
  provider?: string
  /** Model ID; defaults to the provider's configured model. */
  model?: string
  /** System prompt for the request. */
  system?: string
  /** Max output tokens; defaults to `BrainManager.config.maxTokens`. */
  maxTokens?: number
  /** Sampling temperature; defaults to `BrainManager.config.temperature`. */
  temperature?: number
}

/** Result of `brain.generate()`: typed data plus the raw text and usage. */
export interface GenerateResult<T = any> {
  /** Parsed (and, when possible, schema-validated) response data. */
  data: T
  /** Raw response text as returned by the provider. */
  text: string
  /** Token usage reported by the provider. */
  usage: Usage
}

/** Options for `brain.embed()` calls. */
export interface EmbedOptions {
  /** Provider name; defaults to `BrainManager.config.default`. */
  provider?: string
  /** Embedding model ID; provider default applies when omitted. */
  model?: string
}
|
|
85
|
+
|
|
86
|
+
// ── brain Helper Object ─────────────────────────────────────────────────────
|
|
87
|
+
|
|
88
|
+
export const brain = {
|
|
89
|
+
/**
|
|
90
|
+
* One-shot chat completion. Returns the text response.
|
|
91
|
+
*
|
|
92
|
+
* @example
|
|
93
|
+
* const answer = await brain.chat('What is the capital of France?')
|
|
94
|
+
* const answer = await brain.chat('Explain X', { provider: 'openai', model: 'gpt-4o-mini' })
|
|
95
|
+
*/
|
|
96
|
+
async chat(prompt: string, options: ChatOptions = {}): Promise<string> {
|
|
97
|
+
const config = BrainManager.config
|
|
98
|
+
const providerName = options.provider ?? config.default
|
|
99
|
+
|
|
100
|
+
const response = await BrainManager.complete(providerName, {
|
|
101
|
+
model:
|
|
102
|
+
(options.model ?? BrainManager.provider(providerName).name === 'anthropic')
|
|
103
|
+
? (BrainManager.config.providers[providerName]?.model ?? config.default)
|
|
104
|
+
: (BrainManager.config.providers[providerName]?.model ?? ''),
|
|
105
|
+
messages: [{ role: 'user', content: prompt }],
|
|
106
|
+
system: options.system,
|
|
107
|
+
maxTokens: options.maxTokens ?? config.maxTokens,
|
|
108
|
+
temperature: options.temperature ?? config.temperature,
|
|
109
|
+
})
|
|
110
|
+
|
|
111
|
+
return response.content
|
|
112
|
+
},
|
|
113
|
+
|
|
114
|
+
/**
|
|
115
|
+
* One-shot streaming completion.
|
|
116
|
+
*
|
|
117
|
+
* @example
|
|
118
|
+
* for await (const chunk of brain.stream('Write a poem')) {
|
|
119
|
+
* if (chunk.type === 'text') process.stdout.write(chunk.text!)
|
|
120
|
+
* }
|
|
121
|
+
*/
|
|
122
|
+
async *stream(prompt: string, options: ChatOptions = {}): AsyncIterable<StreamChunk> {
|
|
123
|
+
const config = BrainManager.config
|
|
124
|
+
const providerName = options.provider ?? config.default
|
|
125
|
+
const provider = BrainManager.provider(providerName)
|
|
126
|
+
const providerConfig = config.providers[providerName]
|
|
127
|
+
|
|
128
|
+
yield* provider.stream({
|
|
129
|
+
model: options.model ?? providerConfig?.model ?? '',
|
|
130
|
+
messages: [{ role: 'user', content: prompt }],
|
|
131
|
+
system: options.system,
|
|
132
|
+
maxTokens: options.maxTokens ?? config.maxTokens,
|
|
133
|
+
temperature: options.temperature ?? config.temperature,
|
|
134
|
+
})
|
|
135
|
+
},
|
|
136
|
+
|
|
137
|
+
/**
|
|
138
|
+
* Structured output completion. Returns typed data validated against the schema.
|
|
139
|
+
*
|
|
140
|
+
* @example
|
|
141
|
+
* const { data } = await brain.generate({
|
|
142
|
+
* prompt: 'Extract: "John is 30"',
|
|
143
|
+
* schema: z.object({ name: z.string(), age: z.number() }),
|
|
144
|
+
* })
|
|
145
|
+
* // data.name === 'John', data.age === 30
|
|
146
|
+
*/
|
|
147
|
+
async generate<T>(options: GenerateOptions<T>): Promise<GenerateResult<T>> {
|
|
148
|
+
const config = BrainManager.config
|
|
149
|
+
const providerName = options.provider ?? config.default
|
|
150
|
+
const providerConfig = config.providers[providerName]
|
|
151
|
+
const jsonSchema = zodToJsonSchema(options.schema)
|
|
152
|
+
|
|
153
|
+
const response = await BrainManager.complete(providerName, {
|
|
154
|
+
model: options.model ?? providerConfig?.model ?? '',
|
|
155
|
+
messages: [{ role: 'user', content: options.prompt }],
|
|
156
|
+
system: options.system,
|
|
157
|
+
schema: jsonSchema,
|
|
158
|
+
maxTokens: options.maxTokens ?? config.maxTokens,
|
|
159
|
+
temperature: options.temperature ?? config.temperature,
|
|
160
|
+
})
|
|
161
|
+
|
|
162
|
+
const parsed = JSON.parse(response.content)
|
|
163
|
+
const data = options.schema?.parse ? options.schema.parse(parsed) : parsed
|
|
164
|
+
|
|
165
|
+
return {
|
|
166
|
+
data,
|
|
167
|
+
text: response.content,
|
|
168
|
+
usage: response.usage,
|
|
169
|
+
}
|
|
170
|
+
},
|
|
171
|
+
|
|
172
|
+
/**
|
|
173
|
+
* Generate embeddings for the given text(s).
|
|
174
|
+
*
|
|
175
|
+
* @example
|
|
176
|
+
* const vectors = await brain.embed('Hello world', { provider: 'openai' })
|
|
177
|
+
*/
|
|
178
|
+
async embed(input: string | string[], options: EmbedOptions = {}): Promise<number[][]> {
|
|
179
|
+
const providerName = options.provider ?? BrainManager.config.default
|
|
180
|
+
const provider = BrainManager.provider(providerName)
|
|
181
|
+
|
|
182
|
+
if (!provider.embed) {
|
|
183
|
+
throw new Error(`Provider "${providerName}" does not support embeddings.`)
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
const result = await provider.embed(input, options.model)
|
|
187
|
+
return result.embeddings
|
|
188
|
+
},
|
|
189
|
+
|
|
190
|
+
/** Create a fluent agent runner. */
|
|
191
|
+
agent<T extends Agent>(AgentClass: new () => T): AgentRunner<T> {
|
|
192
|
+
return new AgentRunner(AgentClass)
|
|
193
|
+
},
|
|
194
|
+
|
|
195
|
+
/** Create a multi-turn conversation thread. */
|
|
196
|
+
thread(AgentClass?: new () => Agent): Thread {
|
|
197
|
+
return new Thread(AgentClass)
|
|
198
|
+
},
|
|
199
|
+
|
|
200
|
+
/** Create a multi-agent workflow. */
|
|
201
|
+
workflow(name: string): Workflow {
|
|
202
|
+
return new Workflow(name)
|
|
203
|
+
},
|
|
204
|
+
}
|
|
205
|
+
|
|
206
|
+
// ── AgentRunner ──────────────────────────────────────────────────────────────

/**
 * Fluent builder for running an agent. Handles the tool-use loop,
 * structured output parsing, and lifecycle hooks.
 *
 * @example
 * const result = await brain.agent(SupportAgent)
 *   .input('Where is my order #12345?')
 *   .with({ orderId: '12345' })
 *   .run()
 */
export class AgentRunner<T extends Agent = Agent> {
  // User input / prompt for the run.
  private _input = ''
  // Context variables interpolated into the agent instructions as {{key}}.
  private _context: Record<string, unknown> = {}
  // Per-run provider/model/tools overrides (take precedence over the agent's).
  private _provider?: string
  private _model?: string
  private _tools?: ToolDefinition[]

  constructor(private AgentClass: new () => T) {}

  /** Set the user input / prompt for the agent. */
  input(text: string): this {
    this._input = text
    return this
  }

  /** Add context variables. Available as `{{key}}` in agent instructions. */
  with(context: Record<string, unknown>): this {
    Object.assign(this._context, context)
    return this
  }

  /** Override the provider (and optionally model) for this run. */
  using(provider: string, model?: string): this {
    this._provider = provider
    if (model) this._model = model
    return this
  }

  /** Set or override the tools available to the agent for this run. */
  tools(tools: ToolDefinition[]): this {
    this._tools = tools
    return this
  }

  /**
   * Run the agent to completion.
   *
   * Loops: complete → (optional) execute tool calls → complete again, until
   * the model stops requesting tools or `maxIterations` is hit. Errors from
   * `onStart` or the provider are forwarded to `onError` and rethrown.
   */
  async run(): Promise<AgentResult> {
    const agent = new this.AgentClass()
    const config = BrainManager.config

    // Runner-level tools override agent-level tools
    if (this._tools) {
      agent.tools = this._tools
    }

    // Resolution order for each setting: runner override → agent → global config.
    const providerName = this._provider ?? agent.provider ?? config.default
    const providerConfig = config.providers[providerName]
    const model = this._model ?? agent.model ?? providerConfig?.model ?? ''
    const maxIterations = agent.maxIterations ?? config.maxIterations
    const maxTokens = agent.maxTokens ?? config.maxTokens
    const temperature = agent.temperature ?? config.temperature

    try {
      await agent.onStart?.(this._input, this._context)
    } catch (err) {
      await agent.onError?.(err instanceof Error ? err : new Error(String(err)))
      throw err
    }

    // Build system prompt with context interpolation
    let system: string | undefined = agent.instructions || undefined
    if (system) {
      for (const [key, value] of Object.entries(this._context)) {
        system = system.replaceAll(`{{${key}}}`, String(value))
      }
    }

    // Prepare structured output schema
    let schema: JsonSchema | undefined
    if (agent.output) {
      schema = zodToJsonSchema(agent.output)
    }

    const messages: Message[] = [{ role: 'user', content: this._input }]
    const allToolCalls: ToolCallRecord[] = []
    const totalUsage: Usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
    let iterations = 0

    // Tool loop
    while (iterations < maxIterations) {
      iterations++

      const request: CompletionRequest = {
        model,
        messages: [...messages],
        system,
        maxTokens,
        temperature,
      }

      // Only send tools if the agent has them
      if (agent.tools?.length) {
        request.tools = agent.tools
      }

      // Only send schema when we're not mid-tool-loop (avoid conflicting constraints)
      if (schema && (!agent.tools?.length || iterations > 1)) {
        request.schema = schema
      }

      let response: CompletionResponse
      try {
        response = await BrainManager.complete(providerName, request)
      } catch (err) {
        await agent.onError?.(err instanceof Error ? err : new Error(String(err)))
        throw err
      }

      // Accumulate usage
      totalUsage.inputTokens += response.usage.inputTokens
      totalUsage.outputTokens += response.usage.outputTokens
      totalUsage.totalTokens += response.usage.totalTokens

      // Append assistant message
      messages.push({
        role: 'assistant',
        content: response.content,
        toolCalls: response.toolCalls.length > 0 ? response.toolCalls : undefined,
      })

      // If no tool calls, we're done
      if (response.stopReason !== 'tool_use' || response.toolCalls.length === 0) {
        // Parse structured output when the agent declared an output schema;
        // falls back to the raw text if parsing fails.
        let data: any = response.content
        if (agent.output && response.content) {
          try {
            const parsed = JSON.parse(response.content)
            data = agent.output.parse ? agent.output.parse(parsed) : parsed
          } catch {
            data = response.content
          }
        }

        const result: AgentResult = {
          data,
          text: response.content,
          toolCalls: allToolCalls,
          messages,
          usage: totalUsage,
          iterations,
        }

        await agent.onComplete?.(result)
        return result
      }

      // Execute tool calls
      await this.executeTools(agent, response.toolCalls, messages, allToolCalls)
    }

    // Max iterations reached — return what we have (data is null: no final
    // completion happened, so there is nothing trustworthy to parse).
    const lastAssistant = [...messages].reverse().find(m => m.role === 'assistant')
    const text = typeof lastAssistant?.content === 'string' ? lastAssistant.content : ''

    const result: AgentResult = {
      data: null,
      text,
      toolCalls: allToolCalls,
      messages,
      usage: totalUsage,
      iterations,
    }

    await agent.onComplete?.(result)
    return result
  }

  /**
   * Run the agent with streaming, yielding events for each text chunk and
   * tool execution.
   *
   * NOTE(review): unlike run(), onStart here is not wrapped in try/catch that
   * forwards to onError — confirm whether that asymmetry is intentional.
   */
  async *stream(): AsyncIterable<AgentEvent> {
    const agent = new this.AgentClass()
    const config = BrainManager.config

    // Runner-level tools override agent-level tools
    if (this._tools) {
      agent.tools = this._tools
    }

    // Same setting resolution as run(): runner override → agent → config.
    const providerName = this._provider ?? agent.provider ?? config.default
    const providerConfig = config.providers[providerName]
    const model = this._model ?? agent.model ?? providerConfig?.model ?? ''
    const maxIterations = agent.maxIterations ?? config.maxIterations
    const maxTokens = agent.maxTokens ?? config.maxTokens
    const temperature = agent.temperature ?? config.temperature
    const provider = BrainManager.provider(providerName)

    await agent.onStart?.(this._input, this._context)

    // Interpolate {{key}} context placeholders into the system prompt.
    let system: string | undefined = agent.instructions || undefined
    if (system) {
      for (const [key, value] of Object.entries(this._context)) {
        system = system.replaceAll(`{{${key}}}`, String(value))
      }
    }

    let schema: JsonSchema | undefined
    if (agent.output) {
      schema = zodToJsonSchema(agent.output)
    }

    const messages: Message[] = [{ role: 'user', content: this._input }]
    const allToolCalls: ToolCallRecord[] = []
    const totalUsage: Usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
    let iterations = 0

    while (iterations < maxIterations) {
      iterations++

      // Signal the start of each follow-up round trip (not the first one).
      if (iterations > 1) {
        yield { type: 'iteration', iteration: iterations }
      }

      const request: CompletionRequest = {
        model,
        messages: [...messages],
        system,
        maxTokens,
        temperature,
      }

      if (agent.tools?.length) request.tools = agent.tools
      if (schema && (!agent.tools?.length || iterations > 1)) request.schema = schema

      // Stream the response, accumulating text and partial tool-call args
      // keyed by the provider-reported tool index.
      let fullText = ''
      const pendingToolCalls: Map<number, { id: string; name: string; args: string }> = new Map()

      for await (const chunk of provider.stream(request)) {
        if (chunk.type === 'text' && chunk.text) {
          fullText += chunk.text
          yield { type: 'text', text: chunk.text }
        } else if (chunk.type === 'tool_start' && chunk.toolCall) {
          pendingToolCalls.set(chunk.toolIndex ?? 0, {
            id: chunk.toolCall.id ?? '',
            name: chunk.toolCall.name ?? '',
            args: '',
          })
        } else if (chunk.type === 'tool_delta' && chunk.text) {
          const pending = pendingToolCalls.get(chunk.toolIndex ?? 0)
          if (pending) pending.args += chunk.text
        } else if (chunk.type === 'usage' && chunk.usage) {
          totalUsage.inputTokens += chunk.usage.inputTokens
          totalUsage.outputTokens += chunk.usage.outputTokens
          totalUsage.totalTokens += chunk.usage.totalTokens
        }
      }

      // Build tool calls from accumulated stream data
      const toolCalls: ToolCall[] = []
      for (const [, pending] of pendingToolCalls) {
        let args: Record<string, unknown> = {}
        try {
          args = JSON.parse(pending.args)
        } catch {
          // Preserve malformed argument payloads instead of dropping them.
          args = pending.args ? { _raw: pending.args } : {}
        }
        toolCalls.push({ id: pending.id, name: pending.name, arguments: args })
      }

      // Append assistant message
      messages.push({
        role: 'assistant',
        content: fullText,
        toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
      })

      // If no tool calls, done
      if (toolCalls.length === 0) {
        let data: any = fullText
        if (agent.output && fullText) {
          try {
            const parsed = JSON.parse(fullText)
            data = agent.output.parse ? agent.output.parse(parsed) : parsed
          } catch {
            data = fullText
          }
        }

        const result: AgentResult = {
          data,
          text: fullText,
          toolCalls: allToolCalls,
          messages,
          usage: totalUsage,
          iterations,
        }

        await agent.onComplete?.(result)
        yield { type: 'done', result }
        return
      }

      // Execute tools and yield events
      for (const toolCall of toolCalls) {
        await agent.onToolCall?.(toolCall)

        const start = performance.now()
        const { result: toolResult, message } = await executeTool(agent.tools, toolCall)
        const duration = performance.now() - start

        const record: ToolCallRecord = {
          name: toolCall.name,
          arguments: toolCall.arguments,
          result: toolResult,
          duration,
        }
        allToolCalls.push(record)
        await agent.onToolResult?.(record)

        yield { type: 'tool_result', toolCall: record }

        messages.push(message)
      }
    }

    // Max iterations
    const lastAssistant = [...messages].reverse().find(m => m.role === 'assistant')
    const text = typeof lastAssistant?.content === 'string' ? lastAssistant.content : ''

    const result: AgentResult = {
      data: null,
      text,
      toolCalls: allToolCalls,
      messages,
      usage: totalUsage,
      iterations,
    }

    await agent.onComplete?.(result)
    yield { type: 'done', result }
  }

  // ── Private ────────────────────────────────────────────────────────────────

  /**
   * Execute a batch of tool calls for run(): fires onToolCall/onToolResult
   * hooks, records timing, and appends each tool message to the transcript.
   * Mutates `messages` and `allToolCalls` in place.
   */
  private async executeTools(
    agent: Agent,
    toolCalls: ToolCall[],
    messages: Message[],
    allToolCalls: ToolCallRecord[]
  ): Promise<void> {
    for (const toolCall of toolCalls) {
      await agent.onToolCall?.(toolCall)

      const start = performance.now()
      const { result, message } = await executeTool(agent.tools, toolCall)
      const duration = performance.now() - start

      const record: ToolCallRecord = {
        name: toolCall.name,
        arguments: toolCall.arguments,
        result,
        duration,
      }
      allToolCalls.push(record)
      await agent.onToolResult?.(record)

      messages.push(message)
    }
  }
}
|
|
575
|
+
|
|
576
|
+
// ── Thread ────────────────────────────────────────────────────────────────────

/**
 * Multi-turn conversation thread with optional agent configuration.
 *
 * @example
 * const thread = brain.thread()
 * thread.system('You are a helpful assistant.')
 * const r1 = await thread.send('My name is Alice')
 * const r2 = await thread.send('What is my name?')
 *
 * // With agent
 * const thread = brain.thread(SupportAgent)
 * const r1 = await thread.send('I need help with order #123')
 */
export class Thread {
  // Full conversation history, including assistant and tool messages.
  private messages: Message[] = []
  // Settings seeded from the optional agent and overridable via the fluent API.
  private _provider?: string
  private _model?: string
  private _system?: string
  private _tools?: ToolDefinition[]
  private _maxTokens?: number
  private _temperature?: number
  // Set by .memory(); when present, context is compacted before each request.
  private _memoryManager?: MemoryManager
  // Thread identifier used for persistence (see .id() / .persist()).
  private _id?: string
  private _autoPersist = false

  constructor(AgentClass?: new () => Agent) {
    // When an agent class is supplied, copy its configuration as the
    // thread's defaults.
    if (AgentClass) {
      const agent = new AgentClass()
      this._provider = agent.provider
      this._model = agent.model
      this._system = agent.instructions || undefined
      this._tools = agent.tools
      this._maxTokens = agent.maxTokens
      this._temperature = agent.temperature
    }
  }
|
|
614
|
+
|
|
615
|
+
  /** Set or override the system prompt. */
  system(prompt: string): this {
    this._system = prompt
    return this
  }

  /** Override the provider (and optionally model). */
  using(provider: string, model?: string): this {
    this._provider = provider
    if (model) this._model = model
    return this
  }

  /** Set tools available in this thread. */
  tools(tools: ToolDefinition[]): this {
    this._tools = tools
    return this
  }

  /**
   * Enable memory management with optional config overrides.
   *
   * When enabled, the thread automatically:
   * - Tracks token usage against the context window budget
   * - Compacts older messages into summaries when approaching the limit
   * - Extracts and injects semantic facts into the system prompt
   *
   * Memory is opt-in — without calling `.memory()`, Thread behaves
   * exactly as before (sends full messages array every turn).
   *
   * NOTE: the budget is sized from the provider/model resolved at call time;
   * call .using() before .memory() if you intend to override them.
   */
  memory(config?: Partial<MemoryConfig>): this {
    // Overrides are layered on top of the globally configured memory settings.
    const memConfig: MemoryConfig = { ...BrainManager.memoryConfig, ...config }
    const providerName = this._provider ?? BrainManager.config.default
    const providerConfig = BrainManager.config.providers[providerName]
    const model = this._model ?? providerConfig?.model ?? ''
    const budget = new ContextBudget(memConfig, model)
    this._memoryManager = new MemoryManager(memConfig, budget)
    return this
  }

  /** Set a thread ID (required for persistence). */
  id(threadId: string): this {
    this._id = threadId
    return this
  }

  /** Enable auto-persistence to the configured ThreadStore after each send(). */
  persist(auto = true): this {
    this._autoPersist = auto
    return this
  }

  /** Access the semantic memory (facts), if memory management is enabled. */
  get facts(): SemanticMemory | undefined {
    return this._memoryManager?.facts
  }

  /** Get the current episodic summary, if memory management is enabled. */
  get episodicSummary(): string | undefined {
    return this._memoryManager?.episodicSummary
  }
|
|
676
|
+
|
|
677
|
+
  /**
   * Send a message and get the assistant's response. Handles tool calls
   * automatically, looping up to 10 round trips before giving up and
   * returning the last assistant text.
   */
  async send(message: string): Promise<string> {
    const config = BrainManager.config
    const providerName = this._provider ?? config.default
    const providerConfig = config.providers[providerName]
    const model = this._model ?? providerConfig?.model ?? ''

    this.messages.push({ role: 'user', content: message })

    // Hard cap on tool round trips for a single send().
    const maxIterations = 10
    let iterations = 0

    while (iterations < maxIterations) {
      iterations++

      let contextSystem = this._system
      let contextMessages = [...this.messages]

      // Memory management: prepare context within budget
      if (this._memoryManager) {
        const prepared = await this._memoryManager.prepareContext(this._system, this.messages, {
          provider: providerName,
          model,
        })
        contextSystem = prepared.system
        contextMessages = prepared.messages
      }

      const request: CompletionRequest = {
        model,
        messages: contextMessages,
        system: contextSystem,
        maxTokens: this._maxTokens ?? config.maxTokens,
        temperature: this._temperature ?? config.temperature,
      }

      if (this._tools?.length) request.tools = this._tools

      const response = await BrainManager.complete(providerName, request)

      // The full (uncompacted) history keeps every assistant message.
      this.messages.push({
        role: 'assistant',
        content: response.content,
        toolCalls: response.toolCalls.length > 0 ? response.toolCalls : undefined,
      })

      // If no tool calls, return
      if (response.stopReason !== 'tool_use' || response.toolCalls.length === 0) {
        await this.autoPersist()
        return response.content
      }

      // Execute tools
      for (const toolCall of response.toolCalls) {
        const { message: toolMessage } = await executeTool(this._tools, toolCall)
        this.messages.push(toolMessage)
      }
    }

    // Return last assistant content
    const last = [...this.messages].reverse().find(m => m.role === 'assistant')
    await this.autoPersist()
    return typeof last?.content === 'string' ? last.content : ''
  }
|
|
741
|
+
|
|
742
|
+
  /**
   * Stream a message response. Handles tool calls automatically for
   * multi-turn: every provider chunk is forwarded to the caller while text
   * and tool-call arguments are accumulated for the transcript.
   */
  async *stream(message: string): AsyncIterable<StreamChunk> {
    const config = BrainManager.config
    const providerName = this._provider ?? config.default
    const providerConfig = config.providers[providerName]
    const model = this._model ?? providerConfig?.model ?? ''
    const provider = BrainManager.provider(providerName)

    this.messages.push({ role: 'user', content: message })

    // Hard cap on tool round trips for a single stream().
    const maxIterations = 10
    let iterations = 0

    while (iterations < maxIterations) {
      iterations++

      let contextSystem = this._system
      let contextMessages = [...this.messages]

      // Memory management: prepare context within budget
      if (this._memoryManager) {
        const prepared = await this._memoryManager.prepareContext(this._system, this.messages, {
          provider: providerName,
          model,
        })
        contextSystem = prepared.system
        contextMessages = prepared.messages
      }

      let fullText = ''
      // Partial tool-call args keyed by the provider-reported tool index.
      const pendingToolCalls: Map<number, { id: string; name: string; args: string }> = new Map()

      for await (const chunk of provider.stream({
        model,
        messages: contextMessages,
        system: contextSystem,
        tools: this._tools,
        maxTokens: this._maxTokens ?? config.maxTokens,
        temperature: this._temperature ?? config.temperature,
      })) {
        // Forward every chunk to the caller before accumulating it.
        yield chunk

        if (chunk.type === 'text' && chunk.text) {
          fullText += chunk.text
        } else if (chunk.type === 'tool_start' && chunk.toolCall) {
          pendingToolCalls.set(chunk.toolIndex ?? 0, {
            id: chunk.toolCall.id ?? '',
            name: chunk.toolCall.name ?? '',
            args: '',
          })
        } else if (chunk.type === 'tool_delta' && chunk.text) {
          const pending = pendingToolCalls.get(chunk.toolIndex ?? 0)
          if (pending) pending.args += chunk.text
        }
      }

      // Build tool calls from accumulated stream data
      const toolCalls: ToolCall[] = []
      for (const [, pending] of pendingToolCalls) {
        let args: Record<string, unknown> = {}
        try {
          args = JSON.parse(pending.args)
        } catch {
          // Preserve malformed argument payloads instead of dropping them.
          args = pending.args ? { _raw: pending.args } : {}
        }
        toolCalls.push({ id: pending.id, name: pending.name, arguments: args })
      }

      // Append assistant message
      this.messages.push({
        role: 'assistant',
        content: fullText,
        toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
      })

      // No tool calls — done
      if (toolCalls.length === 0) {
        await this.autoPersist()
        return
      }

      // Execute tools
      for (const toolCall of toolCalls) {
        const { message: toolMessage } = await executeTool(this._tools, toolCall)
        this.messages.push(toolMessage)
      }
    }

    await this.autoPersist()
  }
|
|
832
|
+
|
|
833
|
+
/** Return a defensive copy of the conversation history. */
getMessages(): Message[] {
  return this.messages.slice()
}
|
|
837
|
+
|
|
838
|
+
/**
 * Snapshot the thread into a plain object suitable for persistence
 * (session storage, database row, cache entry).
 */
serialize(): SerializedThread {
  const snapshot: SerializedThread = {
    messages: this.messages.slice(),
    system: this._system,
  }
  return snapshot
}
|
|
845
|
+
|
|
846
|
+
/** Rehydrate this thread from a previously serialized snapshot. */
restore(data: SerializedThread): this {
  this._system = data.system
  this.messages = data.messages.slice()
  return this
}
|
|
852
|
+
|
|
853
|
+
/**
 * Extended serialization that includes memory state (summary, facts).
 * Use this instead of serialize() when memory management is enabled.
 *
 * Side effect: if the thread has no id yet, a UUID is generated AND stored
 * on the thread, so repeated serializations of the same thread carry the
 * same id. (Previously a fresh random id was produced on every call when
 * `_id` was unset, making persisted copies impossible to correlate.)
 */
serializeMemory(): SerializedMemoryThread {
  // Stabilize the id: generate it once and keep it for future calls
  // (autoPersist and the thread store rely on a consistent key).
  this._id ??= crypto.randomUUID()

  const memState = this._memoryManager?.serialize()
  const now = new Date().toISOString()

  return {
    id: this._id,
    messages: [...this.messages],
    system: this._system,
    summary: memState?.summary,
    facts: memState?.facts,
    updatedAt: now,
    // NOTE(review): the thread does not track its true creation time, so
    // createdAt is re-stamped on every serialization. Callers that need a
    // stable createdAt should preserve the stored value on save.
    createdAt: now,
  }
}
|
|
869
|
+
|
|
870
|
+
/**
 * Restore from extended serialization that includes memory state.
 * Use this instead of restore() when memory management is enabled.
 *
 * The memory manager is synced unconditionally when present (the previous
 * guard `data.summary || data.facts` skipped the call for snapshots with
 * no memory state, leaving stale summary/facts from a previously restored
 * thread in place).
 */
restoreMemory(data: SerializedMemoryThread): this {
  this.messages = [...data.messages]
  this._system = data.system
  this._id = data.id

  // Always push the snapshot's memory state (possibly undefined) into the
  // manager so leftover state from a prior restore cannot leak into this
  // thread. NOTE(review): assumes MemoryManager.restore assigns the given
  // fields as-is — confirm against memory_manager.ts.
  this._memoryManager?.restore({
    summary: data.summary,
    facts: data.facts,
  })

  return this
}
|
|
888
|
+
|
|
889
|
+
/**
 * Remove all messages from the thread.
 *
 * NOTE(review): contrary to the previous doc comment ("messages and memory
 * state"), memory-manager state (summary, facts) is NOT reset here — only
 * the message list is emptied. If full clearing is intended, a reset hook
 * on MemoryManager is needed; none is visible from this method.
 */
clear(): this {
  this.messages = []
  return this
}
|
|
894
|
+
|
|
895
|
+
// ── Private ────────────────────────────────────────────────────────────────
|
|
896
|
+
|
|
897
|
+
/** Persist the thread if auto-persist is enabled and a store is configured. */
|
|
898
|
+
private async autoPersist(): Promise<void> {
|
|
899
|
+
if (this._autoPersist && this._id && BrainManager.threadStore) {
|
|
900
|
+
await BrainManager.threadStore.save(this.serializeMemory())
|
|
901
|
+
}
|
|
902
|
+
}
|
|
903
|
+
}
|