@stravigor/saina 0.4.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/README.md +82 -0
- package/package.json +25 -0
- package/src/agent.ts +73 -0
- package/src/helpers.ts +756 -0
- package/src/index.ts +38 -0
- package/src/providers/anthropic_provider.ts +278 -0
- package/src/providers/openai_provider.ts +351 -0
- package/src/saina_manager.ts +116 -0
- package/src/saina_provider.ts +16 -0
- package/src/tool.ts +50 -0
- package/src/types.ts +179 -0
- package/src/utils/schema.ts +27 -0
- package/src/utils/sse_parser.ts +62 -0
- package/src/workflow.ts +180 -0
- package/tsconfig.json +4 -0
package/src/helpers.ts
ADDED
|
@@ -0,0 +1,756 @@
|
|
|
1
|
+
import SainaManager from './saina_manager.ts'
|
|
2
|
+
import { Agent } from './agent.ts'
|
|
3
|
+
import { Workflow } from './workflow.ts'
|
|
4
|
+
import { zodToJsonSchema } from './utils/schema.ts'
|
|
5
|
+
import type {
|
|
6
|
+
AIProvider,
|
|
7
|
+
CompletionRequest,
|
|
8
|
+
CompletionResponse,
|
|
9
|
+
StreamChunk,
|
|
10
|
+
Message,
|
|
11
|
+
ToolCall,
|
|
12
|
+
ToolCallRecord,
|
|
13
|
+
ToolDefinition,
|
|
14
|
+
AgentResult,
|
|
15
|
+
AgentEvent,
|
|
16
|
+
Usage,
|
|
17
|
+
JsonSchema,
|
|
18
|
+
SerializedThread,
|
|
19
|
+
} from './types.ts'
|
|
20
|
+
|
|
21
|
+
// ── Shared tool executor ─────────────────────────────────────────────────────
|
|
22
|
+
|
|
23
|
+
/** Execute a single tool call, returning the result and the tool message. */
|
|
24
|
+
async function executeTool(
|
|
25
|
+
tools: ToolDefinition[] | undefined,
|
|
26
|
+
toolCall: ToolCall
|
|
27
|
+
): Promise<{ result: unknown; message: Message }> {
|
|
28
|
+
const toolDef = tools?.find(t => t.name === toolCall.name)
|
|
29
|
+
let result: unknown
|
|
30
|
+
|
|
31
|
+
if (!toolDef) {
|
|
32
|
+
result = `Error: Tool "${toolCall.name}" not found`
|
|
33
|
+
} else {
|
|
34
|
+
try {
|
|
35
|
+
result = await toolDef.execute(toolCall.arguments)
|
|
36
|
+
} catch (err) {
|
|
37
|
+
result = `Error: ${err instanceof Error ? err.message : String(err)}`
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
return {
|
|
42
|
+
result,
|
|
43
|
+
message: {
|
|
44
|
+
role: 'tool',
|
|
45
|
+
toolCallId: toolCall.id,
|
|
46
|
+
content: typeof result === 'string' ? result : JSON.stringify(result),
|
|
47
|
+
},
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
// ── Helper Options ───────────────────────────────────────────────────────────
|
|
52
|
+
|
|
53
|
+
/** Options accepted by the one-shot `saina.chat()` and `saina.stream()` helpers. */
export interface ChatOptions {
  /** Registered provider name; defaults to the configured default provider. */
  provider?: string
  /** Model identifier; defaults to the provider's configured model. */
  model?: string
  /** Optional system prompt sent with the request. */
  system?: string
  /** Max tokens to generate; defaults to the global config value. */
  maxTokens?: number
  /** Sampling temperature; defaults to the global config value. */
  temperature?: number
}
|
|
60
|
+
|
|
61
|
+
/**
 * Options for `saina.generate()` structured-output completions.
 *
 * NOTE(review): the type parameter `T` is not referenced by any field here; it
 * only documents the intended parsed shape (see `GenerateResult<T>`).
 */
export interface GenerateOptions<T = any> {
  /** User prompt to complete against. */
  prompt: string
  /**
   * Schema describing the expected output. Typed `any`; at runtime it is
   * passed to `zodToJsonSchema` and, when it exposes a `parse` method (e.g. a
   * Zod schema), used to validate the parsed JSON response.
   */
  schema: any
  /** Registered provider name; defaults to the configured default provider. */
  provider?: string
  /** Model identifier; defaults to the provider's configured model. */
  model?: string
  /** Optional system prompt sent with the request. */
  system?: string
  /** Max tokens to generate; defaults to the global config value. */
  maxTokens?: number
  /** Sampling temperature; defaults to the global config value. */
  temperature?: number
}
|
|
70
|
+
|
|
71
|
+
/** Result of a `saina.generate()` call. */
export interface GenerateResult<T = any> {
  /** Parsed payload (schema-validated when the schema exposes `parse`). */
  data: T
  /** Raw model text the payload was parsed from. */
  text: string
  /** Token usage reported by the provider. */
  usage: Usage
}
|
|
76
|
+
|
|
77
|
+
/** Options accepted by `saina.embed()`. */
export interface EmbedOptions {
  /** Registered provider name; defaults to the configured default provider. */
  provider?: string
  /** Embedding model identifier; provider default is used when omitted. */
  model?: string
}
|
|
81
|
+
|
|
82
|
+
// ── saina Helper Object ─────────────────────────────────────────────────────
|
|
83
|
+
|
|
84
|
+
export const saina = {
|
|
85
|
+
/**
|
|
86
|
+
* One-shot chat completion. Returns the text response.
|
|
87
|
+
*
|
|
88
|
+
* @example
|
|
89
|
+
* const answer = await saina.chat('What is the capital of France?')
|
|
90
|
+
* const answer = await saina.chat('Explain X', { provider: 'openai', model: 'gpt-4o-mini' })
|
|
91
|
+
*/
|
|
92
|
+
async chat(prompt: string, options: ChatOptions = {}): Promise<string> {
|
|
93
|
+
const config = SainaManager.config
|
|
94
|
+
const providerName = options.provider ?? config.default
|
|
95
|
+
|
|
96
|
+
const response = await SainaManager.complete(providerName, {
|
|
97
|
+
model:
|
|
98
|
+
(options.model ?? SainaManager.provider(providerName).name === 'anthropic')
|
|
99
|
+
? (SainaManager.config.providers[providerName]?.model ?? config.default)
|
|
100
|
+
: (SainaManager.config.providers[providerName]?.model ?? ''),
|
|
101
|
+
messages: [{ role: 'user', content: prompt }],
|
|
102
|
+
system: options.system,
|
|
103
|
+
maxTokens: options.maxTokens ?? config.maxTokens,
|
|
104
|
+
temperature: options.temperature ?? config.temperature,
|
|
105
|
+
})
|
|
106
|
+
|
|
107
|
+
return response.content
|
|
108
|
+
},
|
|
109
|
+
|
|
110
|
+
/**
|
|
111
|
+
* One-shot streaming completion.
|
|
112
|
+
*
|
|
113
|
+
* @example
|
|
114
|
+
* for await (const chunk of saina.stream('Write a poem')) {
|
|
115
|
+
* if (chunk.type === 'text') process.stdout.write(chunk.text!)
|
|
116
|
+
* }
|
|
117
|
+
*/
|
|
118
|
+
async *stream(prompt: string, options: ChatOptions = {}): AsyncIterable<StreamChunk> {
|
|
119
|
+
const config = SainaManager.config
|
|
120
|
+
const providerName = options.provider ?? config.default
|
|
121
|
+
const provider = SainaManager.provider(providerName)
|
|
122
|
+
const providerConfig = config.providers[providerName]
|
|
123
|
+
|
|
124
|
+
yield* provider.stream({
|
|
125
|
+
model: options.model ?? providerConfig?.model ?? '',
|
|
126
|
+
messages: [{ role: 'user', content: prompt }],
|
|
127
|
+
system: options.system,
|
|
128
|
+
maxTokens: options.maxTokens ?? config.maxTokens,
|
|
129
|
+
temperature: options.temperature ?? config.temperature,
|
|
130
|
+
})
|
|
131
|
+
},
|
|
132
|
+
|
|
133
|
+
/**
|
|
134
|
+
* Structured output completion. Returns typed data validated against the schema.
|
|
135
|
+
*
|
|
136
|
+
* @example
|
|
137
|
+
* const { data } = await saina.generate({
|
|
138
|
+
* prompt: 'Extract: "John is 30"',
|
|
139
|
+
* schema: z.object({ name: z.string(), age: z.number() }),
|
|
140
|
+
* })
|
|
141
|
+
* // data.name === 'John', data.age === 30
|
|
142
|
+
*/
|
|
143
|
+
async generate<T>(options: GenerateOptions<T>): Promise<GenerateResult<T>> {
|
|
144
|
+
const config = SainaManager.config
|
|
145
|
+
const providerName = options.provider ?? config.default
|
|
146
|
+
const providerConfig = config.providers[providerName]
|
|
147
|
+
const jsonSchema = zodToJsonSchema(options.schema)
|
|
148
|
+
|
|
149
|
+
const response = await SainaManager.complete(providerName, {
|
|
150
|
+
model: options.model ?? providerConfig?.model ?? '',
|
|
151
|
+
messages: [{ role: 'user', content: options.prompt }],
|
|
152
|
+
system: options.system,
|
|
153
|
+
schema: jsonSchema,
|
|
154
|
+
maxTokens: options.maxTokens ?? config.maxTokens,
|
|
155
|
+
temperature: options.temperature ?? config.temperature,
|
|
156
|
+
})
|
|
157
|
+
|
|
158
|
+
const parsed = JSON.parse(response.content)
|
|
159
|
+
const data = options.schema?.parse ? options.schema.parse(parsed) : parsed
|
|
160
|
+
|
|
161
|
+
return {
|
|
162
|
+
data,
|
|
163
|
+
text: response.content,
|
|
164
|
+
usage: response.usage,
|
|
165
|
+
}
|
|
166
|
+
},
|
|
167
|
+
|
|
168
|
+
/**
|
|
169
|
+
* Generate embeddings for the given text(s).
|
|
170
|
+
*
|
|
171
|
+
* @example
|
|
172
|
+
* const vectors = await saina.embed('Hello world', { provider: 'openai' })
|
|
173
|
+
*/
|
|
174
|
+
async embed(input: string | string[], options: EmbedOptions = {}): Promise<number[][]> {
|
|
175
|
+
const providerName = options.provider ?? SainaManager.config.default
|
|
176
|
+
const provider = SainaManager.provider(providerName)
|
|
177
|
+
|
|
178
|
+
if (!provider.embed) {
|
|
179
|
+
throw new Error(`Provider "${providerName}" does not support embeddings.`)
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
const result = await provider.embed(input, options.model)
|
|
183
|
+
return result.embeddings
|
|
184
|
+
},
|
|
185
|
+
|
|
186
|
+
/** Create a fluent agent runner. */
|
|
187
|
+
agent<T extends Agent>(AgentClass: new () => T): AgentRunner<T> {
|
|
188
|
+
return new AgentRunner(AgentClass)
|
|
189
|
+
},
|
|
190
|
+
|
|
191
|
+
/** Create a multi-turn conversation thread. */
|
|
192
|
+
thread(AgentClass?: new () => Agent): Thread {
|
|
193
|
+
return new Thread(AgentClass)
|
|
194
|
+
},
|
|
195
|
+
|
|
196
|
+
/** Create a multi-agent workflow. */
|
|
197
|
+
workflow(name: string): Workflow {
|
|
198
|
+
return new Workflow(name)
|
|
199
|
+
},
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
// ── AgentRunner ──────────────────────────────────────────────────────────────
|
|
203
|
+
|
|
204
|
+
/**
|
|
205
|
+
* Fluent builder for running an agent. Handles the tool-use loop,
|
|
206
|
+
* structured output parsing, and lifecycle hooks.
|
|
207
|
+
*
|
|
208
|
+
* @example
|
|
209
|
+
* const result = await saina.agent(SupportAgent)
|
|
210
|
+
* .input('Where is my order #12345?')
|
|
211
|
+
* .with({ orderId: '12345' })
|
|
212
|
+
* .run()
|
|
213
|
+
*/
|
|
214
|
+
/**
 * Fluent builder for running an agent. Handles the tool-use loop,
 * structured output parsing, and lifecycle hooks.
 *
 * @example
 * const result = await saina.agent(SupportAgent)
 *   .input('Where is my order #12345?')
 *   .with({ orderId: '12345' })
 *   .run()
 */
export class AgentRunner<T extends Agent = Agent> {
  // Builder state captured before run()/stream() is invoked.
  private _input = ''
  private _context: Record<string, unknown> = {}
  private _provider?: string
  private _model?: string

  constructor(private AgentClass: new () => T) {}

  /** Set the user input / prompt for the agent. */
  input(text: string): this {
    this._input = text
    return this
  }

  /** Add context variables. Available as `{{key}}` in agent instructions. */
  with(context: Record<string, unknown>): this {
    Object.assign(this._context, context)
    return this
  }

  /** Override the provider (and optionally model) for this run. */
  using(provider: string, model?: string): this {
    this._provider = provider
    if (model) this._model = model
    return this
  }

  /**
   * Run the agent to completion.
   *
   * Resolution order for provider/model/limits: runner override → agent
   * property → global config. Each loop iteration issues one completion;
   * tool calls are executed and fed back until the model stops requesting
   * tools or `maxIterations` is reached.
   */
  async run(): Promise<AgentResult> {
    const agent = new this.AgentClass()
    const config = SainaManager.config

    const providerName = this._provider ?? agent.provider ?? config.default
    const providerConfig = config.providers[providerName]
    const model = this._model ?? agent.model ?? providerConfig?.model ?? ''
    const maxIterations = agent.maxIterations ?? config.maxIterations
    const maxTokens = agent.maxTokens ?? config.maxTokens
    const temperature = agent.temperature ?? config.temperature

    // onStart failures are routed through onError before rethrowing.
    try {
      await agent.onStart?.(this._input, this._context)
    } catch (err) {
      await agent.onError?.(err instanceof Error ? err : new Error(String(err)))
      throw err
    }

    // Build system prompt with context interpolation ({{key}} → String(value)).
    let system: string | undefined = agent.instructions || undefined
    if (system) {
      for (const [key, value] of Object.entries(this._context)) {
        system = system.replaceAll(`{{${key}}}`, String(value))
      }
    }

    // Prepare structured output schema (converted once, reused each iteration).
    let schema: JsonSchema | undefined
    if (agent.output) {
      schema = zodToJsonSchema(agent.output)
    }

    const messages: Message[] = [{ role: 'user', content: this._input }]
    const allToolCalls: ToolCallRecord[] = []
    const totalUsage: Usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
    let iterations = 0

    // Tool loop
    while (iterations < maxIterations) {
      iterations++

      const request: CompletionRequest = {
        model,
        messages: [...messages],
        system,
        maxTokens,
        temperature,
      }

      // Only send tools if the agent has them
      if (agent.tools?.length) {
        request.tools = agent.tools
      }

      // Only send schema when we're not mid-tool-loop (avoid conflicting constraints)
      if (schema && (!agent.tools?.length || iterations > 1)) {
        request.schema = schema
      }

      let response: CompletionResponse
      try {
        response = await SainaManager.complete(providerName, request)
      } catch (err) {
        await agent.onError?.(err instanceof Error ? err : new Error(String(err)))
        throw err
      }

      // Accumulate usage across all iterations.
      totalUsage.inputTokens += response.usage.inputTokens
      totalUsage.outputTokens += response.usage.outputTokens
      totalUsage.totalTokens += response.usage.totalTokens

      // Append assistant message (toolCalls omitted entirely when empty).
      messages.push({
        role: 'assistant',
        content: response.content,
        toolCalls: response.toolCalls.length > 0 ? response.toolCalls : undefined,
      })

      // If no tool calls, we're done
      if (response.stopReason !== 'tool_use' || response.toolCalls.length === 0) {
        // Best-effort structured parse: on JSON/validation failure, fall back
        // to the raw text rather than throwing.
        let data: any = response.content
        if (agent.output && response.content) {
          try {
            const parsed = JSON.parse(response.content)
            data = agent.output.parse ? agent.output.parse(parsed) : parsed
          } catch {
            data = response.content
          }
        }

        const result: AgentResult = {
          data,
          text: response.content,
          toolCalls: allToolCalls,
          messages,
          usage: totalUsage,
          iterations,
        }

        await agent.onComplete?.(result)
        return result
      }

      // Execute tool calls
      await this.executeTools(agent, response.toolCalls, messages, allToolCalls)
    }

    // Max iterations reached — return what we have (data is null in this path).
    const lastAssistant = [...messages].reverse().find(m => m.role === 'assistant')
    const text = typeof lastAssistant?.content === 'string' ? lastAssistant.content : ''

    const result: AgentResult = {
      data: null,
      text,
      toolCalls: allToolCalls,
      messages,
      usage: totalUsage,
      iterations,
    }

    await agent.onComplete?.(result)
    return result
  }

  /**
   * Run the agent with streaming, yielding events for each text chunk and tool execution.
   *
   * NOTE(review): unlike run(), an onStart failure here is NOT routed through
   * onError, and provider errors mid-stream propagate without the onError hook
   * — confirm whether that asymmetry is intentional.
   */
  async *stream(): AsyncIterable<AgentEvent> {
    const agent = new this.AgentClass()
    const config = SainaManager.config

    const providerName = this._provider ?? agent.provider ?? config.default
    const providerConfig = config.providers[providerName]
    const model = this._model ?? agent.model ?? providerConfig?.model ?? ''
    const maxIterations = agent.maxIterations ?? config.maxIterations
    const maxTokens = agent.maxTokens ?? config.maxTokens
    const temperature = agent.temperature ?? config.temperature
    const provider = SainaManager.provider(providerName)

    await agent.onStart?.(this._input, this._context)

    // Context interpolation, same as run().
    let system: string | undefined = agent.instructions || undefined
    if (system) {
      for (const [key, value] of Object.entries(this._context)) {
        system = system.replaceAll(`{{${key}}}`, String(value))
      }
    }

    let schema: JsonSchema | undefined
    if (agent.output) {
      schema = zodToJsonSchema(agent.output)
    }

    const messages: Message[] = [{ role: 'user', content: this._input }]
    const allToolCalls: ToolCallRecord[] = []
    const totalUsage: Usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
    let iterations = 0

    while (iterations < maxIterations) {
      iterations++

      // Emit an iteration marker for every loop pass after the first.
      if (iterations > 1) {
        yield { type: 'iteration', iteration: iterations }
      }

      const request: CompletionRequest = {
        model,
        messages: [...messages],
        system,
        maxTokens,
        temperature,
      }

      if (agent.tools?.length) request.tools = agent.tools
      if (schema && (!agent.tools?.length || iterations > 1)) request.schema = schema

      // Stream the response, forwarding text and accumulating tool-call
      // fragments keyed by the provider-reported tool index.
      let fullText = ''
      const pendingToolCalls: Map<number, { id: string; name: string; args: string }> = new Map()

      for await (const chunk of provider.stream(request)) {
        if (chunk.type === 'text' && chunk.text) {
          fullText += chunk.text
          yield { type: 'text', text: chunk.text }
        } else if (chunk.type === 'tool_start' && chunk.toolCall) {
          pendingToolCalls.set(chunk.toolIndex ?? 0, {
            id: chunk.toolCall.id ?? '',
            name: chunk.toolCall.name ?? '',
            args: '',
          })
        } else if (chunk.type === 'tool_delta' && chunk.text) {
          const pending = pendingToolCalls.get(chunk.toolIndex ?? 0)
          if (pending) pending.args += chunk.text
        } else if (chunk.type === 'usage' && chunk.usage) {
          totalUsage.inputTokens += chunk.usage.inputTokens
          totalUsage.outputTokens += chunk.usage.outputTokens
          totalUsage.totalTokens += chunk.usage.totalTokens
        }
      }

      // Build tool calls from accumulated stream data; unparsable argument
      // JSON is preserved under a `_raw` key instead of being dropped.
      const toolCalls: ToolCall[] = []
      for (const [, pending] of pendingToolCalls) {
        let args: Record<string, unknown> = {}
        try {
          args = JSON.parse(pending.args)
        } catch {
          args = pending.args ? { _raw: pending.args } : {}
        }
        toolCalls.push({ id: pending.id, name: pending.name, arguments: args })
      }

      // Append assistant message
      messages.push({
        role: 'assistant',
        content: fullText,
        toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
      })

      // If no tool calls, done
      if (toolCalls.length === 0) {
        let data: any = fullText
        if (agent.output && fullText) {
          try {
            const parsed = JSON.parse(fullText)
            data = agent.output.parse ? agent.output.parse(parsed) : parsed
          } catch {
            data = fullText
          }
        }

        const result: AgentResult = {
          data,
          text: fullText,
          toolCalls: allToolCalls,
          messages,
          usage: totalUsage,
          iterations,
        }

        await agent.onComplete?.(result)
        yield { type: 'done', result }
        return
      }

      // Execute tools and yield events (sequentially, preserving call order).
      for (const toolCall of toolCalls) {
        await agent.onToolCall?.(toolCall)

        const start = performance.now()
        const { result: toolResult, message } = await executeTool(agent.tools, toolCall)
        const duration = performance.now() - start

        const record: ToolCallRecord = {
          name: toolCall.name,
          arguments: toolCall.arguments,
          result: toolResult,
          duration,
        }
        allToolCalls.push(record)
        await agent.onToolResult?.(record)

        yield { type: 'tool_result', toolCall: record }

        messages.push(message)
      }
    }

    // Max iterations — mirror run()'s fallback result (data is null).
    const lastAssistant = [...messages].reverse().find(m => m.role === 'assistant')
    const text = typeof lastAssistant?.content === 'string' ? lastAssistant.content : ''

    const result: AgentResult = {
      data: null,
      text,
      toolCalls: allToolCalls,
      messages,
      usage: totalUsage,
      iterations,
    }

    await agent.onComplete?.(result)
    yield { type: 'done', result }
  }

  // ── Private ────────────────────────────────────────────────────────────────

  /**
   * Execute a batch of tool calls sequentially, recording each call (with
   * wall-clock duration) into `allToolCalls` and appending the tool message
   * to `messages`. Mutates both arrays in place.
   */
  private async executeTools(
    agent: Agent,
    toolCalls: ToolCall[],
    messages: Message[],
    allToolCalls: ToolCallRecord[]
  ): Promise<void> {
    for (const toolCall of toolCalls) {
      await agent.onToolCall?.(toolCall)

      const start = performance.now()
      const { result, message } = await executeTool(agent.tools, toolCall)
      const duration = performance.now() - start

      const record: ToolCallRecord = {
        name: toolCall.name,
        arguments: toolCall.arguments,
        result,
        duration,
      }
      allToolCalls.push(record)
      await agent.onToolResult?.(record)

      messages.push(message)
    }
  }
}
|
|
554
|
+
|
|
555
|
+
// ── Thread ────────────────────────────────────────────────────────────────────
|
|
556
|
+
|
|
557
|
+
/**
|
|
558
|
+
* Multi-turn conversation thread with optional agent configuration.
|
|
559
|
+
*
|
|
560
|
+
* @example
|
|
561
|
+
* const thread = saina.thread()
|
|
562
|
+
* thread.system('You are a helpful assistant.')
|
|
563
|
+
* const r1 = await thread.send('My name is Alice')
|
|
564
|
+
* const r2 = await thread.send('What is my name?')
|
|
565
|
+
*
|
|
566
|
+
* // With agent
|
|
567
|
+
* const thread = saina.thread(SupportAgent)
|
|
568
|
+
* const r1 = await thread.send('I need help with order #123')
|
|
569
|
+
*/
|
|
570
|
+
/**
 * Multi-turn conversation thread with optional agent configuration.
 *
 * @example
 * const thread = saina.thread()
 * thread.system('You are a helpful assistant.')
 * const r1 = await thread.send('My name is Alice')
 * const r2 = await thread.send('What is my name?')
 *
 * // With agent
 * const thread = saina.thread(SupportAgent)
 * const r1 = await thread.send('I need help with order #123')
 */
export class Thread {
  // Full conversation history; mutated by send()/stream(), reset by clear().
  private messages: Message[] = []
  private _provider?: string
  private _model?: string
  private _system?: string
  private _tools?: ToolDefinition[]
  private _maxTokens?: number
  private _temperature?: number

  constructor(AgentClass?: new () => Agent) {
    // When an agent class is supplied, seed the thread's settings from a
    // throwaway instance of it (instructions become the system prompt).
    if (AgentClass) {
      const agent = new AgentClass()
      this._provider = agent.provider
      this._model = agent.model
      this._system = agent.instructions || undefined
      this._tools = agent.tools
      this._maxTokens = agent.maxTokens
      this._temperature = agent.temperature
    }
  }

  /** Set or override the system prompt. */
  system(prompt: string): this {
    this._system = prompt
    return this
  }

  /** Override the provider (and optionally model). */
  using(provider: string, model?: string): this {
    this._provider = provider
    if (model) this._model = model
    return this
  }

  /** Set tools available in this thread. */
  tools(tools: ToolDefinition[]): this {
    this._tools = tools
    return this
  }

  /**
   * Send a message and get the assistant's response. Handles tool calls
   * automatically, looping up to a fixed cap of 10 tool iterations.
   */
  async send(message: string): Promise<string> {
    const config = SainaManager.config
    const providerName = this._provider ?? config.default
    const providerConfig = config.providers[providerName]

    this.messages.push({ role: 'user', content: message })

    // Hard-coded cap; independent of the agent/config maxIterations used by
    // AgentRunner.
    const maxIterations = 10
    let iterations = 0

    while (iterations < maxIterations) {
      iterations++

      const request: CompletionRequest = {
        model: this._model ?? providerConfig?.model ?? '',
        messages: [...this.messages],
        system: this._system,
        maxTokens: this._maxTokens ?? config.maxTokens,
        temperature: this._temperature ?? config.temperature,
      }

      if (this._tools?.length) request.tools = this._tools

      const response = await SainaManager.complete(providerName, request)

      this.messages.push({
        role: 'assistant',
        content: response.content,
        toolCalls: response.toolCalls.length > 0 ? response.toolCalls : undefined,
      })

      // If no tool calls, return
      if (response.stopReason !== 'tool_use' || response.toolCalls.length === 0) {
        return response.content
      }

      // Execute tools (results appended as tool messages for the next turn).
      for (const toolCall of response.toolCalls) {
        const { message } = await executeTool(this._tools, toolCall)
        this.messages.push(message)
      }
    }

    // Return last assistant content (iteration cap reached).
    const last = [...this.messages].reverse().find(m => m.role === 'assistant')
    return typeof last?.content === 'string' ? last.content : ''
  }

  /**
   * Stream a message response. Handles tool calls automatically for
   * multi-turn: every raw chunk (including tool chunks) is yielded as-is
   * while text and tool-call fragments are accumulated into history.
   */
  async *stream(message: string): AsyncIterable<StreamChunk> {
    const config = SainaManager.config
    const providerName = this._provider ?? config.default
    const providerConfig = config.providers[providerName]
    const provider = SainaManager.provider(providerName)

    this.messages.push({ role: 'user', content: message })

    const maxIterations = 10
    let iterations = 0

    while (iterations < maxIterations) {
      iterations++

      let fullText = ''
      const pendingToolCalls: Map<number, { id: string; name: string; args: string }> = new Map()

      for await (const chunk of provider.stream({
        model: this._model ?? providerConfig?.model ?? '',
        messages: [...this.messages],
        system: this._system,
        tools: this._tools,
        maxTokens: this._maxTokens ?? config.maxTokens,
        temperature: this._temperature ?? config.temperature,
      })) {
        yield chunk

        if (chunk.type === 'text' && chunk.text) {
          fullText += chunk.text
        } else if (chunk.type === 'tool_start' && chunk.toolCall) {
          pendingToolCalls.set(chunk.toolIndex ?? 0, {
            id: chunk.toolCall.id ?? '',
            name: chunk.toolCall.name ?? '',
            args: '',
          })
        } else if (chunk.type === 'tool_delta' && chunk.text) {
          const pending = pendingToolCalls.get(chunk.toolIndex ?? 0)
          if (pending) pending.args += chunk.text
        }
      }

      // Build tool calls from accumulated stream data; unparsable argument
      // JSON is preserved under `_raw` instead of being dropped.
      const toolCalls: ToolCall[] = []
      for (const [, pending] of pendingToolCalls) {
        let args: Record<string, unknown> = {}
        try {
          args = JSON.parse(pending.args)
        } catch {
          args = pending.args ? { _raw: pending.args } : {}
        }
        toolCalls.push({ id: pending.id, name: pending.name, arguments: args })
      }

      // Append assistant message
      this.messages.push({
        role: 'assistant',
        content: fullText,
        toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
      })

      // No tool calls — done
      if (toolCalls.length === 0) return

      // Execute tools
      for (const toolCall of toolCalls) {
        const { message: toolMessage } = await executeTool(this._tools, toolCall)
        this.messages.push(toolMessage)
      }
    }
  }

  /** Get a copy of all messages in this thread. */
  getMessages(): Message[] {
    return [...this.messages]
  }

  /** Serialize the thread for persistence (session, database, cache). */
  serialize(): SerializedThread {
    return {
      messages: [...this.messages],
      system: this._system,
    }
  }

  /**
   * Restore a previously serialized thread.
   * NOTE(review): only messages and system prompt round-trip; provider,
   * model, and tools must be re-applied by the caller — confirm intended.
   */
  restore(data: SerializedThread): this {
    this.messages = [...data.messages]
    this._system = data.system
    return this
  }

  /** Clear all messages from the thread. */
  clear(): this {
    this.messages = []
    return this
  }
}
|