@stravigor/saina 0.4.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/index.ts ADDED
@@ -0,0 +1,38 @@
1
// Public entry point for @stravigor/saina — re-exports the package surface.

// Manager (also the default export)
export { default, default as SainaManager } from './saina_manager.ts'

// Provider
export { default as SainaProvider } from './saina_provider.ts'

// Core runtime pieces: facade, agent runner, conversation thread
export { saina, AgentRunner, Thread } from './helpers.ts'
export { Agent } from './agent.ts'
export { defineTool, defineToolbox } from './tool.ts'
export { Workflow } from './workflow.ts'

// Built-in AI providers (OpenAI-compatible APIs reuse OpenAIProvider via baseUrl)
export { AnthropicProvider } from './providers/anthropic_provider.ts'
export { OpenAIProvider } from './providers/openai_provider.ts'

// Low-level utilities
export { parseSSE } from './utils/sse_parser.ts'
export { zodToJsonSchema } from './utils/schema.ts'

// Shared type contracts
export type {
  AIProvider,
  SainaConfig,
  ProviderConfig,
  CompletionRequest,
  CompletionResponse,
  Message,
  ContentBlock,
  ToolCall,
  ToolDefinition,
  StreamChunk,
  Usage,
  AgentResult,
  ToolCallRecord,
  AgentEvent,
  WorkflowResult,
  EmbeddingResponse,
  JsonSchema,
  SSEEvent,
  BeforeHook,
  AfterHook,
  SerializedThread,
  OutputSchema,
} from './types.ts'
export type { ChatOptions, GenerateOptions, GenerateResult, EmbedOptions } from './helpers.ts'
export type { WorkflowContext } from './workflow.ts'
@@ -0,0 +1,278 @@
1
+ import { parseSSE } from '../utils/sse_parser.ts'
2
+ import { ExternalServiceError } from '@stravigor/core/exceptions/errors'
3
+ import type {
4
+ AIProvider,
5
+ CompletionRequest,
6
+ CompletionResponse,
7
+ StreamChunk,
8
+ ProviderConfig,
9
+ Message,
10
+ ToolCall,
11
+ Usage,
12
+ } from '../types.ts'
13
+
14
+ /**
15
+ * Anthropic Messages API provider.
16
+ *
17
+ * Translates the framework's normalized CompletionRequest/Response
18
+ * to/from the Anthropic wire format. Uses raw `fetch()`.
19
+ */
20
+ export class AnthropicProvider implements AIProvider {
21
+ readonly name: string
22
+ private apiKey: string
23
+ private baseUrl: string
24
+ private defaultModel: string
25
+ private defaultMaxTokens: number
26
+
27
+ constructor(config: ProviderConfig) {
28
+ this.name = 'anthropic'
29
+ this.apiKey = config.apiKey
30
+ this.baseUrl = (config.baseUrl ?? 'https://api.anthropic.com').replace(/\/$/, '')
31
+ this.defaultModel = config.model
32
+ this.defaultMaxTokens = config.maxTokens ?? 4096
33
+ }
34
+
35
+ async complete(request: CompletionRequest): Promise<CompletionResponse> {
36
+ const body = this.buildRequestBody(request, false)
37
+
38
+ const response = await fetch(`${this.baseUrl}/v1/messages`, {
39
+ method: 'POST',
40
+ headers: this.buildHeaders(),
41
+ body: JSON.stringify(body),
42
+ })
43
+
44
+ if (!response.ok) {
45
+ const text = await response.text()
46
+ throw new ExternalServiceError('Anthropic', response.status, text)
47
+ }
48
+
49
+ const data: any = await response.json()
50
+ return this.parseResponse(data)
51
+ }
52
+
53
+ async *stream(request: CompletionRequest): AsyncIterable<StreamChunk> {
54
+ const body = this.buildRequestBody(request, true)
55
+
56
+ const response = await fetch(`${this.baseUrl}/v1/messages`, {
57
+ method: 'POST',
58
+ headers: this.buildHeaders(),
59
+ body: JSON.stringify(body),
60
+ })
61
+
62
+ if (!response.ok) {
63
+ const text = await response.text()
64
+ throw new ExternalServiceError('Anthropic', response.status, text)
65
+ }
66
+
67
+ if (!response.body) {
68
+ throw new ExternalServiceError('Anthropic', undefined, 'No stream body returned')
69
+ }
70
+
71
+ let currentBlockIndex = -1
72
+
73
+ for await (const sse of parseSSE(response.body)) {
74
+ if (sse.data === '[DONE]') break
75
+
76
+ let parsed: any
77
+ try {
78
+ parsed = JSON.parse(sse.data)
79
+ } catch {
80
+ continue
81
+ }
82
+
83
+ const type = parsed.type ?? sse.event
84
+
85
+ if (type === 'content_block_start') {
86
+ currentBlockIndex = parsed.index ?? currentBlockIndex + 1
87
+ const block = parsed.content_block
88
+ if (block?.type === 'tool_use') {
89
+ yield {
90
+ type: 'tool_start',
91
+ toolCall: { id: block.id, name: block.name },
92
+ toolIndex: currentBlockIndex,
93
+ }
94
+ }
95
+ } else if (type === 'content_block_delta') {
96
+ const delta = parsed.delta
97
+ if (delta?.type === 'text_delta') {
98
+ yield { type: 'text', text: delta.text }
99
+ } else if (delta?.type === 'input_json_delta') {
100
+ yield {
101
+ type: 'tool_delta',
102
+ text: delta.partial_json,
103
+ toolIndex: parsed.index ?? currentBlockIndex,
104
+ }
105
+ }
106
+ } else if (type === 'content_block_stop') {
107
+ // If we were accumulating a tool call, signal end
108
+ if (currentBlockIndex >= 0) {
109
+ yield { type: 'tool_end', toolIndex: parsed.index ?? currentBlockIndex }
110
+ }
111
+ } else if (type === 'message_delta') {
112
+ const usage = parsed.usage
113
+ if (usage) {
114
+ yield {
115
+ type: 'usage',
116
+ usage: {
117
+ inputTokens: usage.input_tokens ?? 0,
118
+ outputTokens: usage.output_tokens ?? 0,
119
+ totalTokens: (usage.input_tokens ?? 0) + (usage.output_tokens ?? 0),
120
+ },
121
+ }
122
+ }
123
+ } else if (type === 'message_stop') {
124
+ yield { type: 'done' }
125
+ }
126
+ }
127
+ }
128
+
129
+ // ── Private helpers ──────────────────────────────────────────────────────
130
+
131
+ private buildHeaders(): Record<string, string> {
132
+ return {
133
+ 'content-type': 'application/json',
134
+ 'x-api-key': this.apiKey,
135
+ 'anthropic-version': '2023-06-01',
136
+ }
137
+ }
138
+
139
+ private buildRequestBody(request: CompletionRequest, stream: boolean): Record<string, unknown> {
140
+ const body: Record<string, unknown> = {
141
+ model: request.model ?? this.defaultModel,
142
+ max_tokens: request.maxTokens ?? this.defaultMaxTokens,
143
+ messages: this.mapMessages(request.messages),
144
+ }
145
+
146
+ if (stream) body.stream = true
147
+ if (request.system) body.system = request.system
148
+ if (request.temperature !== undefined) body.temperature = request.temperature
149
+ if (request.stopSequences?.length) body.stop_sequences = request.stopSequences
150
+
151
+ // Tools
152
+ if (request.tools?.length) {
153
+ body.tools = request.tools.map(t => ({
154
+ name: t.name,
155
+ description: t.description,
156
+ input_schema: t.parameters,
157
+ }))
158
+ }
159
+
160
+ // Tool choice
161
+ if (request.toolChoice) {
162
+ if (request.toolChoice === 'auto') {
163
+ body.tool_choice = { type: 'auto' }
164
+ } else if (request.toolChoice === 'required') {
165
+ body.tool_choice = { type: 'any' }
166
+ } else {
167
+ body.tool_choice = { type: 'tool', name: request.toolChoice.name }
168
+ }
169
+ }
170
+
171
+ // Structured output
172
+ if (request.schema) {
173
+ body.output_format = { type: 'json_schema', schema: request.schema }
174
+ }
175
+
176
+ return body
177
+ }
178
+
179
+ private mapMessages(messages: Message[]): any[] {
180
+ const result: any[] = []
181
+
182
+ for (const msg of messages) {
183
+ if (msg.role === 'tool') {
184
+ // Tool results go as user messages with tool_result content blocks
185
+ result.push({
186
+ role: 'user',
187
+ content: [
188
+ {
189
+ type: 'tool_result',
190
+ tool_use_id: msg.toolCallId,
191
+ content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
192
+ },
193
+ ],
194
+ })
195
+ } else if (msg.role === 'assistant') {
196
+ const content: any[] = []
197
+
198
+ // Add text content if present
199
+ const text = typeof msg.content === 'string' ? msg.content : ''
200
+ if (text) {
201
+ content.push({ type: 'text', text })
202
+ }
203
+
204
+ // Add tool use blocks
205
+ if (msg.toolCalls?.length) {
206
+ for (const tc of msg.toolCalls) {
207
+ content.push({
208
+ type: 'tool_use',
209
+ id: tc.id,
210
+ name: tc.name,
211
+ input: tc.arguments,
212
+ })
213
+ }
214
+ }
215
+
216
+ result.push({
217
+ role: 'assistant',
218
+ content: content.length === 1 && content[0].type === 'text' ? content[0].text : content,
219
+ })
220
+ } else {
221
+ // User messages
222
+ result.push({
223
+ role: 'user',
224
+ content: typeof msg.content === 'string' ? msg.content : msg.content,
225
+ })
226
+ }
227
+ }
228
+
229
+ return result
230
+ }
231
+
232
+ private parseResponse(data: any): CompletionResponse {
233
+ let content = ''
234
+ const toolCalls: ToolCall[] = []
235
+
236
+ if (Array.isArray(data.content)) {
237
+ for (const block of data.content) {
238
+ if (block.type === 'text') {
239
+ content += block.text
240
+ } else if (block.type === 'tool_use') {
241
+ toolCalls.push({
242
+ id: block.id,
243
+ name: block.name,
244
+ arguments: block.input ?? {},
245
+ })
246
+ }
247
+ }
248
+ }
249
+
250
+ const usage: Usage = {
251
+ inputTokens: data.usage?.input_tokens ?? 0,
252
+ outputTokens: data.usage?.output_tokens ?? 0,
253
+ totalTokens: (data.usage?.input_tokens ?? 0) + (data.usage?.output_tokens ?? 0),
254
+ }
255
+
256
+ let stopReason: CompletionResponse['stopReason'] = 'end'
257
+ switch (data.stop_reason) {
258
+ case 'tool_use':
259
+ stopReason = 'tool_use'
260
+ break
261
+ case 'max_tokens':
262
+ stopReason = 'max_tokens'
263
+ break
264
+ case 'stop_sequence':
265
+ stopReason = 'stop_sequence'
266
+ break
267
+ }
268
+
269
+ return {
270
+ id: data.id ?? '',
271
+ content,
272
+ toolCalls,
273
+ stopReason,
274
+ usage,
275
+ raw: data,
276
+ }
277
+ }
278
+ }
@@ -0,0 +1,351 @@
1
+ import { parseSSE } from '../utils/sse_parser.ts'
2
+ import { ExternalServiceError } from '@stravigor/core/exceptions/errors'
3
+ import type {
4
+ AIProvider,
5
+ CompletionRequest,
6
+ CompletionResponse,
7
+ StreamChunk,
8
+ EmbeddingResponse,
9
+ ProviderConfig,
10
+ Message,
11
+ ToolCall,
12
+ Usage,
13
+ } from '../types.ts'
14
+
15
+ /**
16
+ * OpenAI Chat Completions API provider.
17
+ *
18
+ * Also serves DeepSeek and any OpenAI-compatible API by setting `baseUrl`
19
+ * in the provider config. Uses raw `fetch()`.
20
+ */
21
+ export class OpenAIProvider implements AIProvider {
22
+ readonly name: string
23
+ private apiKey: string
24
+ private baseUrl: string
25
+ private defaultModel: string
26
+ private defaultMaxTokens?: number
27
+
28
+ constructor(config: ProviderConfig, name?: string) {
29
+ this.name = name ?? 'openai'
30
+ this.apiKey = config.apiKey
31
+ this.baseUrl = (config.baseUrl ?? 'https://api.openai.com').replace(/\/$/, '')
32
+ this.defaultModel = config.model
33
+ this.defaultMaxTokens = config.maxTokens
34
+ }
35
+
36
+ /** Whether this provider supports OpenAI's native json_schema response format. */
37
+ private get supportsJsonSchema(): boolean {
38
+ return this.baseUrl === 'https://api.openai.com'
39
+ }
40
+
41
+ async complete(request: CompletionRequest): Promise<CompletionResponse> {
42
+ const body = this.buildRequestBody(request, false)
43
+
44
+ const response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
45
+ method: 'POST',
46
+ headers: this.buildHeaders(),
47
+ body: JSON.stringify(body),
48
+ })
49
+
50
+ if (!response.ok) {
51
+ const text = await response.text()
52
+ throw new ExternalServiceError('OpenAI', response.status, text)
53
+ }
54
+
55
+ const data: any = await response.json()
56
+ return this.parseResponse(data)
57
+ }
58
+
59
+ async *stream(request: CompletionRequest): AsyncIterable<StreamChunk> {
60
+ const body = this.buildRequestBody(request, true)
61
+
62
+ const response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
63
+ method: 'POST',
64
+ headers: this.buildHeaders(),
65
+ body: JSON.stringify(body),
66
+ })
67
+
68
+ if (!response.ok) {
69
+ const text = await response.text()
70
+ throw new ExternalServiceError('OpenAI', response.status, text)
71
+ }
72
+
73
+ if (!response.body) {
74
+ throw new ExternalServiceError('OpenAI', undefined, 'No stream body returned')
75
+ }
76
+
77
+ // Track in-progress tool calls for tool_start vs tool_delta distinction
78
+ const seenTools = new Set<number>()
79
+
80
+ for await (const sse of parseSSE(response.body)) {
81
+ if (sse.data === '[DONE]') {
82
+ yield { type: 'done' }
83
+ break
84
+ }
85
+
86
+ let parsed: any
87
+ try {
88
+ parsed = JSON.parse(sse.data)
89
+ } catch {
90
+ continue
91
+ }
92
+
93
+ const choice = parsed.choices?.[0]
94
+ if (!choice) continue
95
+
96
+ const delta = choice.delta
97
+ if (!delta) continue
98
+
99
+ // Text content
100
+ if (delta.content) {
101
+ yield { type: 'text', text: delta.content }
102
+ }
103
+
104
+ // Tool calls
105
+ if (delta.tool_calls) {
106
+ for (const tc of delta.tool_calls) {
107
+ const index: number = tc.index ?? 0
108
+
109
+ if (!seenTools.has(index)) {
110
+ // First chunk for this tool — emit tool_start
111
+ seenTools.add(index)
112
+ yield {
113
+ type: 'tool_start',
114
+ toolCall: { id: tc.id, name: tc.function?.name },
115
+ toolIndex: index,
116
+ }
117
+ }
118
+
119
+ // Argument fragments
120
+ if (tc.function?.arguments) {
121
+ yield {
122
+ type: 'tool_delta',
123
+ text: tc.function.arguments,
124
+ toolIndex: index,
125
+ }
126
+ }
127
+ }
128
+ }
129
+
130
+ // Finish reason
131
+ if (choice.finish_reason) {
132
+ if (choice.finish_reason === 'tool_calls') {
133
+ // Emit tool_end for all tracked tools
134
+ for (const idx of seenTools) {
135
+ yield { type: 'tool_end', toolIndex: idx }
136
+ }
137
+ }
138
+
139
+ // Usage in final chunk (if stream_options.include_usage is set)
140
+ if (parsed.usage) {
141
+ yield {
142
+ type: 'usage',
143
+ usage: {
144
+ inputTokens: parsed.usage.prompt_tokens ?? 0,
145
+ outputTokens: parsed.usage.completion_tokens ?? 0,
146
+ totalTokens: parsed.usage.total_tokens ?? 0,
147
+ },
148
+ }
149
+ }
150
+ }
151
+ }
152
+ }
153
+
154
+ async embed(input: string | string[], model?: string): Promise<EmbeddingResponse> {
155
+ const body = {
156
+ input: Array.isArray(input) ? input : [input],
157
+ model: model ?? 'text-embedding-3-small',
158
+ }
159
+
160
+ const response = await fetch(`${this.baseUrl}/v1/embeddings`, {
161
+ method: 'POST',
162
+ headers: this.buildHeaders(),
163
+ body: JSON.stringify(body),
164
+ })
165
+
166
+ if (!response.ok) {
167
+ const text = await response.text()
168
+ throw new ExternalServiceError('OpenAI', response.status, text)
169
+ }
170
+
171
+ const data: any = await response.json()
172
+
173
+ return {
174
+ embeddings: data.data.map((d: any) => d.embedding),
175
+ model: data.model,
176
+ usage: { totalTokens: data.usage?.total_tokens ?? 0 },
177
+ }
178
+ }
179
+
180
+ // ── Private helpers ──────────────────────────────────────────────────────
181
+
182
+ private buildHeaders(): Record<string, string> {
183
+ return {
184
+ 'content-type': 'application/json',
185
+ authorization: `Bearer ${this.apiKey}`,
186
+ }
187
+ }
188
+
189
+ private buildRequestBody(request: CompletionRequest, stream: boolean): Record<string, unknown> {
190
+ const body: Record<string, unknown> = {
191
+ model: request.model ?? this.defaultModel,
192
+ messages: this.mapMessages(request.messages, request.system),
193
+ }
194
+
195
+ if (stream) body.stream = true
196
+ if (request.maxTokens ?? this.defaultMaxTokens) {
197
+ body.max_tokens = request.maxTokens ?? this.defaultMaxTokens
198
+ }
199
+ if (request.temperature !== undefined) body.temperature = request.temperature
200
+ if (request.stopSequences?.length) body.stop = request.stopSequences
201
+
202
+ // Tools
203
+ if (request.tools?.length) {
204
+ body.tools = request.tools.map(t => ({
205
+ type: 'function',
206
+ function: {
207
+ name: t.name,
208
+ description: t.description,
209
+ parameters: t.parameters,
210
+ },
211
+ }))
212
+ }
213
+
214
+ // Tool choice
215
+ if (request.toolChoice) {
216
+ if (typeof request.toolChoice === 'string') {
217
+ body.tool_choice = request.toolChoice
218
+ } else {
219
+ body.tool_choice = {
220
+ type: 'function',
221
+ function: { name: request.toolChoice.name },
222
+ }
223
+ }
224
+ }
225
+
226
+ // Structured output
227
+ if (request.schema) {
228
+ if (this.supportsJsonSchema) {
229
+ body.response_format = {
230
+ type: 'json_schema',
231
+ json_schema: {
232
+ name: 'response',
233
+ schema: request.schema,
234
+ strict: true,
235
+ },
236
+ }
237
+ } else {
238
+ // Fallback for providers that don't support json_schema (e.g. DeepSeek)
239
+ body.response_format = { type: 'json_object' }
240
+ // Inject schema into system prompt so the model knows the expected format
241
+ const schemaHint = `\n\nYou MUST respond with valid JSON matching this schema:\n${JSON.stringify(request.schema, null, 2)}`
242
+ const messages = body.messages as any[]
243
+ if (messages[0]?.role === 'system') {
244
+ messages[0].content += schemaHint
245
+ } else {
246
+ messages.unshift({ role: 'system', content: `Respond with valid JSON.${schemaHint}` })
247
+ }
248
+ }
249
+ }
250
+
251
+ return body
252
+ }
253
+
254
+ private mapMessages(messages: Message[], system?: string): any[] {
255
+ const result: any[] = []
256
+
257
+ // System prompt as first message
258
+ if (system) {
259
+ result.push({ role: 'system', content: system })
260
+ }
261
+
262
+ for (const msg of messages) {
263
+ if (msg.role === 'tool') {
264
+ result.push({
265
+ role: 'tool',
266
+ tool_call_id: msg.toolCallId,
267
+ content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
268
+ })
269
+ } else if (msg.role === 'assistant') {
270
+ const mapped: any = {
271
+ role: 'assistant',
272
+ content: typeof msg.content === 'string' ? msg.content : null,
273
+ }
274
+
275
+ if (msg.toolCalls?.length) {
276
+ mapped.tool_calls = msg.toolCalls.map(tc => ({
277
+ id: tc.id,
278
+ type: 'function',
279
+ function: {
280
+ name: tc.name,
281
+ arguments: JSON.stringify(tc.arguments),
282
+ },
283
+ }))
284
+ }
285
+
286
+ result.push(mapped)
287
+ } else {
288
+ result.push({
289
+ role: 'user',
290
+ content: typeof msg.content === 'string' ? msg.content : msg.content,
291
+ })
292
+ }
293
+ }
294
+
295
+ return result
296
+ }
297
+
298
+ private parseResponse(data: any): CompletionResponse {
299
+ const choice = data.choices?.[0]
300
+ const message = choice?.message
301
+
302
+ const content: string = message?.content ?? ''
303
+ const toolCalls: ToolCall[] = []
304
+
305
+ if (message?.tool_calls) {
306
+ for (const tc of message.tool_calls) {
307
+ let args: Record<string, unknown> = {}
308
+ try {
309
+ args = JSON.parse(tc.function.arguments)
310
+ } catch {
311
+ // Invalid JSON from the model — pass as-is in a wrapper
312
+ args = { _raw: tc.function.arguments }
313
+ }
314
+
315
+ toolCalls.push({
316
+ id: tc.id,
317
+ name: tc.function.name,
318
+ arguments: args,
319
+ })
320
+ }
321
+ }
322
+
323
+ const usage: Usage = {
324
+ inputTokens: data.usage?.prompt_tokens ?? 0,
325
+ outputTokens: data.usage?.completion_tokens ?? 0,
326
+ totalTokens: data.usage?.total_tokens ?? 0,
327
+ }
328
+
329
+ let stopReason: CompletionResponse['stopReason'] = 'end'
330
+ switch (choice?.finish_reason) {
331
+ case 'tool_calls':
332
+ stopReason = 'tool_use'
333
+ break
334
+ case 'length':
335
+ stopReason = 'max_tokens'
336
+ break
337
+ case 'stop':
338
+ stopReason = 'end'
339
+ break
340
+ }
341
+
342
+ return {
343
+ id: data.id ?? '',
344
+ content,
345
+ toolCalls,
346
+ stopReason,
347
+ usage,
348
+ raw: data,
349
+ }
350
+ }
351
+ }