@voltx/ai 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +135 -0
- package/dist/index.cjs +972 -0
- package/dist/index.d.cts +326 -0
- package/dist/index.d.ts +326 -0
- package/dist/index.js +925 -0
- package/package.json +38 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,925 @@
|
|
|
1
|
+
// src/providers/registry.ts
// Global registry mapping provider names to their factory functions.
var providers = /* @__PURE__ */ new Map();

/**
 * Registers (or replaces) a provider factory under `name`.
 * @param {string} name - Provider identifier, e.g. "openai".
 * @param {(config: object) => object} factory - Builds a provider instance.
 */
function registerProvider(name, factory) {
  providers.set(name, factory);
}

/**
 * Instantiates a registered provider.
 * @param {string} name - Provider identifier.
 * @param {object} [config] - Provider-specific configuration.
 * @returns {object} A fresh provider instance.
 * @throws {Error} When `name` was never registered; the message lists the
 *   currently registered providers.
 */
function getProvider(name, config = {}) {
  const create = providers.get(name);
  if (create) {
    return create(config);
  }
  const available = Array.from(providers.keys()).join(", ");
  throw new Error(
    `[voltx/ai] Unknown provider "${name}". Available: ${available}`
  );
}
|
|
16
|
+
/**
 * Normalizes a model argument into a `{ provider, model, config }` reference.
 * Strings use the "provider:model" convention; a string without a colon
 * defaults to the "openai" provider. Non-string values (already-built
 * references) pass through unchanged.
 */
function resolveModel(model) {
  if (typeof model !== "string") return model;
  const sep = model.indexOf(":");
  if (sep < 0) {
    // Bare model name: assume OpenAI.
    return { provider: "openai", model, config: {} };
  }
  return {
    provider: model.slice(0, sep),
    model: model.slice(sep + 1),
    config: {}
  };
}
|
|
26
|
+
// Environment variable holding each provider's API key. An empty string means
// the provider needs no key at all.
var ENV_KEY_MAP = {
  openai: "OPENAI_API_KEY",
  anthropic: "ANTHROPIC_API_KEY",
  google: "GOOGLE_AI_API_KEY",
  cerebras: "CEREBRAS_API_KEY",
  openrouter: "OPENROUTER_API_KEY",
  ollama: ""
  // no key needed
};

/**
 * Resolves the API key for a provider.
 * @param {string} provider - Provider identifier (key into ENV_KEY_MAP).
 * @param {string} [explicit] - Caller-supplied key; wins when truthy.
 * @returns {string | undefined} The key, or undefined when none is needed
 *   or the environment variable is unset.
 */
function resolveApiKey(provider, explicit) {
  if (explicit) return explicit;
  const envVar = ENV_KEY_MAP[provider];
  // Unknown providers and keyless providers ("" entry) both fall through here.
  return envVar ? process.env[envVar] : void 0;
}
|
|
41
|
+
|
|
42
|
+
// src/utils.ts
/**
 * Flattens the user-facing (messages, system, prompt) inputs into a single
 * provider message array: optional system message first, then the history,
 * then the prompt as a trailing user message.
 *
 * Array-valued contents containing only text parts are collapsed into one
 * string; contents with non-text parts (images, tool results) are forwarded
 * untouched so multimodal providers can handle them.
 */
function toProviderMessages(messages, system, prompt) {
  const out = [];
  if (system) {
    out.push({ role: "system", content: system });
  }
  if (messages) {
    for (const msg of messages) {
      const raw = msg.content;
      if (Array.isArray(raw) && raw.some((part) => part.type !== "text")) {
        // Multimodal content: pass the parts array through verbatim.
        out.push({
          role: msg.role,
          content: raw,
          tool_calls: msg.tool_calls,
          tool_call_id: msg.tool_call_id
        });
        continue;
      }
      let content;
      if (typeof raw === "string") {
        content = raw;
      } else if (Array.isArray(raw)) {
        // All parts are text at this point; concatenate them.
        content = raw.map((part) => part.type === "text" ? part.text : "").join("");
      } else {
        content = null;
      }
      out.push({
        role: msg.role,
        content,
        tool_calls: msg.tool_calls,
        tool_call_id: msg.tool_call_id
      });
    }
  }
  if (prompt) {
    out.push({ role: "user", content: prompt });
  }
  return out;
}
|
|
81
|
+
/**
 * Converts the user-facing tools record into the OpenAI function-tool wire
 * format. Zod parameter schemas are converted to JSON Schema; plain objects
 * are assumed to already be JSON Schema and passed through.
 * Returns undefined when there are no tools, so the field is omitted from
 * request bodies.
 */
function toProviderTools(tools) {
  if (!tools) return void 0;
  const entries = Object.entries(tools);
  if (entries.length === 0) return void 0;
  const wireTools = [];
  for (const [key, tool] of entries) {
    const parameters = isZodType(tool.parameters)
      ? zodToJsonSchema(tool.parameters)
      : tool.parameters;
    wireTools.push({
      type: "function",
      function: {
        // The record key doubles as the tool name unless one is given.
        name: tool.name ?? key,
        description: tool.description,
        parameters
      }
    });
  }
  return wireTools;
}
|
|
94
|
+
/**
 * Duck-types a Zod schema: any non-null object carrying a `_def` property.
 * Avoids importing zod just for an instanceof check.
 */
function isZodType(value) {
  if (value === null) return false;
  if (typeof value !== "object") return false;
  return "_def" in value;
}
|
|
97
|
+
/**
 * Minimal Zod -> JSON Schema converter covering the subset of types used for
 * tool parameters and structured outputs.
 *
 * For objects, EVERY property is listed in `required` (OpenAI strict mode
 * demands this), with optional/default wrappers unwrapped to their inner type.
 * Unsupported Zod types degrade to `{ type: "string" }`.
 *
 * Fix: the previous version also accumulated a separate `required` array
 * (skipping ZodOptional members) that was never used — the return value always
 * took `Object.keys(properties)`. That dead accumulation has been removed.
 *
 * @param {object} schema - A Zod schema (object with a `_def` descriptor).
 * @returns {object} The equivalent JSON Schema fragment.
 */
function zodToJsonSchema(schema) {
  const def = schema._def;
  const typeName = def.typeName;
  switch (typeName) {
    case "ZodObject": {
      const properties = {};
      for (const [key, value] of Object.entries(def.shape())) {
        properties[key] = zodToJsonSchema(value);
      }
      return {
        type: "object",
        properties,
        required: Object.keys(properties),
        // strict mode requires ALL properties in required
        additionalProperties: false
      };
    }
    case "ZodString":
      return { type: "string", ...def.description ? { description: def.description } : {} };
    case "ZodNumber":
      return { type: "number" };
    case "ZodBoolean":
      return { type: "boolean" };
    case "ZodArray":
      return { type: "array", items: zodToJsonSchema(def.type) };
    case "ZodEnum":
      return { type: "string", enum: def.values };
    case "ZodOptional":
      // Unwrap; strict mode still lists the property as required.
      return zodToJsonSchema(def.innerType);
    case "ZodDefault":
      return zodToJsonSchema(def.innerType);
    default:
      // Fallback for unsupported types (ZodUnion, ZodRecord, ...).
      return { type: "string" };
  }
}
|
|
138
|
+
|
|
139
|
+
// src/generate-text.ts
/**
 * One-shot (non-streaming) text generation.
 *
 * Resolves the model reference, builds the provider, performs a single chat
 * call, and normalizes the result: tool-call arguments are JSON-parsed
 * (falling back to `{ _raw }` on malformed JSON) and `text` is always a
 * string.
 */
async function generateText(options) {
  const ref = resolveModel(options.model);
  const provider = getProvider(ref.provider, {
    ...ref.config,
    apiKey: resolveApiKey(ref.provider, ref.config.apiKey)
  });
  const response = await provider.chat({
    model: ref.model,
    messages: toProviderMessages(options.messages, options.system, options.prompt),
    tools: toProviderTools(options.tools),
    temperature: options.temperature,
    maxTokens: options.maxTokens,
    topP: options.topP,
    stop: options.stop,
    signal: options.signal
  });
  const toolCalls = response.toolCalls.map((tc) => {
    let args;
    try {
      args = JSON.parse(tc.function.arguments);
    } catch {
      // Keep the unparseable payload available to the caller.
      args = { _raw: tc.function.arguments };
    }
    return { id: tc.id, name: tc.function.name, args };
  });
  return {
    text: response.text ?? "",
    toolCalls,
    usage: response.usage,
    finishReason: response.finishReason,
    raw: response.raw
  };
}
|
|
177
|
+
|
|
178
|
+
// src/stream-text.ts
/**
 * Starts a streaming chat completion.
 *
 * Resolves the provider, opens the HTTP stream, and returns a handle with:
 * - `textStream`: async iterable of text deltas,
 * - `text` / `usage`: promises resolved once the stream is fully drained,
 * - `abort()`: cancels the in-flight request,
 * - `toSSEResponse()` / `toReadableStream()`: web-stream adapters.
 *
 * Fix: `abort()` previously did nothing when the caller supplied
 * `options.signal`, because the provider was given the external signal while
 * `abort()` aborted an unused internal controller. The request is now always
 * driven by the internal controller, and aborts on the external signal are
 * forwarded into it — so both `options.signal` and `abort()` cancel.
 */
async function streamText(options) {
  const ref = resolveModel(options.model);
  const apiKey = resolveApiKey(ref.provider, ref.config.apiKey);
  const provider = getProvider(ref.provider, { ...ref.config, apiKey });
  const messages = toProviderMessages(options.messages, options.system, options.prompt);
  const tools = toProviderTools(options.tools);
  const abortController = new AbortController();
  if (options.signal) {
    // Forward external aborts into the controller that drives the request.
    if (options.signal.aborted) {
      abortController.abort();
    } else {
      options.signal.addEventListener("abort", () => abortController.abort(), { once: true });
    }
  }
  const signal = abortController.signal;
  const response = await provider.stream({
    model: ref.model,
    messages,
    tools,
    temperature: options.temperature,
    maxTokens: options.maxTokens,
    topP: options.topP,
    stop: options.stop,
    signal
  });
  let fullText = "";
  let finalUsage = null;
  let consumed = false;
  let resolveText;
  let resolveUsage;
  const textPromise = new Promise((resolve) => {
    resolveText = resolve;
  });
  const usagePromise = new Promise((resolve) => {
    resolveUsage = resolve;
  });
  // Replay buffer so adapters can re-emit deltas after textStream was consumed.
  const buffer = [];
  // Single-consumer driver: the first caller iterates the provider stream;
  // later callers replay the buffer.
  async function* driveStream() {
    if (consumed) {
      // NOTE(review): replays only deltas buffered so far — if the first
      // consumer is still mid-stream this yields a partial transcript.
      for (const chunk of buffer) yield chunk;
      return;
    }
    consumed = true;
    try {
      for await (const chunk of response.stream) {
        if (chunk.type === "text-delta" && chunk.textDelta) {
          fullText += chunk.textDelta;
          buffer.push(chunk.textDelta);
          yield chunk.textDelta;
        }
        if (chunk.type === "finish" && chunk.usage) {
          finalUsage = chunk.usage;
        }
      }
    } finally {
      // Resolve even on error/abort so awaiting text/usage never hangs.
      resolveText(fullText);
      resolveUsage(finalUsage ?? { promptTokens: 0, completionTokens: 0, totalTokens: 0 });
    }
  }
  return {
    textStream: driveStream(),
    text: textPromise,
    usage: usagePromise,
    /** Cancels the underlying HTTP request. */
    abort() {
      abortController.abort();
    },
    /** Wraps the deltas as a `text/event-stream` HTTP Response. */
    toSSEResponse() {
      const encoder = new TextEncoder();
      const source = consumed ? buffer : null;
      const readable = new ReadableStream({
        async start(controller) {
          try {
            if (source) {
              for (const chunk of source) {
                const data = JSON.stringify({ type: "text-delta", textDelta: chunk });
                controller.enqueue(encoder.encode(`data: ${data}\n\n`));
              }
            } else {
              for await (const chunk of driveStream()) {
                const data = JSON.stringify({ type: "text-delta", textDelta: chunk });
                controller.enqueue(encoder.encode(`data: ${data}\n\n`));
              }
            }
            controller.enqueue(encoder.encode("data: [DONE]\n\n"));
          } catch (err) {
            // Surface stream failures as an SSE error event instead of a
            // broken connection.
            const msg = err instanceof Error ? err.message : "Unknown error";
            controller.enqueue(encoder.encode(`data: ${JSON.stringify({ type: "error", error: msg })}\n\n`));
          } finally {
            controller.close();
          }
        }
      });
      return new Response(readable, {
        headers: {
          "Content-Type": "text/event-stream",
          "Cache-Control": "no-cache",
          "Connection": "keep-alive"
        }
      });
    },
    /** Wraps the text deltas as a UTF-8 byte ReadableStream. */
    toReadableStream() {
      const encoder = new TextEncoder();
      return new ReadableStream({
        async start(controller) {
          try {
            for await (const chunk of driveStream()) {
              controller.enqueue(encoder.encode(chunk));
            }
          } finally {
            controller.close();
          }
        }
      });
    }
  };
}
|
|
294
|
+
|
|
295
|
+
// src/generate-object.ts
/**
 * Generates a structured object validated against a Zod schema.
 *
 * The JSON Schema derived from `options.schema` is appended to the system
 * message as an instruction, and (where the provider supports it) also sent
 * as a strict `response_format`. The model's reply is JSON-parsed — a fenced
 * ```json block is unwrapped first — then validated with `safeParse`.
 *
 * @throws {Error} When the reply is not parseable JSON or fails validation.
 */
async function generateObject(options) {
  const ref = resolveModel(options.model);
  const provider = getProvider(ref.provider, {
    ...ref.config,
    apiKey: resolveApiKey(ref.provider, ref.config.apiKey)
  });
  const messages = toProviderMessages(options.messages, options.system, options.prompt);
  const jsonSchema = zodToJsonSchema(options.schema);
  const jsonInstruction = `\n\nRespond with a valid JSON object matching this schema:\n${JSON.stringify(jsonSchema, null, 2)}`;
  const systemMsg = messages.find((m) => m.role === "system");
  if (systemMsg) {
    systemMsg.content = (systemMsg.content ?? "") + jsonInstruction;
  } else {
    messages.unshift({ role: "system", content: jsonInstruction });
  }
  // Anthropic and Ollama don't accept OpenAI-style response_format; they rely
  // on the prompt instruction alone.
  const supportsResponseFormat = ref.provider !== "anthropic" && ref.provider !== "ollama";
  const responseFormat = supportsResponseFormat ? {
    type: "json_schema",
    json_schema: {
      name: options.schemaName ?? "response",
      strict: true,
      schema: jsonSchema
    }
  } : void 0;
  const response = await provider.chat({
    model: ref.model,
    messages,
    temperature: options.temperature,
    maxTokens: options.maxTokens,
    responseFormat,
    signal: options.signal
  });
  const text = response.text ?? "";
  let parsed;
  try {
    // Unwrap a markdown code fence if the model produced one.
    const fenced = text.match(/```(?:json)?\s*([\s\S]*?)```/);
    const candidate = fenced ? fenced[1] : text;
    parsed = JSON.parse(candidate.trim());
  } catch {
    throw new Error(
      `[voltx/ai] Failed to parse JSON from model response. Raw text: ${text.slice(0, 200)}`
    );
  }
  const validated = options.schema.safeParse(parsed);
  if (!validated.success) {
    throw new Error(
      `[voltx/ai] Schema validation failed: ${validated.error.message}`
    );
  }
  return {
    object: validated.data,
    usage: response.usage,
    raw: response.raw
  };
}
|
|
350
|
+
|
|
351
|
+
// src/embed.ts
/**
 * Embeds a single value and returns `{ embedding, usage }`.
 * @throws {Error} When the resolved provider has no `embed` capability.
 */
async function embed(options) {
  const ref = resolveModel(options.model);
  const provider = getProvider(ref.provider, {
    ...ref.config,
    apiKey: resolveApiKey(ref.provider, ref.config.apiKey)
  });
  if (!provider.embed) {
    throw new Error(
      `[voltx/ai] Provider "${ref.provider}" does not support embeddings. Use openai, google, or ollama.`
    );
  }
  const { embeddings, usage } = await provider.embed({
    model: ref.model,
    input: options.value
  });
  // Single input -> single vector.
  return { embedding: embeddings[0], usage };
}
|
|
370
|
+
/**
 * Embeds a batch of values and returns `{ embeddings, usage }`, one vector
 * per input in order.
 * @throws {Error} When the resolved provider has no `embed` capability.
 */
async function embedMany(options) {
  const ref = resolveModel(options.model);
  const provider = getProvider(ref.provider, {
    ...ref.config,
    apiKey: resolveApiKey(ref.provider, ref.config.apiKey)
  });
  if (!provider.embed) {
    throw new Error(
      `[voltx/ai] Provider "${ref.provider}" does not support embeddings. Use openai, google, or ollama.`
    );
  }
  const { embeddings, usage } = await provider.embed({
    model: ref.model,
    input: options.values
  });
  return { embeddings, usage };
}
|
|
388
|
+
|
|
389
|
+
// src/providers/openai-compatible.ts
/**
 * Builds a provider backed by an OpenAI-compatible `/chat/completions` API.
 * Shared by the openai, google, cerebras, openrouter, and ollama providers;
 * `cfg` feature flags (supportsEmbeddings, supportsStreamOptions,
 * supportsJsonSchema) gate which request fields each backend receives.
 */
function createOpenAICompatibleProvider(cfg) {
  // Strip a trailing slash so URL concatenation below never yields "//".
  const baseUrl = (cfg.baseUrl ?? cfg.defaultBaseUrl).replace(/\/$/, "");
  // Common request headers; bearer auth is attached only when a key is
  // configured (e.g. ollama runs keyless).
  function buildHeaders() {
    const headers = {
      "Content-Type": "application/json",
      ...cfg.headers
    };
    if (cfg.apiKey) {
      headers["Authorization"] = `Bearer ${cfg.apiKey}`;
    }
    return headers;
  }
  // Builds the chat-completions request body, omitting optional fields that
  // the caller didn't set or the backend doesn't support.
  function buildBody(options, stream = false) {
    const body = {
      model: options.model,
      messages: options.messages,
      stream
    };
    if (options.temperature !== void 0) body.temperature = options.temperature;
    if (options.maxTokens !== void 0) body.max_tokens = options.maxTokens;
    if (options.topP !== void 0) body.top_p = options.topP;
    if (options.stop) body.stop = options.stop;
    if (options.tools && options.tools.length > 0) body.tools = options.tools;
    if (options.responseFormat && cfg.supportsJsonSchema !== false) {
      body.response_format = options.responseFormat;
    }
    if (stream && cfg.supportsStreamOptions !== false) {
      // Asks the API to append a final usage chunk to the SSE stream.
      body.stream_options = { include_usage: true };
    }
    return body;
  }
  // Maps the wire finish_reason onto this library's normalized set.
  function mapFinishReason(reason) {
    switch (reason) {
      case "stop":
        return "stop";
      case "length":
        return "length";
      case "tool_calls":
        return "tool_calls";
      case "content_filter":
        return "content_filter";
      default:
        return "unknown";
    }
  }
  const provider = {
    name: cfg.name,
    // Non-streaming chat completion; normalizes text/toolCalls/usage.
    async chat(options) {
      const res = await fetch(`${baseUrl}/chat/completions`, {
        method: "POST",
        headers: buildHeaders(),
        body: JSON.stringify(buildBody(options, false)),
        signal: options.signal
      });
      if (!res.ok) {
        const errorBody = await res.text().catch(() => "Unknown error");
        throw new Error(`[voltx/ai] ${cfg.name} API error (${res.status}): ${errorBody}`);
      }
      const data = await res.json();
      const choices = data.choices;
      // Defensive defaults: tolerate responses with no choices/message/usage.
      const choice = choices?.[0] ?? {};
      const message = choice.message ?? {};
      const usage = data.usage ?? {};
      return {
        text: message.content ?? null,
        toolCalls: message.tool_calls ?? [],
        usage: {
          promptTokens: usage.prompt_tokens ?? 0,
          completionTokens: usage.completion_tokens ?? 0,
          totalTokens: usage.total_tokens ?? 0
        },
        finishReason: mapFinishReason(choice.finish_reason),
        raw: data
      };
    },
    // Streaming chat completion; returns { stream } where stream is an async
    // generator of normalized chunks (text-delta / tool-call-delta / finish).
    async stream(options) {
      const res = await fetch(`${baseUrl}/chat/completions`, {
        method: "POST",
        headers: buildHeaders(),
        body: JSON.stringify(buildBody(options, true)),
        signal: options.signal
      });
      if (!res.ok) {
        const errorBody = await res.text().catch(() => "Unknown error");
        throw new Error(`[voltx/ai] ${cfg.name} streaming error (${res.status}): ${errorBody}`);
      }
      const body = res.body;
      if (!body) throw new Error(`[voltx/ai] ${cfg.name} returned no stream body`);
      // Incrementally parses the SSE byte stream into normalized chunks.
      async function* parseSSEStream() {
        const reader = body.getReader();
        const decoder = new TextDecoder();
        // Holds the trailing partial line between reads.
        let buffer = "";
        // Tool-call fragments arrive split across deltas; accumulate by index.
        const toolCallAccumulator = /* @__PURE__ */ new Map();
        try {
          while (true) {
            const { done, value } = await reader.read();
            if (done) break;
            buffer += decoder.decode(value, { stream: true });
            const lines = buffer.split("\n");
            // The last element may be an incomplete line; keep it for later.
            buffer = lines.pop() ?? "";
            for (const line of lines) {
              const trimmed = line.trim();
              if (!trimmed || !trimmed.startsWith("data: ")) continue;
              const payload = trimmed.slice(6);
              if (payload === "[DONE]") return;
              try {
                const chunk = JSON.parse(payload);
                const choices = chunk.choices;
                const delta = choices?.[0]?.delta;
                const finishReason = choices?.[0]?.finish_reason;
                if (delta?.content) {
                  yield { type: "text-delta", textDelta: delta.content };
                }
                if (delta?.tool_calls) {
                  const tcArray = delta.tool_calls;
                  for (const tc of tcArray) {
                    const index = tc.index ?? 0;
                    const fn = tc.function;
                    if (!toolCallAccumulator.has(index)) {
                      toolCallAccumulator.set(index, { id: tc.id ?? "", name: fn?.name ?? "", arguments: "" });
                    }
                    const acc = toolCallAccumulator.get(index);
                    // id/name arrive once; arguments arrive as concatenable
                    // JSON fragments.
                    if (tc.id) acc.id = tc.id;
                    if (fn?.name) acc.name = fn.name;
                    if (fn?.arguments) acc.arguments += fn.arguments;
                  }
                }
                const usage = chunk.usage;
                if (finishReason) {
                  // Flush completed tool calls before the finish chunk.
                  if (finishReason === "tool_calls") {
                    for (const [, tc] of toolCallAccumulator) {
                      yield {
                        type: "tool-call-delta",
                        toolCallDelta: {
                          id: tc.id,
                          type: "function",
                          function: { name: tc.name, arguments: tc.arguments }
                        }
                      };
                    }
                  }
                  yield {
                    type: "finish",
                    finishReason: mapFinishReason(finishReason),
                    usage: usage ? {
                      promptTokens: usage.prompt_tokens ?? 0,
                      completionTokens: usage.completion_tokens ?? 0,
                      totalTokens: usage.total_tokens ?? 0
                    } : void 0
                  };
                }
              } catch {
                // Best-effort: malformed SSE data lines are skipped silently.
              }
            }
          }
        } finally {
          reader.releaseLock();
        }
      }
      return { stream: parseSSEStream() };
    }
  };
  // Embeddings endpoint, only wired up for backends that expose it.
  if (cfg.supportsEmbeddings) {
    provider.embed = async (options) => {
      // The wire format always takes an array of inputs.
      const input = Array.isArray(options.input) ? options.input : [options.input];
      const res = await fetch(`${baseUrl}/embeddings`, {
        method: "POST",
        headers: buildHeaders(),
        body: JSON.stringify({ model: options.model, input })
      });
      if (!res.ok) {
        const errorBody = await res.text().catch(() => "Unknown error");
        throw new Error(`[voltx/ai] ${cfg.name} embeddings error (${res.status}): ${errorBody}`);
      }
      const data = await res.json();
      const embeddings = data.data.map((d) => d.embedding);
      const usage = data.usage ?? {};
      return {
        embeddings,
        usage: { tokens: usage.total_tokens ?? usage.prompt_tokens ?? 0 }
      };
    };
  }
  return provider;
}
|
|
575
|
+
|
|
576
|
+
// src/providers/openai.ts
/**
 * OpenAI provider: the OpenAI-compatible transport pointed at the official
 * API, with embeddings, stream usage reporting, and strict JSON schema all
 * enabled. The API key falls back to OPENAI_API_KEY.
 */
function createOpenAIProvider(config = {}) {
  const compatConfig = {
    ...config,
    name: "openai",
    apiKey: config.apiKey ?? resolveApiKey("openai"),
    defaultBaseUrl: "https://api.openai.com/v1",
    supportsEmbeddings: true,
    supportsStreamOptions: true,
    supportsJsonSchema: true
  };
  return createOpenAICompatibleProvider(compatConfig);
}
|
|
588
|
+
/** Builds an OpenAI model reference for generateText/streamText/etc. */
function openai(model) {
  const config = {};
  return { provider: "openai", model, config };
}
|
|
591
|
+
|
|
592
|
+
// src/providers/anthropic.ts
/**
 * Anthropic Messages API provider. Translates between this library's
 * OpenAI-shaped message/tool formats and Anthropic's native wire format
 * (separate `system` field, `tool_use`/`tool_result` content blocks).
 * The API key falls back to ANTHROPIC_API_KEY. Embeddings are not supported.
 */
function createAnthropicProvider(config = {}) {
  const apiKey = config.apiKey ?? resolveApiKey("anthropic");
  const baseUrl = (config.baseUrl ?? "https://api.anthropic.com").replace(/\/$/, "");
  function buildHeaders() {
    return {
      "Content-Type": "application/json",
      "x-api-key": apiKey ?? "",
      "anthropic-version": "2023-06-01",
      ...config.headers
    };
  }
  // Converts OpenAI-shaped messages to Anthropic's format:
  // - system messages are hoisted into a single top-level `system` string,
  // - tool results become user messages with `tool_result` blocks,
  // - assistant tool calls become `tool_use` content blocks.
  function convertMessages(messages) {
    let system;
    const converted = [];
    for (const msg of messages) {
      if (msg.role === "system") {
        // Multiple system messages are concatenated with a blank line.
        system = system ? `${system}

${msg.content ?? ""}` : msg.content ?? "";
        continue;
      }
      if (msg.role === "tool") {
        converted.push({
          role: "user",
          content: [{
            type: "tool_result",
            tool_use_id: msg.tool_call_id,
            content: msg.content ?? ""
          }]
        });
        continue;
      }
      if (msg.role === "assistant" && msg.tool_calls && msg.tool_calls.length > 0) {
        const content = [];
        if (msg.content) content.push({ type: "text", text: msg.content });
        for (const tc of msg.tool_calls) {
          content.push({
            type: "tool_use",
            id: tc.id,
            name: tc.function.name,
            // NOTE(review): assumes arguments is valid JSON (as produced by
            // this library); malformed arguments would throw here.
            input: JSON.parse(tc.function.arguments)
          });
        }
        converted.push({ role: "assistant", content });
        continue;
      }
      converted.push({
        role: msg.role,
        content: msg.content ?? ""
      });
    }
    return { system, messages: converted };
  }
  // Maps OpenAI-format function tools to Anthropic's tool schema.
  function convertTools(tools) {
    if (!tools || tools.length === 0) return void 0;
    return tools.map((t) => ({
      name: t.function.name,
      description: t.function.description,
      input_schema: t.function.parameters
    }));
  }
  // Maps Anthropic stop reasons onto the library's normalized finish reasons.
  function mapStopReason(reason) {
    switch (reason) {
      case "end_turn":
        return "stop";
      case "max_tokens":
        return "length";
      case "tool_use":
        return "tool_calls";
      default:
        return "unknown";
    }
  }
  return {
    name: "anthropic",
    // Non-streaming completion via POST /v1/messages.
    async chat(options) {
      const { system, messages } = convertMessages(options.messages);
      const body = {
        model: options.model,
        messages,
        // Anthropic requires max_tokens; default to 4096 when unspecified.
        max_tokens: options.maxTokens ?? 4096
      };
      if (system) body.system = system;
      if (options.temperature !== void 0) body.temperature = options.temperature;
      if (options.topP !== void 0) body.top_p = options.topP;
      if (options.stop) body.stop_sequences = options.stop;
      const tools = convertTools(options.tools);
      if (tools) body.tools = tools;
      const res = await fetch(`${baseUrl}/v1/messages`, {
        method: "POST",
        headers: buildHeaders(),
        body: JSON.stringify(body),
        signal: options.signal
      });
      if (!res.ok) {
        const errorBody = await res.text().catch(() => "Unknown error");
        throw new Error(`[voltx/ai] Anthropic API error (${res.status}): ${errorBody}`);
      }
      const data = await res.json();
      const content = data.content;
      const usage = data.usage ?? {};
      // Flatten the response content blocks into text + normalized tool calls.
      let text = "";
      const toolCalls = [];
      for (const block of content) {
        if (block.type === "text") {
          text += block.text;
        } else if (block.type === "tool_use") {
          toolCalls.push({
            id: block.id,
            type: "function",
            function: {
              name: block.name,
              arguments: JSON.stringify(block.input)
            }
          });
        }
      }
      return {
        text: text || null,
        toolCalls,
        usage: {
          promptTokens: usage.input_tokens ?? 0,
          completionTokens: usage.output_tokens ?? 0,
          totalTokens: (usage.input_tokens ?? 0) + (usage.output_tokens ?? 0)
        },
        finishReason: mapStopReason(data.stop_reason),
        raw: data
      };
    },
    // Streaming completion; parses Anthropic's SSE event protocol into the
    // library's normalized chunk stream.
    async stream(options) {
      const { system, messages } = convertMessages(options.messages);
      const body = {
        model: options.model,
        messages,
        max_tokens: options.maxTokens ?? 4096,
        stream: true
      };
      if (system) body.system = system;
      if (options.temperature !== void 0) body.temperature = options.temperature;
      if (options.topP !== void 0) body.top_p = options.topP;
      if (options.stop) body.stop_sequences = options.stop;
      const tools = convertTools(options.tools);
      if (tools) body.tools = tools;
      const res = await fetch(`${baseUrl}/v1/messages`, {
        method: "POST",
        headers: buildHeaders(),
        body: JSON.stringify(body),
        signal: options.signal
      });
      if (!res.ok) {
        const errorBody = await res.text().catch(() => "Unknown error");
        throw new Error(`[voltx/ai] Anthropic streaming error (${res.status}): ${errorBody}`);
      }
      const reader = res.body.getReader();
      const decoder = new TextDecoder();
      async function* parseStream() {
        // Trailing partial SSE line carried between reads.
        let buffer = "";
        // State for the single tool_use block currently being streamed
        // (arguments arrive as input_json_delta fragments).
        let currentToolId = "";
        let currentToolName = "";
        let currentToolArgs = "";
        try {
          while (true) {
            const { done, value } = await reader.read();
            if (done) break;
            buffer += decoder.decode(value, { stream: true });
            const lines = buffer.split("\n");
            buffer = lines.pop() ?? "";
            for (const line of lines) {
              const trimmed = line.trim();
              if (!trimmed.startsWith("data: ")) continue;
              const payload = trimmed.slice(6);
              try {
                const event = JSON.parse(payload);
                const type = event.type;
                if (type === "content_block_start") {
                  const contentBlock = event.content_block;
                  if (contentBlock?.type === "tool_use") {
                    currentToolId = contentBlock.id;
                    currentToolName = contentBlock.name;
                    currentToolArgs = "";
                  }
                } else if (type === "content_block_delta") {
                  const delta = event.delta;
                  if (delta.type === "text_delta") {
                    yield { type: "text-delta", textDelta: delta.text };
                  } else if (delta.type === "input_json_delta") {
                    currentToolArgs += delta.partial_json ?? "";
                  }
                } else if (type === "content_block_stop") {
                  // Emit the accumulated tool call once its block closes.
                  if (currentToolId) {
                    yield {
                      type: "tool-call-delta",
                      toolCallDelta: {
                        id: currentToolId,
                        type: "function",
                        function: { name: currentToolName, arguments: currentToolArgs }
                      }
                    };
                    currentToolId = "";
                    currentToolName = "";
                    currentToolArgs = "";
                  }
                } else if (type === "message_delta") {
                  const delta = event.delta;
                  const usage = event.usage;
                  yield {
                    type: "finish",
                    finishReason: mapStopReason(delta.stop_reason),
                    usage: usage ? {
                      promptTokens: usage.input_tokens ?? 0,
                      completionTokens: usage.output_tokens ?? 0,
                      totalTokens: (usage.input_tokens ?? 0) + (usage.output_tokens ?? 0)
                    } : void 0
                  };
                }
              } catch {
                // Best-effort: malformed SSE data lines are skipped silently.
              }
            }
          }
        } finally {
          reader.releaseLock();
        }
      }
      return { stream: parseStream() };
    }
    // Anthropic does not support embeddings
  };
}
|
|
821
|
+
/** Builds an Anthropic model reference for generateText/streamText/etc. */
function anthropic(model) {
  const config = {};
  return { provider: "anthropic", model, config };
}
|
|
824
|
+
|
|
825
|
+
// src/providers/google.ts
/**
 * Google provider via the Gemini OpenAI-compatibility endpoint. Embeddings
 * are available; stream usage options are not. The API key falls back to
 * GOOGLE_AI_API_KEY.
 */
function createGoogleProvider(config = {}) {
  const compatConfig = {
    ...config,
    name: "google",
    apiKey: config.apiKey ?? resolveApiKey("google"),
    defaultBaseUrl: "https://generativelanguage.googleapis.com/v1beta/openai",
    supportsEmbeddings: true,
    supportsStreamOptions: false,
    supportsJsonSchema: true
  };
  return createOpenAICompatibleProvider(compatConfig);
}
|
|
838
|
+
/** Builds a Google model reference for generateText/streamText/etc. */
function google(model) {
  const config = {};
  return { provider: "google", model, config };
}
|
|
841
|
+
|
|
842
|
+
// src/providers/cerebras.ts
/**
 * Cerebras provider via its OpenAI-compatible API. No embeddings and no
 * stream usage options. The API key falls back to CEREBRAS_API_KEY.
 */
function createCerebrasProvider(config = {}) {
  const compatConfig = {
    ...config,
    name: "cerebras",
    apiKey: config.apiKey ?? resolveApiKey("cerebras"),
    defaultBaseUrl: "https://api.cerebras.ai/v1",
    supportsEmbeddings: false,
    supportsStreamOptions: false,
    supportsJsonSchema: true
  };
  return createOpenAICompatibleProvider(compatConfig);
}
|
|
854
|
+
/** Builds a Cerebras model reference for generateText/streamText/etc. */
function cerebras(model) {
  const config = {};
  return { provider: "cerebras", model, config };
}
|
|
857
|
+
|
|
858
|
+
// src/providers/openrouter.ts
/**
 * OpenRouter provider via its OpenAI-compatible API. Sends the attribution
 * headers OpenRouter expects (caller-supplied headers override them). The
 * API key falls back to OPENROUTER_API_KEY. No embeddings support.
 */
function createOpenRouterProvider(config = {}) {
  const headers = {
    "HTTP-Referer": "https://voltx.co.in",
    "X-Title": "VoltX AI Framework",
    ...config.headers
  };
  const compatConfig = {
    ...config,
    name: "openrouter",
    apiKey: config.apiKey ?? resolveApiKey("openrouter"),
    defaultBaseUrl: "https://openrouter.ai/api/v1",
    headers,
    supportsEmbeddings: false,
    supportsStreamOptions: true,
    supportsJsonSchema: true
  };
  return createOpenAICompatibleProvider(compatConfig);
}
|
|
875
|
+
/** Builds an OpenRouter model reference for generateText/streamText/etc. */
function openrouter(model) {
  const config = {};
  return { provider: "openrouter", model, config };
}
|
|
878
|
+
|
|
879
|
+
// src/providers/ollama.ts
/**
 * Ollama provider via its local OpenAI-compatible API. Runs keyless — only
 * baseUrl and headers are forwarded from the caller's config — and defaults
 * to the standard local daemon address. Embeddings are supported; strict
 * JSON schema and stream usage options are not.
 */
function createOllamaProvider(config = {}) {
  const compatConfig = {
    name: "ollama",
    baseUrl: config.baseUrl,
    headers: config.headers,
    defaultBaseUrl: "http://localhost:11434/v1",
    supportsEmbeddings: true,
    supportsStreamOptions: false,
    supportsJsonSchema: false
  };
  return createOpenAICompatibleProvider(compatConfig);
}
|
|
891
|
+
/** Builds an Ollama model reference for generateText/streamText/etc. */
function ollama(model) {
  const config = {};
  return { provider: "ollama", model, config };
}
|
|
894
|
+
|
|
895
|
+
// src/index.ts
// Register every built-in provider at module load so "provider:model" strings
// resolve through the registry without any user setup.
registerProvider("openai", createOpenAIProvider);
registerProvider("anthropic", createAnthropicProvider);
registerProvider("google", createGoogleProvider);
registerProvider("cerebras", createCerebrasProvider);
registerProvider("openrouter", createOpenRouterProvider);
registerProvider("ollama", createOllamaProvider);
// Library version; keep in sync with package.json.
var VERSION = "0.3.0";
export {
  VERSION,
  anthropic,
  cerebras,
  createAnthropicProvider,
  createCerebrasProvider,
  createGoogleProvider,
  createOllamaProvider,
  createOpenAIProvider,
  createOpenRouterProvider,
  embed,
  embedMany,
  generateObject,
  generateText,
  getProvider,
  google,
  ollama,
  openai,
  openrouter,
  registerProvider,
  resolveModel,
  streamText
};
|