@yourgpt/llm-sdk 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +61 -40
- package/dist/adapters/index.d.mts +4 -258
- package/dist/adapters/index.d.ts +4 -258
- package/dist/adapters/index.js +0 -113
- package/dist/adapters/index.js.map +1 -1
- package/dist/adapters/index.mjs +1 -112
- package/dist/adapters/index.mjs.map +1 -1
- package/dist/base-D_FyHFKj.d.mts +235 -0
- package/dist/base-D_FyHFKj.d.ts +235 -0
- package/dist/index.d.mts +145 -450
- package/dist/index.d.ts +145 -450
- package/dist/index.js +1837 -307
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1827 -305
- package/dist/index.mjs.map +1 -1
- package/dist/providers/anthropic/index.d.mts +61 -0
- package/dist/providers/anthropic/index.d.ts +61 -0
- package/dist/providers/anthropic/index.js +939 -0
- package/dist/providers/anthropic/index.js.map +1 -0
- package/dist/providers/anthropic/index.mjs +934 -0
- package/dist/providers/anthropic/index.mjs.map +1 -0
- package/dist/providers/azure/index.d.mts +38 -0
- package/dist/providers/azure/index.d.ts +38 -0
- package/dist/providers/azure/index.js +380 -0
- package/dist/providers/azure/index.js.map +1 -0
- package/dist/providers/azure/index.mjs +377 -0
- package/dist/providers/azure/index.mjs.map +1 -0
- package/dist/providers/google/index.d.mts +72 -0
- package/dist/providers/google/index.d.ts +72 -0
- package/dist/providers/google/index.js +790 -0
- package/dist/providers/google/index.js.map +1 -0
- package/dist/providers/google/index.mjs +785 -0
- package/dist/providers/google/index.mjs.map +1 -0
- package/dist/providers/ollama/index.d.mts +24 -0
- package/dist/providers/ollama/index.d.ts +24 -0
- package/dist/providers/ollama/index.js +235 -0
- package/dist/providers/ollama/index.js.map +1 -0
- package/dist/providers/ollama/index.mjs +232 -0
- package/dist/providers/ollama/index.mjs.map +1 -0
- package/dist/providers/openai/index.d.mts +82 -0
- package/dist/providers/openai/index.d.ts +82 -0
- package/dist/providers/openai/index.js +679 -0
- package/dist/providers/openai/index.js.map +1 -0
- package/dist/providers/openai/index.mjs +674 -0
- package/dist/providers/openai/index.mjs.map +1 -0
- package/dist/providers/xai/index.d.mts +78 -0
- package/dist/providers/xai/index.d.ts +78 -0
- package/dist/providers/xai/index.js +671 -0
- package/dist/providers/xai/index.js.map +1 -0
- package/dist/providers/xai/index.mjs +666 -0
- package/dist/providers/xai/index.mjs.map +1 -0
- package/dist/types-BBCZ3Fxy.d.mts +308 -0
- package/dist/types-CdORv1Yu.d.mts +338 -0
- package/dist/types-CdORv1Yu.d.ts +338 -0
- package/dist/types-DcoCaVVC.d.ts +308 -0
- package/package.json +34 -3
package/dist/index.js
CHANGED
|
@@ -4,7 +4,1694 @@ var core = require('@yourgpt/copilot-sdk/core');
|
|
|
4
4
|
var hono = require('hono');
|
|
5
5
|
var cors = require('hono/cors');
|
|
6
6
|
|
|
7
|
-
// src/
|
|
7
|
+
// src/core/tool.ts
|
|
8
|
+
/**
 * Normalize a tool configuration into the canonical tool shape used by
 * the SDK. Only the recognized fields are kept: description, the Zod
 * parameter schema, and the execute callback.
 */
function tool(config) {
  const { description, parameters, execute } = config;
  return { description, parameters, execute };
}
|
|
15
|
+
/**
 * Convert a tool definition's Zod parameter schema into the JSON-Schema
 * "object" node providers expect for tool declarations.
 */
function toolToJsonSchema(toolDef) {
  const converted = zodToJsonSchema(toolDef.parameters);
  const properties = converted.properties ?? {};
  return { type: "object", properties, required: converted.required };
}
|
|
23
|
+
/**
 * Best-effort translation of a Zod schema node into JSON Schema.
 * Reads Zod's internal `_def` layout (v3) directly rather than pulling in
 * a full converter. Unsupported node kinds degrade to a plain string schema.
 */
function zodToJsonSchema(schema) {
  const def = schema._def;
  const description = def.description;
  switch (def.typeName) {
    case "ZodString":
      return { type: "string", description };
    case "ZodNumber":
      return { type: "number", description };
    case "ZodBoolean":
      return { type: "boolean", description };
    case "ZodEnum":
      // Zod enums are always string-valued.
      return { type: "string", enum: def.values, description };
    case "ZodArray":
      return { type: "array", items: zodToJsonSchema(def.type), description };
    case "ZodObject": {
      const fields = def.shape();
      const properties = {};
      const requiredKeys = [];
      for (const [fieldName, fieldSchema] of Object.entries(fields)) {
        properties[fieldName] = zodToJsonSchema(fieldSchema);
        const fieldKind = fieldSchema._def.typeName;
        // Optional / nullable / defaulted fields are not listed as required.
        const optionalLike =
          fieldKind === "ZodOptional" ||
          fieldKind === "ZodNullable" ||
          fieldKind === "ZodDefault";
        if (!optionalLike) {
          requiredKeys.push(fieldName);
        }
      }
      return {
        type: "object",
        properties,
        required: requiredKeys.length > 0 ? requiredKeys : void 0,
        description
      };
    }
    case "ZodOptional":
    case "ZodNullable":
    case "ZodDefault":
      // The wrapper only affects required-ness; describe the inner schema.
      return zodToJsonSchema(def.innerType);
    case "ZodLiteral": {
      const literal = def.value;
      const literalType = typeof literal;
      if (literalType === "string" || literalType === "number" || literalType === "boolean") {
        return { type: literalType, enum: [literal], description };
      }
      // Non-primitive literal: nothing useful to emit beyond the description.
      return { description };
    }
    case "ZodUnion": {
      const options = def.options;
      const literals = options
        .filter((o) => o._def.typeName === "ZodLiteral" && typeof o._def.value === "string")
        .map((o) => o._def.value);
      // A union made purely of string literals becomes an enum; otherwise
      // approximate with the first member.
      if (literals.length === options.length) {
        return { type: "string", enum: literals, description };
      }
      return zodToJsonSchema(options[0]);
    }
    default:
      return { type: "string", description };
  }
}
|
|
106
|
+
/**
 * Shape a tool map into OpenAI's function-calling `tools` array.
 */
function formatToolsForOpenAI(tools) {
  const entries = Object.entries(tools);
  return entries.map(([name, toolDef]) => {
    const fn = {
      name,
      description: toolDef.description,
      parameters: toolToJsonSchema(toolDef)
    };
    return { type: "function", function: fn };
  });
}
|
|
116
|
+
/**
 * Shape a tool map into Anthropic's tool declaration array
 * (flat entries with an `input_schema`).
 */
function formatToolsForAnthropic(tools) {
  const declarations = [];
  for (const [name, toolDef] of Object.entries(tools)) {
    declarations.push({
      name,
      description: toolDef.description,
      input_schema: toolToJsonSchema(toolDef)
    });
  }
  return declarations;
}
|
|
123
|
+
/**
 * Shape a tool map into Google's tool format: a single-element array
 * holding one object with all `functionDeclarations`.
 */
function formatToolsForGoogle(tools) {
  const functionDeclarations = Object.entries(tools).map(([name, toolDef]) => ({
    name,
    description: toolDef.description,
    parameters: toolToJsonSchema(toolDef)
  }));
  return [{ functionDeclarations }];
}
|
|
134
|
+
|
|
135
|
+
// src/core/generate-text.ts
|
|
136
|
+
// src/core/generate-text.ts
/**
 * Run a (possibly multi-step) non-streaming generation against `model`.
 *
 * Each step calls `model.doGenerate`; if the model requested tool calls
 * and `tools` were provided, every call is validated (Zod `parse`) and
 * executed, and the results are appended to the conversation before the
 * next step. The loop ends after `maxSteps` steps or after the first
 * step that produced no tool calls.
 *
 * Returns the final text, summed usage across steps, the per-step
 * records, all tool calls/results, and the full message transcript.
 * Throws if `signal` is already aborted at the start of a step.
 */
async function generateText(params) {
  const { model, tools, maxSteps = 1, signal } = params;
  let messages = buildMessages(params);
  const steps = [];
  const allToolCalls = [];
  const allToolResults = [];
  for (let step = 0; step < maxSteps; step++) {
    // Abort is only checked between steps, not mid-request; the signal is
    // also forwarded to doGenerate and to tool executions below.
    if (signal?.aborted) {
      throw new Error("Generation aborted");
    }
    const formattedTools = tools ? formatToolsForProvider(tools, model.provider) : void 0;
    const result = await model.doGenerate({
      messages,
      tools: formattedTools,
      temperature: params.temperature,
      maxTokens: params.maxTokens,
      signal
    });
    const stepToolResults = [];
    if (result.toolCalls && result.toolCalls.length > 0 && tools) {
      allToolCalls.push(...result.toolCalls);
      // Execute tool calls sequentially, in the order the model emitted them.
      for (const call of result.toolCalls) {
        const toolDef = tools[call.name];
        if (!toolDef) {
          // Unknown tool: surface the problem to the model as an error
          // result instead of throwing.
          const errorResult = {
            toolCallId: call.id,
            result: { error: `Tool not found: ${call.name}` }
          };
          stepToolResults.push(errorResult);
          allToolResults.push(errorResult);
          continue;
        }
        try {
          // Validate the model-provided args against the tool's Zod schema
          // before executing; a parse failure lands in the catch below.
          const parsedArgs = toolDef.parameters.parse(call.args);
          const toolResult = await toolDef.execute(parsedArgs, {
            toolCallId: call.id,
            abortSignal: signal,
            messages
          });
          const result2 = {
            toolCallId: call.id,
            result: toolResult
          };
          stepToolResults.push(result2);
          allToolResults.push(result2);
        } catch (error) {
          // Tool failures (including arg validation) become error results
          // fed back to the model rather than aborting the whole run.
          const errorResult = {
            toolCallId: call.id,
            result: {
              error: error instanceof Error ? error.message : "Tool execution failed"
            }
          };
          stepToolResults.push(errorResult);
          allToolResults.push(errorResult);
        }
      }
    }
    steps.push({
      text: result.text,
      toolCalls: result.toolCalls,
      toolResults: stepToolResults,
      finishReason: result.finishReason,
      usage: result.usage
    });
    // No tool calls means the model is done — stop iterating.
    if (!result.toolCalls || result.toolCalls.length === 0) {
      break;
    }
    // Append the assistant turn and one "tool" message per result so the
    // next step sees the full exchange.
    const assistantMessage = {
      role: "assistant",
      content: result.text || null,
      toolCalls: result.toolCalls
    };
    messages = [...messages, assistantMessage];
    for (const tr of stepToolResults) {
      const toolMessage = {
        role: "tool",
        toolCallId: tr.toolCallId,
        content: JSON.stringify(tr.result)
      };
      messages = [...messages, toolMessage];
    }
  }
  const lastStep = steps[steps.length - 1];
  return {
    text: lastStep?.text ?? "",
    usage: sumUsage(steps),
    finishReason: lastStep?.finishReason ?? "stop",
    steps,
    toolCalls: allToolCalls,
    toolResults: allToolResults,
    response: {
      messages
    }
  };
}
|
|
231
|
+
/**
 * Assemble the outgoing message list from the call params.
 * Order: optional system message, then prior history, then the new
 * user prompt.
 */
function buildMessages(params) {
  const { system, messages: history, prompt } = params;
  const result = [];
  if (system) {
    result.push({ role: "system", content: system });
  }
  if (history) {
    result.push(...history);
  }
  if (prompt) {
    result.push({ role: "user", content: prompt });
  }
  return result;
}
|
|
244
|
+
/**
 * Pick the tool wire-format for a provider. Anthropic has its own schema;
 * every other provider (openai, xai, azure, and any unknown value) uses
 * the OpenAI function-calling format.
 */
function formatToolsForProvider(tools, provider) {
  if (provider === "anthropic") {
    return formatToolsForAnthropic(tools);
  }
  return formatToolsForOpenAI(tools);
}
|
|
255
|
+
/**
 * Total token usage across all generation steps.
 * Steps without a usage record contribute zero.
 */
function sumUsage(steps) {
  const total = { promptTokens: 0, completionTokens: 0, totalTokens: 0 };
  for (const step of steps) {
    total.promptTokens += step.usage?.promptTokens ?? 0;
    total.completionTokens += step.usage?.completionTokens ?? 0;
    total.totalTokens += step.usage?.totalTokens ?? 0;
  }
  return total;
}
|
|
265
|
+
|
|
266
|
+
// src/core/stream-text.ts
|
|
267
|
+
// src/core/stream-text.ts
/**
 * Run a (possibly multi-step) streaming generation against `model`.
 *
 * Returns an object exposing several consumption styles:
 *  - `textStream`: async iterable of text deltas only;
 *  - `fullStream`: async iterable of all stream events (step markers,
 *    text deltas, tool calls/results, errors, final finish);
 *  - lazy `text` / `usage` / `finishReason` promise getters;
 *  - `toTextStreamResponse` / `toDataStreamResponse` for HTTP handlers.
 *
 * NOTE(review): every access point above calls `createFullStream()`
 * afresh, so consuming more than one of them (e.g. iterating `fullStream`
 * and then awaiting `text`) appears to re-run the whole generation and
 * re-append into the shared `fullText`/`finalUsage` accumulators —
 * confirm this is intended before relying on multiple consumers.
 */
async function streamText(params) {
  const { model, tools, maxSteps = 1, signal } = params;
  // Accumulators shared by all stream invocations (see NOTE above).
  let fullText = "";
  let finalUsage = {
    promptTokens: 0,
    completionTokens: 0,
    totalTokens: 0
  };
  let finalFinishReason = "stop";
  // Core generator: drives the model step-by-step, executing tool calls
  // between steps, and yields typed events for each thing that happens.
  async function* createFullStream() {
    let messages = buildMessages2(params);
    for (let step = 0; step < maxSteps; step++) {
      yield { type: "step-start", step };
      // Abort is checked at step boundaries; the signal is also forwarded
      // to doStream and tool executions.
      if (signal?.aborted) {
        yield { type: "error", error: new Error("Stream aborted") };
        return;
      }
      const formattedTools = tools ? formatToolsForProvider2(tools, model.provider) : void 0;
      let stepText = "";
      const toolCalls = [];
      let stepFinishReason = "stop";
      try {
        for await (const chunk of model.doStream({
          messages,
          tools: formattedTools,
          temperature: params.temperature,
          maxTokens: params.maxTokens,
          signal
        })) {
          switch (chunk.type) {
            case "text-delta":
              stepText += chunk.text;
              fullText += chunk.text;
              yield { type: "text-delta", text: chunk.text };
              break;
            case "tool-call":
              // Buffer calls for execution after the provider stream ends.
              toolCalls.push(chunk.toolCall);
              yield {
                type: "tool-call-complete",
                toolCall: chunk.toolCall
              };
              break;
            case "finish":
              stepFinishReason = chunk.finishReason;
              finalFinishReason = chunk.finishReason;
              if (chunk.usage) {
                // Usage accumulates across steps.
                finalUsage = {
                  promptTokens: finalUsage.promptTokens + chunk.usage.promptTokens,
                  completionTokens: finalUsage.completionTokens + chunk.usage.completionTokens,
                  totalTokens: finalUsage.totalTokens + chunk.usage.totalTokens
                };
              }
              break;
            case "error":
              // Provider-reported error terminates the whole stream.
              yield { type: "error", error: chunk.error };
              return;
          }
        }
      } catch (error) {
        // Thrown errors are converted to an error event, then the stream ends.
        yield {
          type: "error",
          error: error instanceof Error ? error : new Error(String(error))
        };
        return;
      }
      yield { type: "step-finish", step, finishReason: stepFinishReason };
      // No tool calls (or no tool map) means the model is done.
      if (toolCalls.length === 0 || !tools) {
        break;
      }
      // Record the assistant turn, then execute each tool call and append
      // its result as a "tool" message for the next step.
      const assistantMessage = {
        role: "assistant",
        content: stepText || null,
        toolCalls
      };
      messages = [...messages, assistantMessage];
      for (const call of toolCalls) {
        const toolDef = tools[call.name];
        if (!toolDef) {
          // Unknown tool: emit an error result instead of throwing.
          const errorResult = { error: `Tool not found: ${call.name}` };
          yield {
            type: "tool-result",
            toolCallId: call.id,
            result: errorResult
          };
          messages = [
            ...messages,
            {
              role: "tool",
              toolCallId: call.id,
              content: JSON.stringify(errorResult)
            }
          ];
          continue;
        }
        try {
          // Validate model-supplied args against the tool's Zod schema.
          const parsedArgs = toolDef.parameters.parse(call.args);
          const result = await toolDef.execute(parsedArgs, {
            toolCallId: call.id,
            abortSignal: signal,
            messages
          });
          yield { type: "tool-result", toolCallId: call.id, result };
          messages = [
            ...messages,
            {
              role: "tool",
              toolCallId: call.id,
              content: JSON.stringify(result)
            }
          ];
        } catch (error) {
          // Tool failures (including validation) are fed back to the model.
          const errorResult = {
            error: error instanceof Error ? error.message : "Tool execution failed"
          };
          yield {
            type: "tool-result",
            toolCallId: call.id,
            result: errorResult
          };
          messages = [
            ...messages,
            {
              role: "tool",
              toolCallId: call.id,
              content: JSON.stringify(errorResult)
            }
          ];
        }
      }
    }
    // Final event summarizing the whole run.
    yield {
      type: "finish",
      finishReason: finalFinishReason,
      usage: finalUsage
    };
  }
  // Text-only projection of the full stream.
  async function* createTextStream() {
    for await (const part of createFullStream()) {
      if (part.type === "text-delta") {
        yield part.text;
      }
    }
  }
  let textPromise;
  let usagePromise;
  let finishReasonPromise;
  // Drains a fresh full stream purely for its side effects on the
  // accumulators above (see NOTE in the function docstring).
  async function consumeStream() {
    for await (const _ of createFullStream()) {
    }
  }
  return {
    textStream: createTextStream(),
    fullStream: createFullStream(),
    // Each getter lazily drains a stream once and caches the promise.
    get text() {
      if (!textPromise) {
        textPromise = consumeStream().then(() => fullText);
      }
      return textPromise;
    },
    get usage() {
      if (!usagePromise) {
        usagePromise = consumeStream().then(() => finalUsage);
      }
      return usagePromise;
    },
    get finishReason() {
      if (!finishReasonPromise) {
        finishReasonPromise = consumeStream().then(() => finalFinishReason);
      }
      return finishReasonPromise;
    },
    // Plain-text HTTP response (text deltas only).
    toTextStreamResponse(options) {
      const stream = createTextStreamReadable(createTextStream());
      return new Response(stream, {
        status: options?.status ?? 200,
        headers: {
          "Content-Type": "text/plain; charset=utf-8",
          "Cache-Control": "no-cache",
          Connection: "keep-alive",
          ...options?.headers
        }
      });
    },
    // Server-sent-events HTTP response (all events as JSON `data:` lines).
    toDataStreamResponse(options) {
      const stream = createDataStreamReadable(createFullStream());
      return new Response(stream, {
        status: options?.status ?? 200,
        headers: {
          "Content-Type": "text/event-stream",
          "Cache-Control": "no-cache",
          Connection: "keep-alive",
          ...options?.headers
        }
      });
    }
  };
}
|
|
464
|
+
/**
 * Build the message list for a streaming call: optional system message,
 * then existing history, then the new user prompt.
 * (Bundled duplicate of buildMessages from generate-text.)
 */
function buildMessages2(params) {
  const out = [];
  if (params.system) {
    out.push({ role: "system", content: params.system });
  }
  for (const msg of params.messages ?? []) {
    out.push(msg);
  }
  if (params.prompt) {
    out.push({ role: "user", content: params.prompt });
  }
  return out;
}
|
|
477
|
+
/**
 * Select the tool wire-format for a provider (streaming variant).
 * Anthropic gets its own schema; openai/xai/azure and any unknown
 * provider fall back to the OpenAI format.
 */
function formatToolsForProvider2(tools, provider) {
  return provider === "anthropic"
    ? formatToolsForAnthropic(tools)
    : formatToolsForOpenAI(tools);
}
|
|
488
|
+
/**
 * Adapt an async iterable of text chunks into a UTF-8 byte ReadableStream.
 * Iteration errors are forwarded to the stream consumer via controller.error.
 */
function createTextStreamReadable(textStream) {
  const encoder = new TextEncoder();
  return new ReadableStream({
    async start(controller) {
      try {
        for await (const piece of textStream) {
          controller.enqueue(encoder.encode(piece));
        }
        controller.close();
      } catch (err) {
        controller.error(err);
      }
    }
  });
}
|
|
503
|
+
/**
 * Adapt the full event stream into a server-sent-events ReadableStream.
 * Each part becomes one `data: <json>` event; the stream is terminated
 * by `data: [DONE]`. A thrown error is reported as a final error event
 * and the stream then closes normally.
 */
function createDataStreamReadable(fullStream) {
  const encoder = new TextEncoder();
  const sseEvent = (payload) => encoder.encode(`data: ${payload}\n\n`);
  return new ReadableStream({
    async start(controller) {
      try {
        for await (const part of fullStream) {
          controller.enqueue(sseEvent(JSON.stringify(part)));
        }
        controller.enqueue(encoder.encode("data: [DONE]\n\n"));
        controller.close();
      } catch (err) {
        const message = err instanceof Error ? err.message : String(err);
        controller.enqueue(sseEvent(JSON.stringify({ type: "error", error: message })));
        controller.close();
      }
    }
  });
}
|
|
529
|
+
|
|
530
|
+
// src/core/types.ts
|
|
531
|
+
// Baseline capability flags a model falls back to when a provider does
// not declare its own. Conservative defaults: tool calling and streaming
// enabled, all media/reasoning features disabled.
var DEFAULT_CAPABILITIES = {
  supportsVision: false,
  supportsTools: true,
  supportsStreaming: true,
  supportsJsonMode: false,
  supportsThinking: false,
  supportsPDF: false,
  // NOTE(review): unclear whether this is context-window size or max
  // output tokens — provider tables below use it as context window; confirm.
  maxTokens: 8192,
  supportedImageTypes: []
};
|
|
541
|
+
|
|
542
|
+
// src/providers/openai/provider.ts
|
|
543
|
+
// src/providers/openai/provider.ts
// Static capability table for known OpenAI model ids.
// Fields: vision (image input), tools (function calling), jsonMode
// (structured JSON output), maxTokens (128e3 = 128000 — appears to be
// the context window, not max output; confirm against OpenAI docs).
// Unknown ids fall back to the "gpt-4o" entry (see openai() below).
var OPENAI_MODELS = {
  // GPT-4o series
  "gpt-4o": { vision: true, tools: true, jsonMode: true, maxTokens: 128e3 },
  "gpt-4o-mini": {
    vision: true,
    tools: true,
    jsonMode: true,
    maxTokens: 128e3
  },
  "gpt-4o-2024-11-20": {
    vision: true,
    tools: true,
    jsonMode: true,
    maxTokens: 128e3
  },
  "gpt-4o-2024-08-06": {
    vision: true,
    tools: true,
    jsonMode: true,
    maxTokens: 128e3
  },
  // GPT-4 Turbo
  "gpt-4-turbo": {
    vision: true,
    tools: true,
    jsonMode: true,
    maxTokens: 128e3
  },
  "gpt-4-turbo-preview": {
    vision: false,
    tools: true,
    jsonMode: true,
    maxTokens: 128e3
  },
  // GPT-4
  "gpt-4": { vision: false, tools: true, jsonMode: false, maxTokens: 8192 },
  "gpt-4-32k": {
    vision: false,
    tools: true,
    jsonMode: false,
    maxTokens: 32768
  },
  // GPT-3.5
  "gpt-3.5-turbo": {
    vision: false,
    tools: true,
    jsonMode: true,
    maxTokens: 16385
  },
  // O1 series (reasoning) — tool calling disabled for these entries.
  o1: { vision: true, tools: false, jsonMode: false, maxTokens: 128e3 },
  "o1-mini": { vision: true, tools: false, jsonMode: false, maxTokens: 128e3 },
  "o1-preview": {
    vision: true,
    tools: false,
    jsonMode: false,
    maxTokens: 128e3
  },
  // O3 series
  "o3-mini": { vision: true, tools: false, jsonMode: false, maxTokens: 128e3 }
};
|
|
604
|
+
/**
 * Create an OpenAI chat model handle.
 *
 * The OpenAI SDK is loaded lazily via dynamic import on first use and the
 * client is cached for the lifetime of this handle. API key falls back to
 * the OPENAI_API_KEY environment variable; baseURL defaults to the public
 * OpenAI endpoint. Capability flags come from OPENAI_MODELS, defaulting
 * to the "gpt-4o" entry for unknown model ids.
 */
function openai(modelId, options = {}) {
  const apiKey = options.apiKey ?? process.env.OPENAI_API_KEY;
  const baseURL = options.baseURL ?? "https://api.openai.com/v1";
  let client = null;
  // Lazily instantiate and memoize the OpenAI client.
  async function getClient() {
    if (!client) {
      const { default: OpenAI } = await import('openai');
      client = new OpenAI({
        apiKey,
        baseURL,
        organization: options.organization,
        defaultHeaders: options.headers
      });
    }
    return client;
  }
  const modelConfig = OPENAI_MODELS[modelId] ?? OPENAI_MODELS["gpt-4o"];
  return {
    provider: "openai",
    modelId,
    capabilities: {
      supportsVision: modelConfig.vision,
      supportsTools: modelConfig.tools,
      supportsStreaming: true,
      supportsJsonMode: modelConfig.jsonMode,
      supportsThinking: false,
      supportsPDF: false,
      maxTokens: modelConfig.maxTokens,
      supportedImageTypes: modelConfig.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : []
    },
    // One-shot (non-streaming) chat completion.
    async doGenerate(params) {
      const client2 = await getClient();
      const messages = formatMessagesForOpenAI(params.messages);
      // NOTE(review): params.signal is accepted but not forwarded to the
      // OpenAI request here — confirm whether abort should cancel the call.
      const response = await client2.chat.completions.create({
        model: modelId,
        messages,
        tools: params.tools,
        temperature: params.temperature,
        max_tokens: params.maxTokens
      });
      const choice = response.choices[0];
      const message = choice.message;
      // Map API tool calls to the SDK shape; arguments arrive as a JSON
      // string ("" treated as empty object). JSON.parse will throw on
      // malformed arguments.
      const toolCalls = (message.tool_calls ?? []).map(
        (tc) => ({
          id: tc.id,
          name: tc.function.name,
          args: JSON.parse(tc.function.arguments || "{}")
        })
      );
      return {
        text: message.content ?? "",
        toolCalls,
        finishReason: mapFinishReason(choice.finish_reason),
        usage: {
          promptTokens: response.usage?.prompt_tokens ?? 0,
          completionTokens: response.usage?.completion_tokens ?? 0,
          totalTokens: response.usage?.total_tokens ?? 0
        },
        rawResponse: response
      };
    },
    // Streaming chat completion: yields text-delta / tool-call / finish /
    // error events. Tool-call argument fragments are accumulated until the
    // next call id or finish_reason arrives.
    async *doStream(params) {
      const client2 = await getClient();
      const messages = formatMessagesForOpenAI(params.messages);
      const stream = await client2.chat.completions.create({
        model: modelId,
        messages,
        tools: params.tools,
        temperature: params.temperature,
        max_tokens: params.maxTokens,
        stream: true
      });
      // In-progress tool call being assembled from streamed fragments.
      let currentToolCall = null;
      let totalPromptTokens = 0;
      let totalCompletionTokens = 0;
      for await (const chunk of stream) {
        // Abort checked per chunk; emitted as an error event, not a throw.
        if (params.signal?.aborted) {
          yield { type: "error", error: new Error("Aborted") };
          return;
        }
        const choice = chunk.choices[0];
        const delta = choice?.delta;
        if (delta?.content) {
          yield { type: "text-delta", text: delta.content };
        }
        if (delta?.tool_calls) {
          for (const tc of delta.tool_calls) {
            if (tc.id) {
              // A new id means the previous tool call is complete — flush it.
              if (currentToolCall) {
                yield {
                  type: "tool-call",
                  toolCall: {
                    id: currentToolCall.id,
                    name: currentToolCall.name,
                    args: JSON.parse(currentToolCall.arguments || "{}")
                  }
                };
              }
              currentToolCall = {
                id: tc.id,
                name: tc.function?.name ?? "",
                arguments: tc.function?.arguments ?? ""
              };
            } else if (currentToolCall && tc.function?.arguments) {
              // Continuation fragment: append to the pending arguments JSON.
              currentToolCall.arguments += tc.function.arguments;
            }
          }
        }
        if (choice?.finish_reason) {
          // Flush the trailing tool call, if any, before finishing.
          if (currentToolCall) {
            yield {
              type: "tool-call",
              toolCall: {
                id: currentToolCall.id,
                name: currentToolCall.name,
                args: JSON.parse(currentToolCall.arguments || "{}")
              }
            };
            currentToolCall = null;
          }
          // NOTE(review): the request does not set
          // stream_options.include_usage, so chunk.usage is typically
          // absent and the reported usage stays 0 — confirm intended.
          if (chunk.usage) {
            totalPromptTokens = chunk.usage.prompt_tokens;
            totalCompletionTokens = chunk.usage.completion_tokens;
          }
          yield {
            type: "finish",
            finishReason: mapFinishReason(choice.finish_reason),
            usage: {
              promptTokens: totalPromptTokens,
              completionTokens: totalCompletionTokens,
              totalTokens: totalPromptTokens + totalCompletionTokens
            }
          };
        }
      }
    }
  };
}
|
|
742
|
+
/**
 * Normalize an OpenAI `finish_reason` string to the SDK vocabulary.
 * Both tool_calls and the legacy function_call map to "tool-calls";
 * anything unrecognized (including null/undefined) maps to "unknown".
 */
function mapFinishReason(reason) {
  const mapping = new Map([
    ["stop", "stop"],
    ["length", "length"],
    ["tool_calls", "tool-calls"],
    ["function_call", "tool-calls"],
    ["content_filter", "content-filter"]
  ]);
  return mapping.get(reason) ?? "unknown";
}
|
|
757
|
+
/**
 * Translate SDK chat messages into OpenAI Chat Completions format.
 * - Multimodal user content becomes text / image_url parts (raw image
 *   bytes are base64-encoded into a data: URL).
 * - Assistant tool calls become `tool_calls` entries.
 * - Tool results become role "tool" messages with `tool_call_id`.
 * Unknown roles pass through unchanged.
 */
function formatMessagesForOpenAI(messages) {
  return messages.map((msg) => {
    switch (msg.role) {
      case "system":
        return { role: "system", content: msg.content };
      case "user": {
        if (typeof msg.content === "string") {
          return { role: "user", content: msg.content };
        }
        const parts = msg.content.map((part) => {
          if (part.type === "text") {
            return { type: "text", text: part.text };
          }
          if (part.type === "image") {
            const raw = typeof part.image === "string" ? part.image : Buffer.from(part.image).toString("base64");
            const url = raw.startsWith("data:") ? raw : `data:${part.mimeType ?? "image/png"};base64,${raw}`;
            return { type: "image_url", image_url: { url, detail: "auto" } };
          }
          // Unrecognized part kinds degrade to an empty text part.
          return { type: "text", text: "" };
        });
        return { role: "user", content: parts };
      }
      case "assistant": {
        const assistantMsg = {
          role: "assistant",
          content: msg.content
        };
        if (msg.toolCalls && msg.toolCalls.length > 0) {
          assistantMsg.tool_calls = msg.toolCalls.map((tc) => ({
            id: tc.id,
            type: "function",
            function: {
              name: tc.name,
              arguments: JSON.stringify(tc.args)
            }
          }));
        }
        return assistantMsg;
      }
      case "tool":
        return {
          role: "tool",
          tool_call_id: msg.toolCallId,
          content: msg.content
        };
      default:
        return msg;
    }
  });
}
|
|
807
|
+
|
|
808
|
+
// src/providers/anthropic/provider.ts
|
|
809
|
+
// src/providers/anthropic/provider.ts
// Static capability table for known Anthropic model ids.
// Fields: vision (image input), tools (tool use), thinking (extended
// reasoning), pdf (PDF document input), maxTokens (2e5 = 200000 —
// appears to be the context window, not max output; confirm against
// Anthropic docs). Unknown ids fall back to "claude-3-5-sonnet-latest".
var ANTHROPIC_MODELS = {
  // Claude 4 series
  "claude-sonnet-4-20250514": {
    vision: true,
    tools: true,
    thinking: true,
    pdf: true,
    maxTokens: 2e5
  },
  "claude-opus-4-20250514": {
    vision: true,
    tools: true,
    thinking: true,
    pdf: true,
    maxTokens: 2e5
  },
  // Claude 3.7 series
  "claude-3-7-sonnet-20250219": {
    vision: true,
    tools: true,
    thinking: true,
    pdf: true,
    maxTokens: 2e5
  },
  "claude-3-7-sonnet-latest": {
    vision: true,
    tools: true,
    thinking: true,
    pdf: true,
    maxTokens: 2e5
  },
  // Claude 3.5 series
  "claude-3-5-sonnet-20241022": {
    vision: true,
    tools: true,
    thinking: false,
    pdf: true,
    maxTokens: 2e5
  },
  "claude-3-5-sonnet-latest": {
    vision: true,
    tools: true,
    thinking: false,
    pdf: true,
    maxTokens: 2e5
  },
  "claude-3-5-haiku-20241022": {
    vision: true,
    tools: true,
    thinking: false,
    pdf: false,
    maxTokens: 2e5
  },
  "claude-3-5-haiku-latest": {
    vision: true,
    tools: true,
    thinking: false,
    pdf: false,
    maxTokens: 2e5
  },
  // Claude 3 series
  "claude-3-opus-20240229": {
    vision: true,
    tools: true,
    thinking: false,
    pdf: false,
    maxTokens: 2e5
  },
  "claude-3-sonnet-20240229": {
    vision: true,
    tools: true,
    thinking: false,
    pdf: false,
    maxTokens: 2e5
  },
  "claude-3-haiku-20240307": {
    vision: true,
    tools: true,
    thinking: false,
    pdf: false,
    maxTokens: 2e5
  }
};
|
|
892
|
+
function anthropic(modelId, options = {}) {
  // Create a chat-model handle for Anthropic's Messages API.
  // The SDK client is imported lazily, so @anthropic-ai/sdk is only loaded
  // (and the API key only needed) once a request is actually made.
  const apiKey = options.apiKey ?? process.env.ANTHROPIC_API_KEY;
  let client = null;
  async function getClient() {
    if (!client) {
      const { default: Anthropic } = await import('@anthropic-ai/sdk');
      client = new Anthropic({
        apiKey,
        baseURL: options.baseURL
      });
    }
    return client;
  }
  // Unknown model ids fall back to the claude-3-5-sonnet-latest capability set.
  const modelConfig = ANTHROPIC_MODELS[modelId] ?? ANTHROPIC_MODELS["claude-3-5-sonnet-latest"];
  // Translate unified call params into an Anthropic Messages request payload.
  // Shared by doGenerate and doStream so the two paths cannot drift apart.
  function buildRequest(params) {
    const { system, messages } = formatMessagesForAnthropic(params.messages);
    const requestOptions = {
      model: modelId,
      max_tokens: params.maxTokens ?? 4096,
      system: system || void 0,
      messages,
      tools: params.tools
    };
    if (params.temperature !== void 0) {
      requestOptions.temperature = params.temperature;
    }
    // Extended thinking is opt-in and only honored on models flagged for it.
    if (options.thinking?.enabled && modelConfig.thinking) {
      requestOptions.thinking = {
        type: "enabled",
        budget_tokens: options.thinking.budgetTokens ?? 1e4
      };
    }
    return requestOptions;
  }
  return {
    provider: "anthropic",
    modelId,
    capabilities: {
      supportsVision: modelConfig.vision,
      supportsTools: modelConfig.tools,
      supportsStreaming: true,
      supportsJsonMode: false,
      supportsThinking: modelConfig.thinking,
      supportsPDF: modelConfig.pdf,
      maxTokens: modelConfig.maxTokens,
      supportedImageTypes: modelConfig.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : []
    },
    // Single-shot completion: returns { text, toolCalls, finishReason, usage,
    // rawResponse } assembled from the response content blocks.
    async doGenerate(params) {
      const client2 = await getClient();
      const response = await client2.messages.create(buildRequest(params));
      let text = "";
      const toolCalls = [];
      for (const block of response.content) {
        if (block.type === "text") {
          text += block.text;
        } else if (block.type === "tool_use") {
          toolCalls.push({
            id: block.id,
            name: block.name,
            args: block.input
          });
        }
      }
      return {
        text,
        toolCalls,
        finishReason: mapFinishReason2(response.stop_reason),
        usage: {
          promptTokens: response.usage?.input_tokens ?? 0,
          completionTokens: response.usage?.output_tokens ?? 0,
          totalTokens: (response.usage?.input_tokens ?? 0) + (response.usage?.output_tokens ?? 0)
        },
        rawResponse: response
      };
    },
    // Streaming completion: yields text-delta / tool-call / finish / error
    // events. Tool-call JSON arrives in fragments (input_json_delta) and is
    // accumulated until the content block closes.
    async *doStream(params) {
      const client2 = await getClient();
      const stream = await client2.messages.stream(buildRequest(params));
      let currentToolUse = null;
      let inputTokens = 0;
      let outputTokens = 0;
      for await (const event of stream) {
        if (params.signal?.aborted) {
          yield { type: "error", error: new Error("Aborted") };
          return;
        }
        switch (event.type) {
          case "message_start":
            if (event.message?.usage) {
              inputTokens = event.message.usage.input_tokens ?? 0;
            }
            break;
          case "content_block_start":
            if (event.content_block?.type === "tool_use") {
              currentToolUse = {
                id: event.content_block.id,
                name: event.content_block.name,
                input: ""
              };
            }
            break;
          case "content_block_delta":
            if (event.delta?.type === "text_delta") {
              yield { type: "text-delta", text: event.delta.text };
            } else if (event.delta?.type === "input_json_delta" && currentToolUse) {
              currentToolUse.input += event.delta.partial_json;
            }
            break;
          case "content_block_stop":
            // The tool call's argument JSON is complete only now.
            if (currentToolUse) {
              yield {
                type: "tool-call",
                toolCall: {
                  id: currentToolUse.id,
                  name: currentToolUse.name,
                  args: JSON.parse(currentToolUse.input || "{}")
                }
              };
              currentToolUse = null;
            }
            break;
          case "message_delta":
            if (event.usage) {
              outputTokens = event.usage.output_tokens ?? 0;
            }
            if (event.delta?.stop_reason) {
              yield {
                type: "finish",
                finishReason: mapFinishReason2(event.delta.stop_reason),
                usage: {
                  promptTokens: inputTokens,
                  completionTokens: outputTokens,
                  totalTokens: inputTokens + outputTokens
                }
              };
            }
            break;
        }
      }
    }
  };
}
|
|
1048
|
+
function mapFinishReason2(reason) {
  // Normalize an Anthropic stop_reason into the SDK's unified finish-reason
  // vocabulary; anything unrecognized (including null/undefined) -> "unknown".
  const FINISH_REASONS = new Map([
    ["end_turn", "stop"],
    ["stop_sequence", "stop"],
    ["max_tokens", "length"],
    ["tool_use", "tool-calls"]
  ]);
  return FINISH_REASONS.get(reason) ?? "unknown";
}
|
|
1061
|
+
function formatMessagesForAnthropic(messages) {
  // Convert the SDK's unified message list into the shape expected by the
  // Anthropic Messages API:
  //  - system messages are hoisted into one newline-joined `system` string;
  //  - tool results are buffered and emitted as tool_result blocks inside a
  //    user-role turn (collapsing consecutive results into a single turn);
  //  - images become URL sources when the value looks like a URL, otherwise
  //    inline base64 (data: URLs are stripped to their payload).
  // Returns { system, messages }.
  let system = "";
  const formatted = [];
  const pendingToolResults = [];
  // Flush buffered tool results into one user-role message. Runs before any
  // user/assistant turn and once at the end. (The original inlined this logic
  // verbatim in three places.)
  function flushToolResults() {
    if (pendingToolResults.length === 0) return;
    formatted.push({
      role: "user",
      content: pendingToolResults.map((tr) => ({
        type: "tool_result",
        tool_use_id: tr.toolCallId,
        content: tr.content
      }))
    });
    pendingToolResults.length = 0;
  }
  for (const msg of messages) {
    if (msg.role === "system") {
      system += (system ? "\n" : "") + msg.content;
      continue;
    }
    if (msg.role === "user" || msg.role === "assistant") {
      flushToolResults();
    }
    if (msg.role === "user") {
      if (typeof msg.content === "string") {
        formatted.push({ role: "user", content: msg.content });
      } else {
        const content = [];
        for (const part of msg.content) {
          if (part.type === "text") {
            content.push({ type: "text", text: part.text });
          } else if (part.type === "image") {
            const imageData = typeof part.image === "string" ? part.image : Buffer.from(part.image).toString("base64");
            if (imageData.startsWith("http")) {
              // Remote image: pass the URL through as-is.
              content.push({
                type: "image",
                source: { type: "url", url: imageData }
              });
            } else {
              // Inline image: strip any data: URL prefix down to raw base64.
              const base64 = imageData.startsWith("data:") ? imageData.split(",")[1] : imageData;
              content.push({
                type: "image",
                source: {
                  type: "base64",
                  media_type: part.mimeType ?? "image/png",
                  data: base64
                }
              });
            }
          }
        }
        formatted.push({ role: "user", content });
      }
    } else if (msg.role === "assistant") {
      const content = [];
      if (msg.content) {
        content.push({ type: "text", text: msg.content });
      }
      if (msg.toolCalls && msg.toolCalls.length > 0) {
        for (const tc of msg.toolCalls) {
          content.push({
            type: "tool_use",
            id: tc.id,
            name: tc.name,
            input: tc.args
          });
        }
      }
      // Assistant turns with neither text nor tool calls are dropped.
      if (content.length > 0) {
        formatted.push({ role: "assistant", content });
      }
    } else if (msg.role === "tool") {
      pendingToolResults.push({
        toolCallId: msg.toolCallId,
        content: msg.content
      });
    }
  }
  flushToolResults();
  return { system, messages: formatted };
}
|
|
1159
|
+
|
|
1160
|
+
// src/providers/xai/provider.ts
// Capability table for xAI Grok models, keyed by model id.
// vision/tools feed capabilities.supportsVision/supportsTools in xai();
// maxTokens feeds capabilities.maxTokens — presumably the context-window
// size (2e6 = 2,000,000; 256e3 = 256,000) — confirm against xAI's docs.
var XAI_MODELS = {
  // Grok 4.1 Fast (Latest - December 2025)
  "grok-4-1-fast-reasoning": { vision: false, tools: true, maxTokens: 2e6 },
  "grok-4-1-fast-non-reasoning": {
    vision: false,
    tools: true,
    maxTokens: 2e6
  },
  // Grok 4 Fast (September 2025)
  "grok-4-fast-reasoning": { vision: false, tools: true, maxTokens: 2e6 },
  "grok-4-fast-non-reasoning": {
    vision: false,
    tools: true,
    maxTokens: 2e6
  },
  // Grok 4 (July 2025)
  "grok-4": { vision: true, tools: true, maxTokens: 256e3 },
  "grok-4-0709": { vision: true, tools: true, maxTokens: 256e3 },
  // Grok 3 (February 2025) - Stable
  "grok-3-beta": { vision: true, tools: true, maxTokens: 131072 },
  // Also the fallback config for unknown model ids (see xai()).
  "grok-3-fast-beta": { vision: false, tools: true, maxTokens: 131072 },
  "grok-3-mini-beta": { vision: false, tools: true, maxTokens: 32768 },
  "grok-3-mini-fast-beta": { vision: false, tools: true, maxTokens: 32768 },
  // Grok Code Fast (August 2025)
  "grok-code-fast-1": { vision: false, tools: true, maxTokens: 256e3 },
  // Grok 2 (Legacy)
  "grok-2": { vision: true, tools: true, maxTokens: 131072 },
  "grok-2-latest": { vision: true, tools: true, maxTokens: 131072 },
  "grok-2-mini": { vision: false, tools: true, maxTokens: 131072 }
};
|
|
1191
|
+
function xai(modelId, options = {}) {
  // Create a chat-model handle for xAI's Grok API, which is served through an
  // OpenAI-compatible endpoint; the `openai` SDK client is imported lazily.
  const apiKey = options.apiKey ?? process.env.XAI_API_KEY;
  const baseURL = options.baseURL ?? "https://api.x.ai/v1";
  let client = null;
  async function getClient() {
    if (!client) {
      const { default: OpenAI } = await import('openai');
      client = new OpenAI({
        apiKey,
        baseURL
      });
    }
    return client;
  }
  // Unknown model ids fall back to the grok-3-fast-beta capability set.
  const modelConfig = XAI_MODELS[modelId] ?? XAI_MODELS["grok-3-fast-beta"];
  // Translate unified call params into a chat-completions payload. Shared by
  // doGenerate and doStream so the two request paths cannot drift apart.
  function buildRequest(params) {
    return {
      model: modelId,
      messages: formatMessagesForXAI(params.messages),
      tools: params.tools,
      temperature: params.temperature,
      max_tokens: params.maxTokens
    };
  }
  // Convert an accumulated streaming tool call (arguments gathered as a raw
  // JSON string) into a unified tool-call event.
  function toToolCallEvent(call) {
    return {
      type: "tool-call",
      toolCall: {
        id: call.id,
        name: call.name,
        args: JSON.parse(call.arguments || "{}")
      }
    };
  }
  return {
    provider: "xai",
    modelId,
    capabilities: {
      supportsVision: modelConfig.vision,
      supportsTools: modelConfig.tools,
      supportsStreaming: true,
      supportsJsonMode: false,
      // xAI doesn't support JSON mode yet
      supportsThinking: false,
      supportsPDF: false,
      maxTokens: modelConfig.maxTokens,
      supportedImageTypes: modelConfig.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : []
    },
    // Single-shot completion: returns { text, toolCalls, finishReason, usage,
    // rawResponse } from the first choice of the response.
    async doGenerate(params) {
      const client2 = await getClient();
      const response = await client2.chat.completions.create(buildRequest(params));
      const choice = response.choices[0];
      const message = choice.message;
      const toolCalls = (message.tool_calls ?? []).map(
        (tc) => ({
          id: tc.id,
          name: tc.function.name,
          args: JSON.parse(tc.function.arguments || "{}")
        })
      );
      return {
        text: message.content ?? "",
        toolCalls,
        finishReason: mapFinishReason3(choice.finish_reason),
        usage: {
          promptTokens: response.usage?.prompt_tokens ?? 0,
          completionTokens: response.usage?.completion_tokens ?? 0,
          totalTokens: response.usage?.total_tokens ?? 0
        },
        rawResponse: response
      };
    },
    // Streaming completion: yields text-delta / tool-call / finish / error
    // events. Tool-call arguments arrive in fragments and are accumulated
    // until the next tool-call id (or the finish chunk) arrives.
    async *doStream(params) {
      const client2 = await getClient();
      const stream = await client2.chat.completions.create({
        ...buildRequest(params),
        stream: true
      });
      let currentToolCall = null;
      let totalPromptTokens = 0;
      let totalCompletionTokens = 0;
      for await (const chunk of stream) {
        if (params.signal?.aborted) {
          yield { type: "error", error: new Error("Aborted") };
          return;
        }
        const choice = chunk.choices[0];
        const delta = choice?.delta;
        if (delta?.content) {
          yield { type: "text-delta", text: delta.content };
        }
        if (delta?.tool_calls) {
          for (const tc of delta.tool_calls) {
            if (tc.id) {
              // A fresh id starts the next tool call; flush the previous one.
              if (currentToolCall) {
                yield toToolCallEvent(currentToolCall);
              }
              currentToolCall = {
                id: tc.id,
                name: tc.function?.name ?? "",
                arguments: tc.function?.arguments ?? ""
              };
            } else if (currentToolCall && tc.function?.arguments) {
              currentToolCall.arguments += tc.function.arguments;
            }
          }
        }
        if (choice?.finish_reason) {
          if (currentToolCall) {
            yield toToolCallEvent(currentToolCall);
            currentToolCall = null;
          }
          if (chunk.usage) {
            totalPromptTokens = chunk.usage.prompt_tokens;
            totalCompletionTokens = chunk.usage.completion_tokens;
          }
          yield {
            type: "finish",
            finishReason: mapFinishReason3(choice.finish_reason),
            usage: {
              promptTokens: totalPromptTokens,
              completionTokens: totalCompletionTokens,
              totalTokens: totalPromptTokens + totalCompletionTokens
            }
          };
        }
      }
    }
  };
}
|
|
1328
|
+
function mapFinishReason3(reason) {
  // Normalize an OpenAI-style finish_reason (as returned by the xAI endpoint)
  // into the SDK's unified vocabulary; unrecognized values become "unknown".
  const FINISH_REASONS = new Map([
    ["stop", "stop"],
    ["length", "length"],
    ["tool_calls", "tool-calls"],
    ["function_call", "tool-calls"],
    ["content_filter", "content-filter"]
  ]);
  return FINISH_REASONS.get(reason) ?? "unknown";
}
|
|
1343
|
+
function formatMessagesForXAI(messages) {
  // Convert the SDK's unified message list into OpenAI-compatible
  // chat-completion messages accepted by the xAI endpoint. Roles map 1:1;
  // multimodal user parts become text / image_url entries; unified tool
  // calls are re-serialized as OpenAI function tool_calls.
  const toUserPart = (part) => {
    if (part.type === "text") {
      return { type: "text", text: part.text };
    }
    if (part.type === "image") {
      const raw = typeof part.image === "string" ? part.image : Buffer.from(part.image).toString("base64");
      // Bare base64 is wrapped in a data: URL; existing data: URLs pass through.
      const url = raw.startsWith("data:") ? raw : `data:${part.mimeType ?? "image/png"};base64,${raw}`;
      return { type: "image_url", image_url: { url, detail: "auto" } };
    }
    // Unknown part kinds degrade to an empty text part.
    return { type: "text", text: "" };
  };
  return messages.map((msg) => {
    if (msg.role === "system") {
      return { role: "system", content: msg.content };
    }
    if (msg.role === "user") {
      if (typeof msg.content === "string") {
        return { role: "user", content: msg.content };
      }
      return { role: "user", content: msg.content.map(toUserPart) };
    }
    if (msg.role === "assistant") {
      const converted = { role: "assistant", content: msg.content };
      if (msg.toolCalls && msg.toolCalls.length > 0) {
        converted.tool_calls = msg.toolCalls.map((tc) => ({
          id: tc.id,
          type: "function",
          function: {
            name: tc.name,
            arguments: JSON.stringify(tc.args)
          }
        }));
      }
      return converted;
    }
    if (msg.role === "tool") {
      return {
        role: "tool",
        tool_call_id: msg.toolCallId,
        content: msg.content
      };
    }
    // Pass through roles this adapter does not recognize.
    return msg;
  });
}
|
|
1393
|
+
|
|
1394
|
+
// src/providers/google/provider.ts
// Capability table for Google Gemini models, keyed by model id.
// Only vision, tools and maxTokens are read by google() in this file
// (audio/video flags are recorded but not surfaced via capabilities).
// maxTokens is presumably the context-window size (e.g. 1048576 = 1M,
// 2097152 = 2M) — confirm against Google's model documentation.
var GOOGLE_MODELS = {
  // Gemini 2.0
  // Also the fallback config for unknown model ids (see google()).
  "gemini-2.0-flash": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    maxTokens: 1048576
  },
  "gemini-2.0-flash-exp": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    maxTokens: 1048576
  },
  "gemini-2.0-flash-thinking-exp": {
    vision: true,
    tools: false,
    audio: false,
    video: false,
    maxTokens: 32767
  },
  // Gemini 1.5
  "gemini-1.5-pro": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    maxTokens: 2097152
  },
  "gemini-1.5-pro-latest": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    maxTokens: 2097152
  },
  "gemini-1.5-flash": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    maxTokens: 1048576
  },
  "gemini-1.5-flash-latest": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    maxTokens: 1048576
  },
  "gemini-1.5-flash-8b": {
    vision: true,
    tools: true,
    audio: false,
    video: false,
    maxTokens: 1048576
  }
};
|
|
1455
|
+
function google(modelId, options = {}) {
  // Create a chat-model handle for Google's Gemini API. The SDK client is
  // imported lazily so @google/generative-ai is only loaded on first use.
  const apiKey = options.apiKey ?? process.env.GOOGLE_API_KEY ?? process.env.GEMINI_API_KEY;
  let client = null;
  async function getClient() {
    if (!client) {
      const { GoogleGenerativeAI } = await import('@google/generative-ai');
      client = new GoogleGenerativeAI(apiKey);
    }
    return client;
  }
  // Unknown model ids fall back to the gemini-2.0-flash capability set.
  const modelConfig = GOOGLE_MODELS[modelId] ?? GOOGLE_MODELS["gemini-2.0-flash"];
  // Build the chat session plus the trailing message to send, from unified
  // params. Shared by doGenerate and doStream so the two paths cannot drift.
  async function prepareChat(params) {
    const client2 = await getClient();
    const model = client2.getGenerativeModel({
      model: modelId,
      safetySettings: options.safetySettings
    });
    const { systemInstruction, contents } = formatMessagesForGemini(
      params.messages
    );
    const chat = model.startChat({
      // Everything before the final turn is history; the final turn is sent
      // as the new message below.
      history: contents.slice(0, -1),
      systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
      tools: params.tools ? [{ functionDeclarations: formatToolsForGemini(params.tools) }] : void 0,
      generationConfig: {
        temperature: params.temperature,
        maxOutputTokens: params.maxTokens
      }
    });
    return { chat, lastMessage: contents[contents.length - 1] };
  }
  return {
    provider: "google",
    modelId,
    capabilities: {
      supportsVision: modelConfig.vision,
      supportsTools: modelConfig.tools,
      supportsStreaming: true,
      supportsJsonMode: true,
      supportsThinking: modelId.includes("thinking"),
      supportsPDF: true,
      maxTokens: modelConfig.maxTokens,
      supportedImageTypes: modelConfig.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : []
    },
    // Single-shot completion: returns { text, toolCalls, finishReason, usage,
    // rawResponse } from the first candidate. Gemini does not assign tool-call
    // ids, so synthetic call_<n> ids are generated.
    async doGenerate(params) {
      const { chat, lastMessage } = await prepareChat(params);
      const result = await chat.sendMessage(lastMessage.parts);
      const response = result.response;
      let text = "";
      const toolCalls = [];
      let toolCallIndex = 0;
      const candidate = response.candidates?.[0];
      if (candidate?.content?.parts) {
        for (const part of candidate.content.parts) {
          if ("text" in part && part.text) {
            text += part.text;
          }
          if ("functionCall" in part && part.functionCall) {
            toolCalls.push({
              id: `call_${toolCallIndex++}`,
              name: part.functionCall.name,
              args: part.functionCall.args || {}
            });
          }
        }
      }
      return {
        text,
        toolCalls,
        finishReason: mapFinishReason4(candidate?.finishReason),
        usage: {
          promptTokens: response.usageMetadata?.promptTokenCount ?? 0,
          completionTokens: response.usageMetadata?.candidatesTokenCount ?? 0,
          totalTokens: response.usageMetadata?.totalTokenCount ?? 0
        },
        rawResponse: response
      };
    },
    // Streaming completion: yields text-delta / tool-call / finish / error
    // events from the chunk stream; SDK errors surface as error events.
    async *doStream(params) {
      const { chat, lastMessage } = await prepareChat(params);
      const result = await chat.sendMessageStream(lastMessage.parts);
      let toolCallIndex = 0;
      let promptTokens = 0;
      let completionTokens = 0;
      try {
        for await (const chunk of result.stream) {
          if (params.signal?.aborted) {
            yield { type: "error", error: new Error("Aborted") };
            return;
          }
          const candidate = chunk.candidates?.[0];
          if (!candidate?.content?.parts) continue;
          for (const part of candidate.content.parts) {
            if ("text" in part && part.text) {
              yield { type: "text-delta", text: part.text };
            }
            if ("functionCall" in part && part.functionCall) {
              yield {
                type: "tool-call",
                toolCall: {
                  id: `call_${toolCallIndex++}`,
                  name: part.functionCall.name,
                  args: part.functionCall.args || {}
                }
              };
            }
          }
          if (chunk.usageMetadata) {
            promptTokens = chunk.usageMetadata.promptTokenCount ?? 0;
            completionTokens = chunk.usageMetadata.candidatesTokenCount ?? 0;
          }
          if (candidate.finishReason) {
            yield {
              type: "finish",
              finishReason: mapFinishReason4(candidate.finishReason),
              usage: {
                promptTokens,
                completionTokens,
                totalTokens: promptTokens + completionTokens
              }
            };
          }
        }
      } catch (error) {
        yield {
          type: "error",
          error: error instanceof Error ? error : new Error(String(error))
        };
      }
    }
  };
}
|
|
1601
|
+
function mapFinishReason4(reason) {
  // Normalize a Gemini candidate finishReason into the SDK's unified
  // vocabulary; unrecognized values (including undefined) become "unknown".
  const FINISH_REASONS = new Map([
    ["STOP", "stop"],
    ["MAX_TOKENS", "length"],
    ["SAFETY", "content-filter"]
  ]);
  return FINISH_REASONS.get(reason) ?? "unknown";
}
|
|
1613
|
+
function formatMessagesForGemini(messages) {
  // Convert the SDK's unified message list into Gemini `contents`:
  //  - system messages are hoisted into one newline-joined systemInstruction;
  //  - assistant turns become role "model" (with functionCall parts for tool
  //    calls); tool results become user-role functionResponse parts;
  //  - a placeholder user turn is prepended when the history would not start
  //    with a user turn, and adjacent same-role turns are merged.
  // Returns { systemInstruction, contents }.
  let systemInstruction = "";
  const contents = [];
  for (const msg of messages) {
    if (msg.role === "system") {
      systemInstruction += (systemInstruction ? "\n" : "") + msg.content;
      continue;
    }
    const parts = [];
    if (msg.role === "user") {
      if (typeof msg.content === "string") {
        parts.push({ text: msg.content });
      } else {
        for (const part of msg.content) {
          if (part.type === "text") {
            parts.push({ text: part.text });
          } else if (part.type === "image") {
            // Images are passed inline as base64; strip any data: URL prefix.
            const imageData = typeof part.image === "string" ? part.image : Buffer.from(part.image).toString("base64");
            const base64 = imageData.startsWith("data:") ? imageData.split(",")[1] : imageData;
            parts.push({
              inlineData: {
                mimeType: part.mimeType ?? "image/png",
                data: base64
              }
            });
          }
        }
      }
      contents.push({ role: "user", parts });
    } else if (msg.role === "assistant") {
      if (msg.content) {
        parts.push({ text: msg.content });
      }
      if (msg.toolCalls?.length) {
        for (const tc of msg.toolCalls) {
          parts.push({
            functionCall: {
              name: tc.name,
              args: tc.args
            }
          });
        }
      }
      if (parts.length > 0) {
        contents.push({ role: "model", parts });
      }
    } else if (msg.role === "tool") {
      // functionResponse.response is sent as a parsed object. Tool output is
      // usually JSON, but previously a plain-text result made JSON.parse
      // throw; fall back to wrapping the raw text instead of crashing.
      let response;
      try {
        response = JSON.parse(msg.content || "{}");
      } catch {
        response = { content: msg.content };
      }
      contents.push({
        role: "user",
        parts: [
          {
            functionResponse: {
              name: "tool",
              // Gemini doesn't track by ID
              response
            }
          }
        ]
      });
    }
  }
  // Ensure the conversation starts with a user turn.
  if (contents.length === 0 || contents[0].role !== "user") {
    contents.unshift({ role: "user", parts: [{ text: "" }] });
  }
  // Merge adjacent same-role turns; parts arrays are copied so the entries
  // built above are not shared with (or mutated through) the merged output.
  const merged = [];
  for (const content of contents) {
    const last = merged[merged.length - 1];
    if (last && last.role === content.role) {
      last.parts.push(...content.parts);
    } else {
      merged.push({ ...content, parts: [...content.parts] });
    }
  }
  return { systemInstruction, contents: merged };
}
|
|
1688
|
+
function formatToolsForGemini(tools) {
  // Unwrap OpenAI-style tool definitions ({ function: { name, description,
  // parameters } }) into the flat function-declaration objects Gemini expects.
  const declarations = [];
  for (const tool of tools) {
    const fn = tool.function;
    declarations.push({
      name: fn.name,
      description: fn.description,
      parameters: fn.parameters
    });
  }
  return declarations;
}
|
|
8
1695
|
|
|
9
1696
|
// src/adapters/base.ts
|
|
10
1697
|
function formatMessages(messages, systemPrompt) {
|
|
@@ -197,7 +1884,7 @@ function messageToOpenAIContent(message) {
|
|
|
197
1884
|
}
|
|
198
1885
|
return blocks;
|
|
199
1886
|
}
|
|
200
|
-
function
|
|
1887
|
+
function formatMessagesForAnthropic2(messages, systemPrompt) {
|
|
201
1888
|
const formatted = [];
|
|
202
1889
|
for (let i = 0; i < messages.length; i++) {
|
|
203
1890
|
const msg = messages[i];
|
|
@@ -256,7 +1943,7 @@ function formatMessagesForAnthropic(messages, systemPrompt) {
|
|
|
256
1943
|
messages: formatted
|
|
257
1944
|
};
|
|
258
1945
|
}
|
|
259
|
-
function
|
|
1946
|
+
function formatMessagesForOpenAI2(messages, systemPrompt) {
|
|
260
1947
|
const formatted = [];
|
|
261
1948
|
if (systemPrompt) {
|
|
262
1949
|
formatted.push({ role: "system", content: systemPrompt });
|
|
@@ -349,7 +2036,7 @@ var OpenAIAdapter = class {
|
|
|
349
2036
|
messages = processedMessages;
|
|
350
2037
|
}
|
|
351
2038
|
} else {
|
|
352
|
-
messages =
|
|
2039
|
+
messages = formatMessagesForOpenAI2(
|
|
353
2040
|
request.messages,
|
|
354
2041
|
request.systemPrompt
|
|
355
2042
|
);
|
|
@@ -597,7 +2284,7 @@ var AnthropicAdapter = class {
|
|
|
597
2284
|
if (request.rawMessages && request.rawMessages.length > 0) {
|
|
598
2285
|
messages = this.convertToAnthropicMessages(request.rawMessages);
|
|
599
2286
|
} else {
|
|
600
|
-
const formatted =
|
|
2287
|
+
const formatted = formatMessagesForAnthropic2(request.messages);
|
|
601
2288
|
messages = formatted.messages;
|
|
602
2289
|
}
|
|
603
2290
|
const tools = request.actions?.map((action) => ({
|
|
@@ -743,117 +2430,6 @@ var AnthropicAdapter = class {
|
|
|
743
2430
|
function createAnthropicAdapter(config) {
|
|
744
2431
|
return new AnthropicAdapter(config);
|
|
745
2432
|
}
|
|
746
|
-
var GroqAdapter = class {
|
|
747
|
-
constructor(config) {
|
|
748
|
-
this.provider = "groq";
|
|
749
|
-
this.config = config;
|
|
750
|
-
this.model = config.model || "llama-3.1-70b-versatile";
|
|
751
|
-
}
|
|
752
|
-
async *stream(request) {
|
|
753
|
-
const messages = formatMessages(request.messages, request.systemPrompt);
|
|
754
|
-
const tools = request.actions?.length ? formatTools(request.actions) : void 0;
|
|
755
|
-
const messageId = core.generateMessageId();
|
|
756
|
-
yield { type: "message:start", id: messageId };
|
|
757
|
-
try {
|
|
758
|
-
const response = await fetch(
|
|
759
|
-
"https://api.groq.com/openai/v1/chat/completions",
|
|
760
|
-
{
|
|
761
|
-
method: "POST",
|
|
762
|
-
headers: {
|
|
763
|
-
"Content-Type": "application/json",
|
|
764
|
-
Authorization: `Bearer ${this.config.apiKey}`
|
|
765
|
-
},
|
|
766
|
-
body: JSON.stringify({
|
|
767
|
-
model: request.config?.model || this.model,
|
|
768
|
-
messages,
|
|
769
|
-
tools,
|
|
770
|
-
temperature: request.config?.temperature ?? this.config.temperature,
|
|
771
|
-
max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
|
|
772
|
-
stream: true
|
|
773
|
-
}),
|
|
774
|
-
signal: request.signal
|
|
775
|
-
}
|
|
776
|
-
);
|
|
777
|
-
if (!response.ok) {
|
|
778
|
-
throw new Error(`Groq API error: ${response.status}`);
|
|
779
|
-
}
|
|
780
|
-
if (!response.body) {
|
|
781
|
-
throw new Error("No response body");
|
|
782
|
-
}
|
|
783
|
-
const reader = response.body.getReader();
|
|
784
|
-
const decoder = new TextDecoder();
|
|
785
|
-
let buffer = "";
|
|
786
|
-
let currentToolCall = null;
|
|
787
|
-
while (true) {
|
|
788
|
-
const { done, value } = await reader.read();
|
|
789
|
-
if (done) break;
|
|
790
|
-
buffer += decoder.decode(value, { stream: true });
|
|
791
|
-
const lines = buffer.split("\n");
|
|
792
|
-
buffer = lines.pop() || "";
|
|
793
|
-
for (const line of lines) {
|
|
794
|
-
if (!line.startsWith("data: ")) continue;
|
|
795
|
-
const data = line.slice(6).trim();
|
|
796
|
-
if (data === "[DONE]") continue;
|
|
797
|
-
try {
|
|
798
|
-
const chunk = JSON.parse(data);
|
|
799
|
-
const delta = chunk.choices?.[0]?.delta;
|
|
800
|
-
if (delta?.content) {
|
|
801
|
-
yield { type: "message:delta", content: delta.content };
|
|
802
|
-
}
|
|
803
|
-
if (delta?.tool_calls) {
|
|
804
|
-
for (const toolCall of delta.tool_calls) {
|
|
805
|
-
if (toolCall.id) {
|
|
806
|
-
if (currentToolCall) {
|
|
807
|
-
yield {
|
|
808
|
-
type: "action:args",
|
|
809
|
-
id: currentToolCall.id,
|
|
810
|
-
args: currentToolCall.arguments
|
|
811
|
-
};
|
|
812
|
-
}
|
|
813
|
-
currentToolCall = {
|
|
814
|
-
id: toolCall.id,
|
|
815
|
-
name: toolCall.function?.name || "",
|
|
816
|
-
arguments: toolCall.function?.arguments || ""
|
|
817
|
-
};
|
|
818
|
-
yield {
|
|
819
|
-
type: "action:start",
|
|
820
|
-
id: currentToolCall.id,
|
|
821
|
-
name: currentToolCall.name
|
|
822
|
-
};
|
|
823
|
-
} else if (currentToolCall && toolCall.function?.arguments) {
|
|
824
|
-
currentToolCall.arguments += toolCall.function.arguments;
|
|
825
|
-
}
|
|
826
|
-
}
|
|
827
|
-
}
|
|
828
|
-
if (chunk.choices?.[0]?.finish_reason && currentToolCall) {
|
|
829
|
-
yield {
|
|
830
|
-
type: "action:args",
|
|
831
|
-
id: currentToolCall.id,
|
|
832
|
-
args: currentToolCall.arguments
|
|
833
|
-
};
|
|
834
|
-
}
|
|
835
|
-
} catch {
|
|
836
|
-
}
|
|
837
|
-
}
|
|
838
|
-
}
|
|
839
|
-
yield { type: "message:end" };
|
|
840
|
-
yield { type: "done" };
|
|
841
|
-
} catch (error) {
|
|
842
|
-
if (error.name === "AbortError") {
|
|
843
|
-
yield { type: "done" };
|
|
844
|
-
} else {
|
|
845
|
-
yield {
|
|
846
|
-
type: "error",
|
|
847
|
-
message: error instanceof Error ? error.message : "Unknown error",
|
|
848
|
-
code: "GROQ_ERROR"
|
|
849
|
-
};
|
|
850
|
-
}
|
|
851
|
-
}
|
|
852
|
-
}
|
|
853
|
-
};
|
|
854
|
-
function createGroqAdapter(config) {
|
|
855
|
-
return new GroqAdapter(config);
|
|
856
|
-
}
|
|
857
2433
|
var OllamaAdapter = class {
|
|
858
2434
|
constructor(config = {}) {
|
|
859
2435
|
this.provider = "ollama";
|
|
@@ -1020,7 +2596,7 @@ function messageToGeminiContent(msg) {
|
|
|
1020
2596
|
parts
|
|
1021
2597
|
};
|
|
1022
2598
|
}
|
|
1023
|
-
function
|
|
2599
|
+
function formatToolsForGemini2(actions) {
|
|
1024
2600
|
if (!actions || actions.length === 0) return void 0;
|
|
1025
2601
|
return {
|
|
1026
2602
|
functionDeclarations: actions.map((action) => ({
|
|
@@ -1106,7 +2682,7 @@ var GoogleAdapter = class {
|
|
|
1106
2682
|
mergedContents.push({ ...content, parts: [...content.parts] });
|
|
1107
2683
|
}
|
|
1108
2684
|
}
|
|
1109
|
-
const tools =
|
|
2685
|
+
const tools = formatToolsForGemini2(request.actions);
|
|
1110
2686
|
const messageId = core.generateMessageId();
|
|
1111
2687
|
yield { type: "message:start", id: messageId };
|
|
1112
2688
|
try {
|
|
@@ -1212,7 +2788,7 @@ var GoogleAdapter = class {
|
|
|
1212
2788
|
mergedContents.push({ ...content, parts: [...content.parts] });
|
|
1213
2789
|
}
|
|
1214
2790
|
}
|
|
1215
|
-
const tools =
|
|
2791
|
+
const tools = formatToolsForGemini2(request.actions);
|
|
1216
2792
|
const chat = model.startChat({
|
|
1217
2793
|
history: mergedContents.slice(0, -1),
|
|
1218
2794
|
systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
|
|
@@ -1310,7 +2886,7 @@ var XAIAdapter = class {
|
|
|
1310
2886
|
messages = processedMessages;
|
|
1311
2887
|
}
|
|
1312
2888
|
} else {
|
|
1313
|
-
messages =
|
|
2889
|
+
messages = formatMessagesForOpenAI2(
|
|
1314
2890
|
request.messages,
|
|
1315
2891
|
request.systemPrompt
|
|
1316
2892
|
);
|
|
@@ -1399,7 +2975,7 @@ var XAIAdapter = class {
|
|
|
1399
2975
|
}
|
|
1400
2976
|
}
|
|
1401
2977
|
} else {
|
|
1402
|
-
messages =
|
|
2978
|
+
messages = formatMessagesForOpenAI2(
|
|
1403
2979
|
request.messages,
|
|
1404
2980
|
request.systemPrompt
|
|
1405
2981
|
);
|
|
@@ -1496,7 +3072,7 @@ var AzureAdapter = class {
|
|
|
1496
3072
|
messages = processedMessages;
|
|
1497
3073
|
}
|
|
1498
3074
|
} else {
|
|
1499
|
-
messages =
|
|
3075
|
+
messages = formatMessagesForOpenAI2(
|
|
1500
3076
|
request.messages,
|
|
1501
3077
|
request.systemPrompt
|
|
1502
3078
|
);
|
|
@@ -1586,7 +3162,7 @@ var AzureAdapter = class {
|
|
|
1586
3162
|
}
|
|
1587
3163
|
}
|
|
1588
3164
|
} else {
|
|
1589
|
-
messages =
|
|
3165
|
+
messages = formatMessagesForOpenAI2(
|
|
1590
3166
|
request.messages,
|
|
1591
3167
|
request.systemPrompt
|
|
1592
3168
|
);
|
|
@@ -1660,23 +3236,23 @@ function createSSEResponse(generator) {
|
|
|
1660
3236
|
}
|
|
1661
3237
|
|
|
1662
3238
|
// src/server/runtime.ts
|
|
1663
|
-
function buildToolResultForAI(
|
|
3239
|
+
function buildToolResultForAI(tool2, result, args) {
|
|
1664
3240
|
const typedResult = result;
|
|
1665
|
-
const responseMode = typedResult?._aiResponseMode ??
|
|
3241
|
+
const responseMode = typedResult?._aiResponseMode ?? tool2?.aiResponseMode ?? "full";
|
|
1666
3242
|
if (typedResult?._aiContent && typedResult._aiContent.length > 0) {
|
|
1667
3243
|
return typedResult._aiContent;
|
|
1668
3244
|
}
|
|
1669
3245
|
let aiContext;
|
|
1670
3246
|
if (typedResult?._aiContext) {
|
|
1671
3247
|
aiContext = typedResult._aiContext;
|
|
1672
|
-
} else if (
|
|
1673
|
-
aiContext = typeof
|
|
3248
|
+
} else if (tool2?.aiContext) {
|
|
3249
|
+
aiContext = typeof tool2.aiContext === "function" ? tool2.aiContext(typedResult, args) : tool2.aiContext;
|
|
1674
3250
|
}
|
|
1675
3251
|
switch (responseMode) {
|
|
1676
3252
|
case "none":
|
|
1677
3253
|
return aiContext ?? "[Result displayed to user]";
|
|
1678
3254
|
case "brief":
|
|
1679
|
-
return aiContext ?? `[Tool ${
|
|
3255
|
+
return aiContext ?? `[Tool ${tool2?.name ?? "unknown"} executed successfully]`;
|
|
1680
3256
|
case "full":
|
|
1681
3257
|
default:
|
|
1682
3258
|
const fullData = JSON.stringify(result);
|
|
@@ -1745,8 +3321,8 @@ var Runtime = class {
|
|
|
1745
3321
|
}
|
|
1746
3322
|
}
|
|
1747
3323
|
if (config.tools) {
|
|
1748
|
-
for (const
|
|
1749
|
-
this.tools.set(
|
|
3324
|
+
for (const tool2 of config.tools) {
|
|
3325
|
+
this.tools.set(tool2.name, tool2);
|
|
1750
3326
|
}
|
|
1751
3327
|
}
|
|
1752
3328
|
}
|
|
@@ -1776,13 +3352,6 @@ var Runtime = class {
|
|
|
1776
3352
|
temperature: llm.temperature,
|
|
1777
3353
|
maxTokens: llm.maxTokens
|
|
1778
3354
|
});
|
|
1779
|
-
case "groq":
|
|
1780
|
-
return createGroqAdapter({
|
|
1781
|
-
apiKey: llm.apiKey,
|
|
1782
|
-
model: llm.model,
|
|
1783
|
-
temperature: llm.temperature,
|
|
1784
|
-
maxTokens: llm.maxTokens
|
|
1785
|
-
});
|
|
1786
3355
|
case "ollama":
|
|
1787
3356
|
return createOllamaAdapter({
|
|
1788
3357
|
model: llm.model,
|
|
@@ -2014,8 +3583,8 @@ var Runtime = class {
|
|
|
2014
3583
|
/**
|
|
2015
3584
|
* Register a new tool
|
|
2016
3585
|
*/
|
|
2017
|
-
registerTool(
|
|
2018
|
-
this.tools.set(
|
|
3586
|
+
registerTool(tool2) {
|
|
3587
|
+
this.tools.set(tool2.name, tool2);
|
|
2019
3588
|
}
|
|
2020
3589
|
/**
|
|
2021
3590
|
* Unregister a tool
|
|
@@ -2094,12 +3663,12 @@ var Runtime = class {
|
|
|
2094
3663
|
this.config.agentLoop?.maxIterations || 20;
|
|
2095
3664
|
const allTools = [...this.tools.values()];
|
|
2096
3665
|
if (request.tools) {
|
|
2097
|
-
for (const
|
|
3666
|
+
for (const tool2 of request.tools) {
|
|
2098
3667
|
allTools.push({
|
|
2099
|
-
name:
|
|
2100
|
-
description:
|
|
3668
|
+
name: tool2.name,
|
|
3669
|
+
description: tool2.description,
|
|
2101
3670
|
location: "client",
|
|
2102
|
-
inputSchema:
|
|
3671
|
+
inputSchema: tool2.inputSchema
|
|
2103
3672
|
});
|
|
2104
3673
|
}
|
|
2105
3674
|
}
|
|
@@ -2204,8 +3773,8 @@ var Runtime = class {
|
|
|
2204
3773
|
const serverToolCalls = [];
|
|
2205
3774
|
const clientToolCalls = [];
|
|
2206
3775
|
for (const tc of toolCalls) {
|
|
2207
|
-
const
|
|
2208
|
-
if (
|
|
3776
|
+
const tool2 = allTools.find((t) => t.name === tc.name);
|
|
3777
|
+
if (tool2?.location === "server" && tool2.handler) {
|
|
2209
3778
|
serverToolCalls.push(tc);
|
|
2210
3779
|
} else {
|
|
2211
3780
|
clientToolCalls.push(tc);
|
|
@@ -2214,8 +3783,8 @@ var Runtime = class {
|
|
|
2214
3783
|
const serverToolResults = [];
|
|
2215
3784
|
const toolContextData = "toolContext" in this.config ? this.config.toolContext : void 0;
|
|
2216
3785
|
for (const tc of serverToolCalls) {
|
|
2217
|
-
const
|
|
2218
|
-
if (
|
|
3786
|
+
const tool2 = allTools.find((t) => t.name === tc.name);
|
|
3787
|
+
if (tool2?.handler) {
|
|
2219
3788
|
if (debug) {
|
|
2220
3789
|
console.log(`[Copilot SDK] Executing server-side tool: ${tc.name}`);
|
|
2221
3790
|
}
|
|
@@ -2227,13 +3796,13 @@ var Runtime = class {
|
|
|
2227
3796
|
toolContextData
|
|
2228
3797
|
);
|
|
2229
3798
|
try {
|
|
2230
|
-
const result = await
|
|
3799
|
+
const result = await tool2.handler(tc.args, toolContext);
|
|
2231
3800
|
serverToolResults.push({
|
|
2232
3801
|
id: tc.id,
|
|
2233
3802
|
name: tc.name,
|
|
2234
3803
|
args: tc.args,
|
|
2235
3804
|
result,
|
|
2236
|
-
tool
|
|
3805
|
+
tool: tool2
|
|
2237
3806
|
});
|
|
2238
3807
|
yield {
|
|
2239
3808
|
type: "action:end",
|
|
@@ -2250,7 +3819,7 @@ var Runtime = class {
|
|
|
2250
3819
|
name: tc.name,
|
|
2251
3820
|
args: tc.args,
|
|
2252
3821
|
result: errorResult,
|
|
2253
|
-
tool
|
|
3822
|
+
tool: tool2
|
|
2254
3823
|
});
|
|
2255
3824
|
yield {
|
|
2256
3825
|
type: "action:end",
|
|
@@ -2369,12 +3938,12 @@ var Runtime = class {
|
|
|
2369
3938
|
const maxIterations = this.config.agentLoop?.maxIterations || 20;
|
|
2370
3939
|
const allTools = [...this.tools.values()];
|
|
2371
3940
|
if (request.tools) {
|
|
2372
|
-
for (const
|
|
3941
|
+
for (const tool2 of request.tools) {
|
|
2373
3942
|
allTools.push({
|
|
2374
|
-
name:
|
|
2375
|
-
description:
|
|
3943
|
+
name: tool2.name,
|
|
3944
|
+
description: tool2.description,
|
|
2376
3945
|
location: "client",
|
|
2377
|
-
inputSchema:
|
|
3946
|
+
inputSchema: tool2.inputSchema
|
|
2378
3947
|
});
|
|
2379
3948
|
}
|
|
2380
3949
|
}
|
|
@@ -2439,8 +4008,8 @@ var Runtime = class {
|
|
|
2439
4008
|
const serverToolCalls = [];
|
|
2440
4009
|
const clientToolCalls = [];
|
|
2441
4010
|
for (const tc of result.toolCalls) {
|
|
2442
|
-
const
|
|
2443
|
-
if (
|
|
4011
|
+
const tool2 = allTools.find((t) => t.name === tc.name);
|
|
4012
|
+
if (tool2?.location === "server" && tool2.handler) {
|
|
2444
4013
|
serverToolCalls.push(tc);
|
|
2445
4014
|
} else {
|
|
2446
4015
|
clientToolCalls.push({
|
|
@@ -2465,8 +4034,8 @@ var Runtime = class {
|
|
|
2465
4034
|
const serverToolResults = [];
|
|
2466
4035
|
const toolContextData = "toolContext" in this.config ? this.config.toolContext : void 0;
|
|
2467
4036
|
for (const tc of serverToolCalls) {
|
|
2468
|
-
const
|
|
2469
|
-
if (
|
|
4037
|
+
const tool2 = allTools.find((t) => t.name === tc.name);
|
|
4038
|
+
if (tool2?.handler) {
|
|
2470
4039
|
if (debug) {
|
|
2471
4040
|
console.log(`[Copilot SDK] Executing tool: ${tc.name}`);
|
|
2472
4041
|
}
|
|
@@ -2478,13 +4047,13 @@ var Runtime = class {
|
|
|
2478
4047
|
toolContextData
|
|
2479
4048
|
);
|
|
2480
4049
|
try {
|
|
2481
|
-
const toolResult = await
|
|
4050
|
+
const toolResult = await tool2.handler(tc.args, toolContext);
|
|
2482
4051
|
serverToolResults.push({
|
|
2483
4052
|
id: tc.id,
|
|
2484
4053
|
name: tc.name,
|
|
2485
4054
|
args: tc.args,
|
|
2486
4055
|
result: toolResult,
|
|
2487
|
-
tool
|
|
4056
|
+
tool: tool2
|
|
2488
4057
|
});
|
|
2489
4058
|
yield {
|
|
2490
4059
|
type: "action:end",
|
|
@@ -2501,7 +4070,7 @@ var Runtime = class {
|
|
|
2501
4070
|
name: tc.name,
|
|
2502
4071
|
args: tc.args,
|
|
2503
4072
|
result: errorResult,
|
|
2504
|
-
tool
|
|
4073
|
+
tool: tool2
|
|
2505
4074
|
});
|
|
2506
4075
|
yield {
|
|
2507
4076
|
type: "action:end",
|
|
@@ -2608,11 +4177,11 @@ var Runtime = class {
|
|
|
2608
4177
|
* Convert tools to legacy action format (for adapter compatibility)
|
|
2609
4178
|
*/
|
|
2610
4179
|
convertToolsToActions(tools) {
|
|
2611
|
-
return tools.map((
|
|
2612
|
-
name:
|
|
2613
|
-
description:
|
|
2614
|
-
parameters: this.convertInputSchemaToParameters(
|
|
2615
|
-
handler:
|
|
4180
|
+
return tools.map((tool2) => ({
|
|
4181
|
+
name: tool2.name,
|
|
4182
|
+
description: tool2.description,
|
|
4183
|
+
parameters: this.convertInputSchemaToParameters(tool2.inputSchema),
|
|
4184
|
+
handler: tool2.handler || (async () => ({ handled: false }))
|
|
2616
4185
|
}));
|
|
2617
4186
|
}
|
|
2618
4187
|
/**
|
|
@@ -2834,7 +4403,7 @@ function getModelCapabilities(providerName, modelId) {
|
|
|
2834
4403
|
}
|
|
2835
4404
|
|
|
2836
4405
|
// src/providers/openai/index.ts
|
|
2837
|
-
var
|
|
4406
|
+
var OPENAI_MODELS2 = {
|
|
2838
4407
|
// GPT-4o series
|
|
2839
4408
|
"gpt-4o": {
|
|
2840
4409
|
vision: true,
|
|
@@ -2945,7 +4514,7 @@ function createOpenAI(config = {}) {
|
|
|
2945
4514
|
const apiKey = config.apiKey ?? process.env.OPENAI_API_KEY ?? "";
|
|
2946
4515
|
return {
|
|
2947
4516
|
name: "openai",
|
|
2948
|
-
supportedModels: Object.keys(
|
|
4517
|
+
supportedModels: Object.keys(OPENAI_MODELS2),
|
|
2949
4518
|
languageModel(modelId) {
|
|
2950
4519
|
return createOpenAIAdapter({
|
|
2951
4520
|
apiKey,
|
|
@@ -2954,7 +4523,7 @@ function createOpenAI(config = {}) {
|
|
|
2954
4523
|
});
|
|
2955
4524
|
},
|
|
2956
4525
|
getCapabilities(modelId) {
|
|
2957
|
-
const model =
|
|
4526
|
+
const model = OPENAI_MODELS2[modelId] ?? OPENAI_MODELS2["gpt-4o"];
|
|
2958
4527
|
return {
|
|
2959
4528
|
supportsVision: model.vision,
|
|
2960
4529
|
supportsTools: model.tools,
|
|
@@ -2976,7 +4545,7 @@ function createOpenAI(config = {}) {
|
|
|
2976
4545
|
}
|
|
2977
4546
|
|
|
2978
4547
|
// src/providers/anthropic/index.ts
|
|
2979
|
-
var
|
|
4548
|
+
var ANTHROPIC_MODELS2 = {
|
|
2980
4549
|
// Claude 4 series (latest)
|
|
2981
4550
|
"claude-sonnet-4-20250514": {
|
|
2982
4551
|
vision: true,
|
|
@@ -3045,7 +4614,7 @@ function createAnthropic(config = {}) {
|
|
|
3045
4614
|
const apiKey = config.apiKey ?? process.env.ANTHROPIC_API_KEY ?? "";
|
|
3046
4615
|
return {
|
|
3047
4616
|
name: "anthropic",
|
|
3048
|
-
supportedModels: Object.keys(
|
|
4617
|
+
supportedModels: Object.keys(ANTHROPIC_MODELS2),
|
|
3049
4618
|
languageModel(modelId) {
|
|
3050
4619
|
return createAnthropicAdapter({
|
|
3051
4620
|
apiKey,
|
|
@@ -3055,7 +4624,7 @@ function createAnthropic(config = {}) {
|
|
|
3055
4624
|
});
|
|
3056
4625
|
},
|
|
3057
4626
|
getCapabilities(modelId) {
|
|
3058
|
-
const model =
|
|
4627
|
+
const model = ANTHROPIC_MODELS2[modelId] ?? ANTHROPIC_MODELS2["claude-3-5-sonnet-latest"];
|
|
3059
4628
|
return {
|
|
3060
4629
|
supportsVision: model.vision,
|
|
3061
4630
|
supportsTools: model.tools,
|
|
@@ -3080,90 +4649,6 @@ function createAnthropic(config = {}) {
|
|
|
3080
4649
|
};
|
|
3081
4650
|
}
|
|
3082
4651
|
|
|
3083
|
-
// src/providers/groq/index.ts
|
|
3084
|
-
var GROQ_MODELS = {
|
|
3085
|
-
// Llama 3.3 series
|
|
3086
|
-
"llama-3.3-70b-versatile": {
|
|
3087
|
-
vision: false,
|
|
3088
|
-
tools: true,
|
|
3089
|
-
maxTokens: 32768
|
|
3090
|
-
},
|
|
3091
|
-
"llama-3.3-70b-specdec": {
|
|
3092
|
-
vision: false,
|
|
3093
|
-
tools: true,
|
|
3094
|
-
maxTokens: 8192
|
|
3095
|
-
},
|
|
3096
|
-
// Llama 3.2 Vision series
|
|
3097
|
-
"llama-3.2-90b-vision-preview": {
|
|
3098
|
-
vision: true,
|
|
3099
|
-
tools: true,
|
|
3100
|
-
maxTokens: 8192
|
|
3101
|
-
},
|
|
3102
|
-
"llama-3.2-11b-vision-preview": {
|
|
3103
|
-
vision: true,
|
|
3104
|
-
tools: true,
|
|
3105
|
-
maxTokens: 8192
|
|
3106
|
-
},
|
|
3107
|
-
// Llama 3.1 series
|
|
3108
|
-
"llama-3.1-70b-versatile": {
|
|
3109
|
-
vision: false,
|
|
3110
|
-
tools: true,
|
|
3111
|
-
maxTokens: 32768
|
|
3112
|
-
},
|
|
3113
|
-
"llama-3.1-8b-instant": {
|
|
3114
|
-
vision: false,
|
|
3115
|
-
tools: true,
|
|
3116
|
-
maxTokens: 8192
|
|
3117
|
-
},
|
|
3118
|
-
// Mixtral series
|
|
3119
|
-
"mixtral-8x7b-32768": {
|
|
3120
|
-
vision: false,
|
|
3121
|
-
tools: true,
|
|
3122
|
-
maxTokens: 32768
|
|
3123
|
-
},
|
|
3124
|
-
// Gemma series
|
|
3125
|
-
"gemma2-9b-it": {
|
|
3126
|
-
vision: false,
|
|
3127
|
-
tools: false,
|
|
3128
|
-
maxTokens: 8192
|
|
3129
|
-
},
|
|
3130
|
-
// DeepSeek
|
|
3131
|
-
"deepseek-r1-distill-llama-70b": {
|
|
3132
|
-
vision: false,
|
|
3133
|
-
tools: true,
|
|
3134
|
-
maxTokens: 8192
|
|
3135
|
-
}
|
|
3136
|
-
};
|
|
3137
|
-
function createGroq(config = {}) {
|
|
3138
|
-
const apiKey = config.apiKey ?? process.env.GROQ_API_KEY ?? "";
|
|
3139
|
-
return {
|
|
3140
|
-
name: "groq",
|
|
3141
|
-
supportedModels: Object.keys(GROQ_MODELS),
|
|
3142
|
-
languageModel(modelId) {
|
|
3143
|
-
return createGroqAdapter({
|
|
3144
|
-
apiKey,
|
|
3145
|
-
model: modelId
|
|
3146
|
-
});
|
|
3147
|
-
},
|
|
3148
|
-
getCapabilities(modelId) {
|
|
3149
|
-
const model = GROQ_MODELS[modelId] ?? GROQ_MODELS["llama-3.3-70b-versatile"];
|
|
3150
|
-
return {
|
|
3151
|
-
supportsVision: model.vision,
|
|
3152
|
-
supportsTools: model.tools,
|
|
3153
|
-
supportsThinking: false,
|
|
3154
|
-
supportsStreaming: true,
|
|
3155
|
-
supportsPDF: false,
|
|
3156
|
-
supportsAudio: false,
|
|
3157
|
-
supportsVideo: false,
|
|
3158
|
-
maxTokens: model.maxTokens,
|
|
3159
|
-
supportedImageTypes: model.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
|
|
3160
|
-
supportsJsonMode: true,
|
|
3161
|
-
supportsSystemMessages: true
|
|
3162
|
-
};
|
|
3163
|
-
}
|
|
3164
|
-
};
|
|
3165
|
-
}
|
|
3166
|
-
|
|
3167
4652
|
// src/providers/ollama/index.ts
|
|
3168
4653
|
var OLLAMA_MODELS = {
|
|
3169
4654
|
// Llama series
|
|
@@ -3297,7 +4782,7 @@ function createOllama(config = {}) {
|
|
|
3297
4782
|
}
|
|
3298
4783
|
|
|
3299
4784
|
// src/providers/google/index.ts
|
|
3300
|
-
var
|
|
4785
|
+
var GOOGLE_MODELS2 = {
|
|
3301
4786
|
// Gemini 2.0 series (latest)
|
|
3302
4787
|
"gemini-2.0-flash": {
|
|
3303
4788
|
vision: true,
|
|
@@ -3397,7 +4882,7 @@ function createGoogle(config = {}) {
|
|
|
3397
4882
|
const apiKey = config.apiKey ?? process.env.GOOGLE_API_KEY ?? "";
|
|
3398
4883
|
return {
|
|
3399
4884
|
name: "google",
|
|
3400
|
-
supportedModels: Object.keys(
|
|
4885
|
+
supportedModels: Object.keys(GOOGLE_MODELS2),
|
|
3401
4886
|
languageModel(modelId) {
|
|
3402
4887
|
return createGoogleAdapter({
|
|
3403
4888
|
apiKey,
|
|
@@ -3407,7 +4892,7 @@ function createGoogle(config = {}) {
|
|
|
3407
4892
|
});
|
|
3408
4893
|
},
|
|
3409
4894
|
getCapabilities(modelId) {
|
|
3410
|
-
const model =
|
|
4895
|
+
const model = GOOGLE_MODELS2[modelId] ?? GOOGLE_MODELS2["gemini-2.0-flash"];
|
|
3411
4896
|
return {
|
|
3412
4897
|
supportsVision: model.vision,
|
|
3413
4898
|
supportsTools: model.tools,
|
|
@@ -3451,56 +4936,95 @@ function createGoogle(config = {}) {
|
|
|
3451
4936
|
}
|
|
3452
4937
|
|
|
3453
4938
|
// src/providers/xai/index.ts
|
|
3454
|
-
var
|
|
3455
|
-
// Grok
|
|
3456
|
-
"grok-
|
|
4939
|
+
var XAI_MODELS2 = {
|
|
4940
|
+
// Grok 4.1 Fast (Latest - December 2025)
|
|
4941
|
+
"grok-4-1-fast-reasoning": {
|
|
4942
|
+
vision: false,
|
|
4943
|
+
tools: true,
|
|
4944
|
+
maxTokens: 2e6,
|
|
4945
|
+
outputTokens: 16384
|
|
4946
|
+
},
|
|
4947
|
+
"grok-4-1-fast-non-reasoning": {
|
|
4948
|
+
vision: false,
|
|
4949
|
+
tools: true,
|
|
4950
|
+
maxTokens: 2e6,
|
|
4951
|
+
outputTokens: 16384
|
|
4952
|
+
},
|
|
4953
|
+
// Grok 4 Fast (September 2025)
|
|
4954
|
+
"grok-4-fast-reasoning": {
|
|
4955
|
+
vision: false,
|
|
4956
|
+
tools: true,
|
|
4957
|
+
maxTokens: 2e6,
|
|
4958
|
+
outputTokens: 16384
|
|
4959
|
+
},
|
|
4960
|
+
"grok-4-fast-non-reasoning": {
|
|
4961
|
+
vision: false,
|
|
4962
|
+
tools: true,
|
|
4963
|
+
maxTokens: 2e6,
|
|
4964
|
+
outputTokens: 16384
|
|
4965
|
+
},
|
|
4966
|
+
// Grok 4 (July 2025)
|
|
4967
|
+
"grok-4": {
|
|
3457
4968
|
vision: true,
|
|
3458
4969
|
tools: true,
|
|
3459
|
-
maxTokens:
|
|
3460
|
-
outputTokens:
|
|
4970
|
+
maxTokens: 256e3,
|
|
4971
|
+
outputTokens: 16384
|
|
3461
4972
|
},
|
|
3462
|
-
"grok-
|
|
4973
|
+
"grok-4-0709": {
|
|
3463
4974
|
vision: true,
|
|
3464
4975
|
tools: true,
|
|
3465
|
-
maxTokens:
|
|
3466
|
-
outputTokens:
|
|
4976
|
+
maxTokens: 256e3,
|
|
4977
|
+
outputTokens: 16384
|
|
3467
4978
|
},
|
|
3468
|
-
|
|
3469
|
-
|
|
4979
|
+
// Grok 3 (February 2025) - Stable
|
|
4980
|
+
"grok-3-beta": {
|
|
4981
|
+
vision: true,
|
|
3470
4982
|
tools: true,
|
|
3471
4983
|
maxTokens: 131072,
|
|
3472
|
-
outputTokens:
|
|
4984
|
+
outputTokens: 8192
|
|
3473
4985
|
},
|
|
3474
|
-
"grok-
|
|
4986
|
+
"grok-3-fast-beta": {
|
|
3475
4987
|
vision: false,
|
|
3476
4988
|
tools: true,
|
|
3477
4989
|
maxTokens: 131072,
|
|
3478
|
-
outputTokens:
|
|
4990
|
+
outputTokens: 8192
|
|
3479
4991
|
},
|
|
3480
|
-
|
|
3481
|
-
|
|
3482
|
-
vision: true,
|
|
4992
|
+
"grok-3-mini-beta": {
|
|
4993
|
+
vision: false,
|
|
3483
4994
|
tools: true,
|
|
3484
4995
|
maxTokens: 32768,
|
|
3485
|
-
outputTokens:
|
|
4996
|
+
outputTokens: 8192
|
|
3486
4997
|
},
|
|
3487
|
-
"grok-
|
|
3488
|
-
vision:
|
|
4998
|
+
"grok-3-mini-fast-beta": {
|
|
4999
|
+
vision: false,
|
|
3489
5000
|
tools: true,
|
|
3490
5001
|
maxTokens: 32768,
|
|
3491
|
-
outputTokens:
|
|
5002
|
+
outputTokens: 8192
|
|
3492
5003
|
},
|
|
3493
|
-
// Grok
|
|
3494
|
-
"grok-
|
|
5004
|
+
// Grok Code Fast (August 2025)
|
|
5005
|
+
"grok-code-fast-1": {
|
|
3495
5006
|
vision: false,
|
|
3496
5007
|
tools: true,
|
|
5008
|
+
maxTokens: 256e3,
|
|
5009
|
+
outputTokens: 16384
|
|
5010
|
+
},
|
|
5011
|
+
// Grok 2 (Legacy - for backward compatibility)
|
|
5012
|
+
"grok-2": {
|
|
5013
|
+
vision: true,
|
|
5014
|
+
tools: true,
|
|
3497
5015
|
maxTokens: 131072,
|
|
3498
5016
|
outputTokens: 4096
|
|
3499
5017
|
},
|
|
3500
|
-
"grok-
|
|
5018
|
+
"grok-2-latest": {
|
|
3501
5019
|
vision: true,
|
|
3502
5020
|
tools: true,
|
|
3503
|
-
maxTokens:
|
|
5021
|
+
maxTokens: 131072,
|
|
5022
|
+
outputTokens: 4096
|
|
5023
|
+
},
|
|
5024
|
+
"grok-2-mini": {
|
|
5025
|
+
vision: false,
|
|
5026
|
+
tools: true,
|
|
5027
|
+
maxTokens: 131072,
|
|
3504
5028
|
outputTokens: 4096
|
|
3505
5029
|
}
|
|
3506
5030
|
};
|
|
@@ -3508,7 +5032,7 @@ function createXAI(config = {}) {
|
|
|
3508
5032
|
const apiKey = config.apiKey ?? process.env.XAI_API_KEY ?? "";
|
|
3509
5033
|
return {
|
|
3510
5034
|
name: "xai",
|
|
3511
|
-
supportedModels: Object.keys(
|
|
5035
|
+
supportedModels: Object.keys(XAI_MODELS2),
|
|
3512
5036
|
languageModel(modelId) {
|
|
3513
5037
|
return createXAIAdapter({
|
|
3514
5038
|
apiKey,
|
|
@@ -3517,7 +5041,7 @@ function createXAI(config = {}) {
|
|
|
3517
5041
|
});
|
|
3518
5042
|
},
|
|
3519
5043
|
getCapabilities(modelId) {
|
|
3520
|
-
const model =
|
|
5044
|
+
const model = XAI_MODELS2[modelId] ?? XAI_MODELS2["grok-3-fast-beta"];
|
|
3521
5045
|
return {
|
|
3522
5046
|
supportsVision: model.vision,
|
|
3523
5047
|
supportsTools: model.tools,
|
|
@@ -3596,12 +5120,12 @@ function createAzure(config) {
|
|
|
3596
5120
|
|
|
3597
5121
|
// src/providers/openai.ts
|
|
3598
5122
|
function transformTools(tools) {
|
|
3599
|
-
return tools.map((
|
|
5123
|
+
return tools.map((tool2) => ({
|
|
3600
5124
|
type: "function",
|
|
3601
5125
|
function: {
|
|
3602
|
-
name:
|
|
3603
|
-
description:
|
|
3604
|
-
parameters:
|
|
5126
|
+
name: tool2.name,
|
|
5127
|
+
description: tool2.description,
|
|
5128
|
+
parameters: tool2.inputSchema
|
|
3605
5129
|
}
|
|
3606
5130
|
}));
|
|
3607
5131
|
}
|
|
@@ -3687,10 +5211,10 @@ var openaiFormatter = {
|
|
|
3687
5211
|
|
|
3688
5212
|
// src/providers/anthropic.ts
|
|
3689
5213
|
function transformTools2(tools) {
|
|
3690
|
-
return tools.map((
|
|
3691
|
-
name:
|
|
3692
|
-
description:
|
|
3693
|
-
input_schema:
|
|
5214
|
+
return tools.map((tool2) => ({
|
|
5215
|
+
name: tool2.name,
|
|
5216
|
+
description: tool2.description,
|
|
5217
|
+
input_schema: tool2.inputSchema
|
|
3694
5218
|
}));
|
|
3695
5219
|
}
|
|
3696
5220
|
function parseToolCalls2(response) {
|
|
@@ -3768,10 +5292,10 @@ var anthropicFormatter = {
|
|
|
3768
5292
|
function transformTools3(tools) {
|
|
3769
5293
|
return [
|
|
3770
5294
|
{
|
|
3771
|
-
functionDeclarations: tools.map((
|
|
3772
|
-
name:
|
|
3773
|
-
description:
|
|
3774
|
-
parameters:
|
|
5295
|
+
functionDeclarations: tools.map((tool2) => ({
|
|
5296
|
+
name: tool2.name,
|
|
5297
|
+
description: tool2.description,
|
|
5298
|
+
parameters: tool2.inputSchema
|
|
3775
5299
|
}))
|
|
3776
5300
|
}
|
|
3777
5301
|
];
|
|
@@ -3897,7 +5421,6 @@ var formatters = {
|
|
|
3897
5421
|
gemini: geminiFormatter,
|
|
3898
5422
|
// Alias
|
|
3899
5423
|
// OpenAI-compatible providers use openaiFormatter
|
|
3900
|
-
groq: openaiFormatter,
|
|
3901
5424
|
ollama: openaiFormatter,
|
|
3902
5425
|
xai: openaiFormatter,
|
|
3903
5426
|
azure: openaiFormatter
|
|
@@ -3921,7 +5444,6 @@ function getSupportedProviders() {
|
|
|
3921
5444
|
// src/providers/index.ts
|
|
3922
5445
|
registerProvider("openai", (config) => createOpenAI(config));
|
|
3923
5446
|
registerProvider("anthropic", (config) => createAnthropic(config));
|
|
3924
|
-
registerProvider("groq", (config) => createGroq(config));
|
|
3925
5447
|
registerProvider("ollama", (config) => createOllama(config));
|
|
3926
5448
|
registerProvider("google", (config) => createGoogle(config));
|
|
3927
5449
|
registerProvider("xai", (config) => createXAI(config));
|
|
@@ -4080,8 +5602,8 @@ function buildConversation(messages, systemPrompt) {
|
|
|
4080
5602
|
async function executeToolCalls(toolCalls, tools, executeServerTool, waitForClientToolResult, emitEvent, debug) {
|
|
4081
5603
|
const results = [];
|
|
4082
5604
|
for (const toolCall of toolCalls) {
|
|
4083
|
-
const
|
|
4084
|
-
if (!
|
|
5605
|
+
const tool2 = tools.find((t) => t.name === toolCall.name);
|
|
5606
|
+
if (!tool2) {
|
|
4085
5607
|
if (debug) {
|
|
4086
5608
|
console.warn(`[AgentLoop] Unknown tool: ${toolCall.name}`);
|
|
4087
5609
|
}
|
|
@@ -4108,9 +5630,9 @@ async function executeToolCalls(toolCalls, tools, executeServerTool, waitForClie
|
|
|
4108
5630
|
});
|
|
4109
5631
|
try {
|
|
4110
5632
|
let response;
|
|
4111
|
-
if (
|
|
4112
|
-
if (
|
|
4113
|
-
response = await
|
|
5633
|
+
if (tool2.location === "server") {
|
|
5634
|
+
if (tool2.handler) {
|
|
5635
|
+
response = await tool2.handler(toolCall.input);
|
|
4114
5636
|
} else if (executeServerTool) {
|
|
4115
5637
|
response = await executeServerTool(toolCall.name, toolCall.input);
|
|
4116
5638
|
} else {
|
|
@@ -4169,13 +5691,14 @@ async function executeToolCalls(toolCalls, tools, executeServerTool, waitForClie
|
|
|
4169
5691
|
|
|
4170
5692
|
exports.AnthropicAdapter = AnthropicAdapter;
|
|
4171
5693
|
exports.AzureAdapter = AzureAdapter;
|
|
5694
|
+
exports.DEFAULT_CAPABILITIES = DEFAULT_CAPABILITIES;
|
|
4172
5695
|
exports.DEFAULT_MAX_ITERATIONS = DEFAULT_MAX_ITERATIONS;
|
|
4173
5696
|
exports.GoogleAdapter = GoogleAdapter;
|
|
4174
|
-
exports.GroqAdapter = GroqAdapter;
|
|
4175
5697
|
exports.OllamaAdapter = OllamaAdapter;
|
|
4176
5698
|
exports.OpenAIAdapter = OpenAIAdapter;
|
|
4177
5699
|
exports.Runtime = Runtime;
|
|
4178
5700
|
exports.XAIAdapter = XAIAdapter;
|
|
5701
|
+
exports.anthropic = anthropic;
|
|
4179
5702
|
exports.anthropicFormatter = anthropicFormatter;
|
|
4180
5703
|
exports.createAnthropic = createAnthropic;
|
|
4181
5704
|
exports.createAnthropicAdapter = createAnthropicAdapter;
|
|
@@ -4185,8 +5708,6 @@ exports.createEventStream = createEventStream;
|
|
|
4185
5708
|
exports.createExpressMiddleware = createExpressMiddleware;
|
|
4186
5709
|
exports.createGoogle = createGoogle;
|
|
4187
5710
|
exports.createGoogleAdapter = createGoogleAdapter;
|
|
4188
|
-
exports.createGroq = createGroq;
|
|
4189
|
-
exports.createGroqAdapter = createGroqAdapter;
|
|
4190
5711
|
exports.createHonoApp = createHonoApp;
|
|
4191
5712
|
exports.createNextHandler = createNextHandler;
|
|
4192
5713
|
exports.createNodeHandler = createNodeHandler;
|
|
@@ -4200,17 +5721,26 @@ exports.createSSEResponse = createSSEResponse;
|
|
|
4200
5721
|
exports.createXAI = createXAI;
|
|
4201
5722
|
exports.createXAIAdapter = createXAIAdapter;
|
|
4202
5723
|
exports.formatSSEData = formatSSEData;
|
|
5724
|
+
exports.formatToolsForAnthropic = formatToolsForAnthropic;
|
|
5725
|
+
exports.formatToolsForGoogle = formatToolsForGoogle;
|
|
5726
|
+
exports.formatToolsForOpenAI = formatToolsForOpenAI;
|
|
4203
5727
|
exports.geminiFormatter = geminiFormatter;
|
|
5728
|
+
exports.generateText = generateText;
|
|
4204
5729
|
exports.getAvailableProviders = getAvailableProviders;
|
|
4205
5730
|
exports.getFormatter = getFormatter;
|
|
4206
5731
|
exports.getModelCapabilities = getModelCapabilities;
|
|
4207
5732
|
exports.getProvider = getProvider;
|
|
4208
5733
|
exports.getSupportedProviders = getSupportedProviders;
|
|
5734
|
+
exports.google = google;
|
|
4209
5735
|
exports.hasProvider = hasProvider;
|
|
4210
5736
|
exports.isProviderSupported = isProviderSupported;
|
|
4211
5737
|
exports.listProviders = listProviders;
|
|
5738
|
+
exports.openai = openai;
|
|
4212
5739
|
exports.openaiFormatter = openaiFormatter;
|
|
4213
5740
|
exports.registerProvider = registerProvider;
|
|
4214
5741
|
exports.runAgentLoop = runAgentLoop;
|
|
5742
|
+
exports.streamText = streamText;
|
|
5743
|
+
exports.tool = tool;
|
|
5744
|
+
exports.xai = xai;
|
|
4215
5745
|
//# sourceMappingURL=index.js.map
|
|
4216
5746
|
//# sourceMappingURL=index.js.map
|