@auto-engineer/ai-gateway 0.11.20 → 0.12.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +1 -1
- package/CHANGELOG.md +8 -0
- package/dist/src/core/context.d.ts +2 -1
- package/dist/src/core/context.d.ts.map +1 -1
- package/dist/src/core/context.js.map +1 -1
- package/dist/src/core/generators.d.ts +2 -2
- package/dist/src/core/generators.d.ts.map +1 -1
- package/dist/src/core/generators.js +38 -106
- package/dist/src/core/generators.js.map +1 -1
- package/dist/src/core/types.d.ts +1 -1
- package/dist/src/core/types.d.ts.map +1 -1
- package/dist/src/node/wrappers.d.ts.map +1 -1
- package/dist/src/node/wrappers.js +0 -2
- package/dist/src/node/wrappers.js.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +6 -6
- package/src/core/context.ts +3 -2
- package/src/core/generators.ts +38 -120
- package/src/core/types.ts +1 -1
- package/src/node/wrappers.ts +0 -2
package/package.json
CHANGED
@@ -18,11 +18,11 @@
     }
   },
   "dependencies": {
-    "@ai-sdk/anthropic": "^
-    "@ai-sdk/google": "^
-    "@ai-sdk/openai": "^
-    "@ai-sdk/xai": "^
-    "ai": "^
+    "@ai-sdk/anthropic": "^2.0.41",
+    "@ai-sdk/google": "^2.0.28",
+    "@ai-sdk/openai": "^2.0.62",
+    "@ai-sdk/xai": "^2.0.31",
+    "ai": "^5.0.87",
     "debug": "^4.4.0",
     "zod": "^3.25.67"
   },
@@ -33,7 +33,7 @@
   "publishConfig": {
     "access": "public"
   },
-  "version": "0.11.20",
+  "version": "0.12.1",
   "scripts": {
     "build": "tsc && tsx ../../scripts/fix-esm-imports.ts",
     "test": "vitest run --reporter=dot",
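All four provider packages move to their 2.x lines and `ai` moves to 5.x, i.e. the AI SDK 5 generation. A minimal sketch of the v5 call shape, assuming AI SDK 5's rename of the token-limit option from `maxTokens` to `maxOutputTokens` (the model id is illustrative):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// AI SDK 5 renamed the output-limit option; the old v4 name `maxTokens`
// is no longer part of the generateText options.
const { text } = await generateText({
  model: openai('gpt-4o-mini'), // illustrative model id
  prompt: 'Summarize this changelog entry in one sentence.',
  maxOutputTokens: 256,
});
console.log(text);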
package/src/core/context.ts
CHANGED
@@ -2,6 +2,7 @@ import { createOpenAI } from '@ai-sdk/openai';
 import { createAnthropic } from '@ai-sdk/anthropic';
 import { createGoogleGenerativeAI } from '@ai-sdk/google';
 import { createXai } from '@ai-sdk/xai';
+import type { LanguageModelV2 } from '@ai-sdk/provider';
 import { AIProvider, AIContext, AIConfig } from './types';
 import { DEFAULT_MODELS } from './constants';
 import { createCustomProvider } from './providers/custom';
@@ -91,7 +92,7 @@ const providerFactories = {
   },
 };
 
-function createProviderModel(provider: AIProvider, modelName: string, context: AIContext) {
+function createProviderModel(provider: AIProvider, modelName: string, context: AIContext): LanguageModelV2 {
   const factory = providerFactories[provider];
   if (factory == null) {
     throw new Error(`Unknown provider: ${provider as string}`);
@@ -99,7 +100,7 @@ function createProviderModel(provider: AIProvider, modelName: string, context: A
   return factory(modelName, context);
 }
 
-export function getModel(provider: AIProvider, model: string | undefined, context: AIContext) {
+export function getModel(provider: AIProvider, model: string | undefined, context: AIContext): LanguageModelV2 {
   const modelName = model ?? getDefaultModel(provider, context);
   debugConfig('Creating model instance for provider %s with model %s', provider, modelName);
   return createProviderModel(provider, modelName, context);
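The explicit `LanguageModelV2` return type pins every provider factory to the v5 provider contract. A minimal sketch, with a hypothetical two-entry factory map, of what the annotation buys:

import type { LanguageModelV2 } from '@ai-sdk/provider';
import { createOpenAI } from '@ai-sdk/openai';
import { createAnthropic } from '@ai-sdk/anthropic';

// Hypothetical, pared-down factory map: because every factory must return
// a LanguageModelV2, a provider producing an incompatible model type now
// fails at compile time instead of at the first API call.
const factories: Record<string, (modelName: string) => LanguageModelV2> = {
  openai: (modelName) => createOpenAI({ apiKey: process.env.OPENAI_API_KEY })(modelName),
  anthropic: (modelName) => createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY })(modelName),
};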
package/src/core/generators.ts
CHANGED
@@ -38,8 +38,7 @@ export async function generateText(context: AIContext, prompt: string, options:
   const result = await aiGenerateText({
     model: modelInstance,
     prompt,
-    temperature: finalOptions.temperature,
-    maxTokens: finalOptions.maxTokens,
+    ...(finalOptions.temperature !== undefined && { temperature: finalOptions.temperature }),
   });
 
   debugAPI('API call successful - response length: %d, usage: %o', result.text.length, result.usage);
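This conditional spread recurs in every hunk below: the key is omitted entirely rather than sent as `temperature: undefined`, which matters for APIs (and `exactOptionalPropertyTypes` setups) that distinguish a missing key from an explicit undefined, while `maxTokens` is dropped at every call site, consistent with that option name not surviving into AI SDK 5. A standalone sketch of the pattern:

interface Opts {
  temperature?: number;
}

function buildRequest(opts: Opts) {
  return {
    prompt: 'hello',
    // If temperature is undefined, the && evaluates to false, and
    // spreading a non-object like false contributes no properties,
    // so the key is absent rather than present with value undefined.
    ...(opts.temperature !== undefined && { temperature: opts.temperature }),
  };
}

buildRequest({});                   // { prompt: 'hello' }
buildRequest({ temperature: 0.2 }); // { prompt: 'hello', temperature: 0.2 }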
@@ -61,8 +60,7 @@ export async function* streamText(context: AIContext, prompt: string, options: A
   const stream = aiStreamText({
     model: modelInstance,
     prompt,
-    temperature: finalOptions.temperature,
-    maxTokens: finalOptions.maxTokens,
+    ...(finalOptions.temperature !== undefined && { temperature: finalOptions.temperature }),
   });
 
   let totalChunks = 0;
@@ -140,8 +138,7 @@ export async function generateTextWithImage(
         ],
       },
     ],
-    temperature: finalOptions.temperature,
-    maxTokens: finalOptions.maxTokens,
+    ...(finalOptions.temperature !== undefined && { temperature: finalOptions.temperature }),
   });
 
   debugAPI('Image API call successful - response length: %d', result.text.length);
@@ -157,8 +154,6 @@ async function attemptStructuredGeneration<T>(
   prompt: string,
   provider: AIProvider,
   options: StructuredAIOptions<T>,
-  registeredTools: Record<string, RegisteredToolForAI>,
-  hasTools: boolean,
 ): Promise<T> {
   const maxSchemaRetries = 3;
   let lastError: AIToolValidationError | undefined;
@@ -179,12 +174,7 @@ async function attemptStructuredGeneration<T>(
       schema: options.schema,
       schemaName: options.schemaName,
       schemaDescription: options.schemaDescription,
-      temperature: options.temperature,
-      maxTokens: options.maxTokens,
-      ...(hasTools && {
-        tools: registeredTools,
-        toolChoice: 'auto' as const,
-      }),
+      ...(options.temperature !== undefined && { temperature: options.temperature }),
     };
     debugAPI('Generating structured object with schema: %s', options.schemaName ?? 'unnamed');
     const result = await generateObject(opts);
@@ -212,7 +202,6 @@ export async function generateStructuredData<T>(
   context: AIContext,
   prompt: string,
   options: StructuredAIOptions<T>,
-  registeredTools: Record<string, RegisteredToolForAI> = {},
 ): Promise<T> {
   const resolvedProvider = options.provider ?? getDefaultProvider(context);
   debugAPI(
@@ -221,9 +210,31 @@ export async function generateStructuredData<T>(
     options.schemaName ?? 'unnamed',
   );
 
-
+  return attemptStructuredGeneration(context, prompt, resolvedProvider, options);
+}
+
+function startPartialObjectStream(
+  result: {
+    partialObjectStream: AsyncIterable<unknown>;
+  },
+  onPartialObject: ((partialObject: unknown) => void) | undefined,
+): void {
+  if (!onPartialObject) return;
 
-
+  debugStream('Starting partial object stream');
+  void (async () => {
+    try {
+      let partialCount = 0;
+      for await (const partialObject of result.partialObjectStream) {
+        partialCount++;
+        debugStream('Partial object %d received', partialCount);
+        onPartialObject(partialObject);
+      }
+      debugStream('Partial object stream complete - total partials: %d', partialCount);
+    } catch (streamError) {
+      debugError('Error in partial object stream: %O', streamError);
+    }
+  })();
 }
 
 export async function streamStructuredData<T>(
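The extracted `startPartialObjectStream` helper is deliberately fire-and-forget: the `void (async () => { ... })()` shape starts iterating the partial-object stream without making the caller await it. A self-contained sketch of the same pattern, with a hypothetical stand-in for `partialObjectStream`:

// Hypothetical async generator standing in for result.partialObjectStream.
async function* partials(): AsyncIterable<unknown> {
  yield { name: 'Ada' };
  yield { name: 'Ada', age: 36 };
}

function consumeInBackground(
  stream: AsyncIterable<unknown>,
  onPartial?: (value: unknown) => void,
): void {
  if (!onPartial) return; // no listener: skip iterating entirely
  // `void` flags the promise as intentionally un-awaited; the try/catch
  // keeps a failure inside the loop from becoming an unhandled rejection.
  void (async () => {
    try {
      for await (const value of stream) onPartial(value);
    } catch (err) {
      console.error('partial stream failed:', err);
    }
  })();
}

consumeInBackground(partials(), (v) => console.log('partial:', v));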
@@ -253,26 +264,10 @@ export async function streamStructuredData<T>(
     schema: options.schema,
     schemaName: options.schemaName,
     schemaDescription: options.schemaDescription,
-    temperature: options.temperature,
-    maxTokens: options.maxTokens,
+    ...(options.temperature !== undefined && { temperature: options.temperature }),
   });
 
-
-    debugStream('Starting partial object stream');
-    void (async () => {
-      try {
-        let partialCount = 0;
-        for await (const partialObject of result.partialObjectStream) {
-          partialCount++;
-          debugStream('Partial object %d received', partialCount);
-          options.onPartialObject?.(partialObject);
-        }
-        debugStream('Partial object stream complete - total partials: %d', partialCount);
-      } catch (streamError) {
-        debugError('Error in partial object stream: %O', streamError);
-      }
-    })();
-  }
+  startPartialObjectStream(result, options.onPartialObject);
 
   const finalObject = await result.object;
   debugStream('Final structured object received');
@@ -298,103 +293,32 @@ export async function streamStructuredData<T>(
 async function executeToolConversation(
   modelInstance: ReturnType<typeof getModel>,
   messages: Array<{ role: 'user' | 'assistant'; content: string }>,
-  registeredTools: Record<string, RegisteredToolForAI>,
-  hasTools: boolean,
   finalOptions: AIOptions & { temperature?: number; maxTokens?: number },
   provider: AIProvider,
 ): Promise<{ finalResult: string; allToolCalls: unknown[] }> {
-  let finalResult = '';
   const allToolCalls: unknown[] = [];
-  let attempts = 0;
-  const maxAttempts = 5;
-
-  while (attempts < maxAttempts) {
-    attempts++;
-    debugTools('Tool execution attempt %d/%d', attempts, maxAttempts);
 
+  try {
     const opts = {
       model: modelInstance,
       messages,
-      temperature: finalOptions.temperature,
-      maxTokens: finalOptions.maxTokens,
-      ...(hasTools && {
-        tools: registeredTools,
-        toolChoice: 'auto' as const,
-      }),
+      ...(finalOptions.temperature !== undefined && { temperature: finalOptions.temperature }),
     };
-    debugTools('Request options: %o', { ...opts, tools: hasTools ? '[tools included]' : undefined });
 
-
-    const result = await aiGenerateText(opts);
-    debugTools('Result received - has text: %s, tool calls: %d', !!result.text, result.toolCalls?.length ?? 0);
-
-    if (result.text) {
-      messages.push({ role: 'assistant', content: result.text });
-      finalResult = result.text;
-      debugTools('Assistant message added to conversation');
-    }
-
-    if (result.toolCalls !== undefined && result.toolCalls.length > 0) {
-      allToolCalls.push(...result.toolCalls);
-      debugTools('Executing %d tool calls', result.toolCalls.length);
+    const result = await aiGenerateText(opts);
 
-
-
-
-
-        role: 'user',
-        content: `${toolResults}\n\nUsing the tool outputs above, continue your response to the original request.`,
-      });
-
-      continue;
-    }
-
-    debugTools('No tool calls, conversation complete');
-    break;
-  } catch (error) {
-    extractAndLogError(error, provider, 'generateTextWithTools');
-    throw error;
-  }
-  }
-
-  return { finalResult, allToolCalls };
-}
-
-async function executeToolCalls(
-  toolCalls: unknown[],
-  registeredTools: Record<string, RegisteredToolForAI>,
-): Promise<string> {
-  debugTools('Executing %d tool calls', toolCalls.length);
-  let toolResults = '';
-
-  for (const toolCall of toolCalls) {
-    try {
-      const toolCallObj = toolCall as { toolName: string; args: Record<string, unknown> };
-      debugTools('Executing tool: %s with args: %o', toolCallObj.toolName, toolCallObj.args);
-      const tool = registeredTools[toolCallObj.toolName];
-      if (tool?.execute) {
-        const toolResult = await tool.execute(toolCallObj.args);
-        toolResults += `Tool ${toolCallObj.toolName} returned: ${String(toolResult)}\n\n`;
-        debugTools('Tool %s executed successfully', toolCallObj.toolName);
-      } else {
-        toolResults += `Error: Tool ${toolCallObj.toolName} not found or missing execute function\n\n`;
-        debugTools('Tool %s not found or missing execute function', toolCallObj.toolName);
-      }
-    } catch (error) {
-      const toolCallObj = toolCall as { toolName: string };
-      debugError('Tool execution error for %s: %O', toolCallObj.toolName, error);
-      toolResults += `Error executing tool ${toolCallObj.toolName}: ${String(error)}\n\n`;
-    }
+    return { finalResult: result.text, allToolCalls };
+  } catch (error) {
+    extractAndLogError(error, provider, 'generateTextWithTools');
+    throw error;
   }
-
-  return toolResults;
 }
 
 export async function generateTextWithTools(
   context: AIContext,
   prompt: string,
   options: AIOptions = {},
-
+  _registeredTools: Record<string, RegisteredToolForAI> = {},
 ): Promise<{ text: string; toolCalls?: unknown[] }> {
   const resolvedProvider = options.provider ?? getDefaultProvider(context);
   debugTools('generateTextWithTools called - provider: %s', resolvedProvider);
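The hand-rolled conversation loop (up to five `generateText` calls, manual tool execution via `executeToolCalls`, and re-prompting with stringified tool output) collapses into a single call, with `registeredTools` surviving only as the unused `_registeredTools` parameter. For reference, AI SDK 5 can run such a loop itself when tools are passed directly; a sketch assuming its `tool`/`stopWhen` API (the tool and model here are illustrative, not part of this package):

import { generateText, tool, stepCountIs } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Illustrative tool: the SDK invokes execute and feeds the result back
// to the model on the next step, replacing a manual while-loop.
const weather = tool({
  description: 'Get the weather for a city',
  inputSchema: z.object({ city: z.string() }),
  execute: async ({ city }) => `Sunny in ${city}`,
});

const result = await generateText({
  model: openai('gpt-4o-mini'), // illustrative model id
  prompt: 'What is the weather in Paris?',
  tools: { weather },
  stopWhen: stepCountIs(5), // at most five model/tool round-trips
});
console.log(result.text);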
@@ -402,17 +326,11 @@ export async function generateTextWithTools(
|
|
|
402
326
|
const model = finalOptions.model ?? getDefaultModel(resolvedProvider, context);
|
|
403
327
|
const modelInstance = getModel(resolvedProvider, model, context);
|
|
404
328
|
|
|
405
|
-
debugTools('Registered tools: %o', Object.keys(registeredTools));
|
|
406
|
-
const hasTools = Object.keys(registeredTools).length > 0;
|
|
407
|
-
debugTools('Has tools available: %s', hasTools);
|
|
408
|
-
|
|
409
329
|
const messages: Array<{ role: 'user' | 'assistant'; content: string }> = [{ role: 'user', content: prompt }];
|
|
410
330
|
|
|
411
331
|
const { finalResult, allToolCalls } = await executeToolConversation(
|
|
412
332
|
modelInstance,
|
|
413
333
|
messages,
|
|
414
|
-
registeredTools,
|
|
415
|
-
hasTools,
|
|
416
334
|
finalOptions,
|
|
417
335
|
resolvedProvider,
|
|
418
336
|
);
|
package/src/core/types.ts
CHANGED
package/src/node/wrappers.ts
CHANGED
@@ -113,8 +113,6 @@ export async function generateStructuredDataWithAI<T>(prompt: string, options: S
     } catch (e) {
       throw new Error(`MCP server failed to start: ${(e as Error).message}`);
     }
-    const tools = getRegisteredToolsForAI();
-    return coreGenerateStructuredData(context, prompt, options, tools);
   }
 
   return coreGenerateStructuredData(context, prompt, options);