@auto-engineer/ai-gateway 0.4.3 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +1 -1
- package/.turbo/turbo-format.log +10 -9
- package/.turbo/turbo-lint.log +1 -1
- package/.turbo/turbo-test.log +35 -17
- package/.turbo/turbo-type-check.log +5 -4
- package/CHANGELOG.md +6 -0
- package/DEBUG.md +212 -0
- package/dist/config.d.ts.map +1 -1
- package/dist/config.js +35 -8
- package/dist/config.js.map +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +270 -88
- package/dist/index.js.map +1 -1
- package/dist/mcp-server.d.ts.map +1 -1
- package/dist/mcp-server.js +46 -6
- package/dist/mcp-server.js.map +1 -1
- package/package.json +2 -7
- package/src/config.ts +46 -9
- package/src/index.ts +340 -104
- package/src/mcp-server.ts +49 -6
- package/tsconfig.tsbuildinfo +1 -1
package/src/index.ts
CHANGED
@@ -7,6 +7,116 @@ import { configureAIProvider } from './config';
 import { z } from 'zod';
 import { getRegisteredToolsForAI } from './mcp-server';
 import { startServer } from './mcp-server';
+import createDebug from 'debug';
+
+// const debug = createDebug('ai-gateway'); // TODO: Use for general debugging
+const debugConfig = createDebug('ai-gateway:config');
+const debugAPI = createDebug('ai-gateway:api');
+const debugError = createDebug('ai-gateway:error');
+const debugTools = createDebug('ai-gateway:tools');
+const debugStream = createDebug('ai-gateway:stream');
+const debugValidation = createDebug('ai-gateway:validation');
+
+// Error type definitions
+const ERROR_PATTERNS = [
+  {
+    patterns: ['rate limit', '429'],
+    statusCode: 429,
+    icon: '⚠️',
+    message: 'RATE LIMIT ERROR detected for %s',
+    checkRetryAfter: true,
+  },
+  {
+    patterns: ['401', 'unauthorized'],
+    statusCode: 401,
+    icon: '🔐',
+    message: 'AUTHENTICATION ERROR - Check your %s API key',
+  },
+  {
+    patterns: ['quota', 'credits', 'insufficient'],
+    icon: '💳',
+    message: 'QUOTA/CREDITS ERROR - Insufficient credits for %s',
+  },
+  {
+    patterns: ['model', 'not found'],
+    icon: '🤖',
+    message: 'MODEL ERROR - Model might not be available for %s',
+  },
+  {
+    patterns: ['timeout', 'timed out'],
+    icon: '⏱️',
+    message: 'TIMEOUT ERROR - Request timed out for %s',
+  },
+  {
+    patterns: ['ECONNREFUSED', 'ENOTFOUND', 'network'],
+    icon: '🌐',
+    message: 'NETWORK ERROR - Connection failed to %s',
+  },
+];
+
+// Helper to check and log specific error types
+function checkErrorType(message: string, errorAny: Record<string, unknown>, provider: AIProvider): void {
+  for (const errorType of ERROR_PATTERNS) {
+    const hasPattern = errorType.patterns.some((pattern) => message.includes(pattern));
+    const hasStatusCode = errorType.statusCode !== undefined && errorAny.status === errorType.statusCode;
+
+    if (hasPattern || hasStatusCode) {
+      debugError(`${errorType.icon} ${errorType.message}`, provider);
+
+      if (errorType.checkRetryAfter === true && errorAny.retryAfter !== undefined) {
+        debugError('Retry after: %s seconds', String(errorAny.retryAfter));
+      }
+      return;
+    }
+  }
+}
+
+// Helper to extract additional error details
+function extractErrorDetails(errorAny: Record<string, unknown>): void {
+  if (errorAny.response !== undefined && typeof errorAny.response === 'object' && errorAny.response !== null) {
+    const response = errorAny.response as Record<string, unknown>;
+    debugError('Response status: %s', String(response.status ?? 'unknown'));
+    debugError('Response status text: %s', String(response.statusText ?? 'unknown'));
+    if (response.data !== undefined) {
+      debugError('Response data: %o', response.data);
+    }
+  }
+
+  if (errorAny.code !== undefined) {
+    debugError('Error code: %s', String(errorAny.code));
+  }
+
+  if (errorAny.type !== undefined) {
+    debugError('Error type: %s', String(errorAny.type));
+  }
+}
+
+/**
+ * Extract and log meaningful error information from Vercel AI SDK errors
+ */
+function extractAndLogError(error: unknown, provider: AIProvider, operation: string): void {
+  debugError('%s failed for provider %s', operation, provider);
+
+  if (error instanceof Error) {
+    debugError('Error message: %s', error.message);
+    debugError('Error name: %s', error.name);
+    debugError('Error stack: %s', error.stack);
+
+    // Check for specific error types from Vercel AI SDK
+    const errorAny = error as unknown as Record<string, unknown>;
+
+    // Check various error types
+    checkErrorType(error.message, errorAny, provider);
+
+    // Extract additional error details if available
+    extractErrorDetails(errorAny);
+
+    // Log raw error object for debugging
+    debugError('Full error object: %O', error);
+  } else {
+    debugError('Unknown error type: %O', error);
+  }
+}
 
 interface AIToolValidationError extends Error {
   cause?: {
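
The headline change in this version is debug instrumentation built on the debug package: each subsystem gets its own namespace (ai-gateway:config, :api, :error, :tools, :stream, :validation), and the loggers stay silent unless a namespace is enabled through the DEBUG environment variable; the newly added DEBUG.md presumably documents this. As a minimal sketch, a consumer could surface the logs like this (assuming the package root re-exports generateTextWithAI and AIProvider from src/index.ts):

import { generateTextWithAI, AIProvider } from '@auto-engineer/ai-gateway';

// Run with: DEBUG=ai-gateway:* node app.js     (all namespaces)
// or:       DEBUG=ai-gateway:error node app.js (error classification only)
const text = await generateTextWithAI('Say hello', AIProvider.OpenAI);
console.log(text);
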
@@ -58,23 +168,28 @@ const defaultOptions: AIOptions = {
 };
 
 function getDefaultModel(provider: AIProvider): string {
-
-
-
-
-
-
-
-
-
-
-
-
-
+  const model = (() => {
+    switch (provider) {
+      case AIProvider.OpenAI:
+        return 'gpt-4o-mini'; // maybe 5
+      case AIProvider.Anthropic:
+        return 'claude-sonnet-4-20250514'; // 4
+      case AIProvider.Google:
+        return 'gemini-2.5-pro';
+      case AIProvider.XAI:
+        return 'grok-4';
+      default:
+        // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
+        throw new Error(`Unknown provider: ${provider}`);
+    }
+  })();
+  debugConfig('Selected default model %s for provider %s', model, provider);
+  return model;
 }
 
 function getModel(provider: AIProvider, model?: string) {
   const modelName = model ?? getDefaultModel(provider);
+  debugConfig('Creating model instance for provider %s with model %s', provider, modelName);
 
   switch (provider) {
     case AIProvider.OpenAI:
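
getDefaultModel now wraps its switch in an IIFE so the chosen fallback can be logged before returning, and an unknown provider throws instead of falling through. An explicit model still wins over the default; a hedged sketch (option fields taken from the AIOptions usage visible elsewhere in this diff, and the model id is only an example):

// The explicit model takes precedence over the provider default shown above
const answer = await generateTextWithAI('Summarize this.', AIProvider.Google, {
  model: 'gemini-2.5-flash',
  temperature: 0.2,
  maxTokens: 256,
});
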
@@ -95,25 +210,35 @@ export async function generateTextWithAI(
   provider: AIProvider,
   options: AIOptions = {},
 ): Promise<string> {
+  debugAPI('generateTextWithAI called - provider: %s, promptLength: %d', provider, prompt.length);
   const finalOptions = { ...defaultOptions, ...options };
   const model = finalOptions.model ?? getDefaultModel(provider);
   const modelInstance = getModel(provider, model);
 
   if (finalOptions.includeTools === true) {
+    debugTools('Tools requested, starting MCP server');
     await startServer();
     const result = await generateTextWithToolsAI(prompt, provider, options);
-
     return result.text;
   }
 
-
-    model
-
-
-
-
+  try {
+    debugAPI('Making API call to %s with model %s', provider, model);
+    debugAPI('Request params - temperature: %d, maxTokens: %d', finalOptions.temperature, finalOptions.maxTokens);
+
+    const result = await generateText({
+      model: modelInstance,
+      prompt,
+      temperature: finalOptions.temperature,
+      maxTokens: finalOptions.maxTokens,
+    });
 
-
+    debugAPI('API call successful - response length: %d, usage: %o', result.text.length, result.usage);
+    return result.text;
+  } catch (error) {
+    extractAndLogError(error, provider, 'generateTextWithAI');
+    throw error;
+  }
 }
 
 export async function* streamTextWithAI(
@@ -121,18 +246,31 @@ export async function* streamTextWithAI(
   provider: AIProvider,
   options: AIOptions = {},
 ): AsyncGenerator<string> {
+  debugStream('streamTextWithAI called - provider: %s, promptLength: %d', provider, prompt.length);
   const finalOptions = { ...defaultOptions, ...options };
   const model = getModel(provider, finalOptions.model);
 
-
-
-
-
-
-
-
-
-
+  try {
+    debugStream('Starting stream from %s', provider);
+    const stream = await streamText({
+      model,
+      prompt,
+      temperature: finalOptions.temperature,
+      maxTokens: finalOptions.maxTokens,
+    });
+
+    let totalChunks = 0;
+    let totalLength = 0;
+    for await (const chunk of stream.textStream) {
+      totalChunks++;
+      totalLength += chunk.length;
+      debugStream('Chunk %d received - size: %d bytes', totalChunks, chunk.length);
+      yield chunk;
+    }
+    debugStream('Stream completed - total chunks: %d, total length: %d', totalChunks, totalLength);
+  } catch (error) {
+    extractAndLogError(error, provider, 'streamTextWithAI');
+    throw error;
   }
 }
 
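
streamTextWithAI keeps its async-generator contract; the try/catch and chunk counters only add tracing around it, so existing consumers are unaffected. A usage sketch under the same export assumption as above:

// Each chunk is logged under ai-gateway:stream before being yielded
for await (const chunk of streamTextWithAI('Tell me a story', AIProvider.OpenAI)) {
  process.stdout.write(chunk);
}
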
@@ -146,12 +284,15 @@ export async function generateTextStreamingWithAI(
   provider: AIProvider,
   options: AIOptions = {},
 ): Promise<string> {
+  debugStream('generateTextStreamingWithAI called - provider: %s', provider);
   const finalOptions = { ...defaultOptions, ...options };
   let collectedResult = '';
 
   const stream = streamTextWithAI(prompt, provider, finalOptions);
 
+  let tokenCount = 0;
   for await (const token of stream) {
+    tokenCount++;
     // Collect all tokens for the final result
     collectedResult += token;
 
@@ -161,26 +302,25 @@ export async function generateTextStreamingWithAI(
     }
   }
 
+  debugStream('Streaming complete - total tokens: %d, result length: %d', tokenCount, collectedResult.length);
   return collectedResult;
 }
 
-
-
-
-
-
-
-  const model = finalOptions.model ?? getDefaultModel(provider);
-  const modelInstance = getModel(provider, model);
-
-  const registeredTools = finalOptions.includeTools === true ? getRegisteredToolsForAI() : {};
-  console.log('registeredTools', registeredTools);
-  const hasTools = Object.keys(registeredTools).length > 0;
-  console.log('hasTools', hasTools);
-
-  // Build conversation messages
-  const messages: Array<{ role: 'user' | 'assistant'; content: string }> = [{ role: 'user', content: prompt }];
+// Helper function to handle tool conversation loop
+type RegisteredToolForAI = {
+  parameters: z.ZodSchema;
+  description: string;
+  execute?: (args: Record<string, unknown>) => Promise<string>;
+};
 
+async function executeToolConversation(
+  modelInstance: ReturnType<typeof getModel>,
+  messages: Array<{ role: 'user' | 'assistant'; content: string }>,
+  registeredTools: Record<string, RegisteredToolForAI>,
+  hasTools: boolean,
+  finalOptions: AIOptions & { temperature?: number; maxTokens?: number },
+  provider: AIProvider,
+): Promise<{ finalResult: string; allToolCalls: unknown[] }> {
   let finalResult = '';
   const allToolCalls: unknown[] = [];
   let attempts = 0;
@@ -188,6 +328,7 @@ export async function generateTextWithToolsAI(
 
   while (attempts < maxAttempts) {
     attempts++;
+    debugTools('Tool execution attempt %d/%d', attempts, maxAttempts);
 
     const opts = {
       model: modelInstance,
@@ -199,37 +340,77 @@ export async function generateTextWithToolsAI(
        toolChoice: 'auto' as const,
      }),
    };
-
-    const result = await generateText(opts);
-    console.log('result', JSON.stringify(result, null, 2));
-
-    // Add assistant message to conversation
-    if (result.text) {
-      messages.push({ role: 'assistant', content: result.text });
-      finalResult = result.text;
-    }
+    debugTools('Request options: %o', { ...opts, tools: hasTools ? '[tools included]' : undefined });
 
-
-
-
+    try {
+      const result = await generateText(opts);
+      debugTools('Result received - has text: %s, tool calls: %d', !!result.text, result.toolCalls?.length ?? 0);
+
+      // Add assistant message to conversation
+      if (result.text) {
+        messages.push({ role: 'assistant', content: result.text });
+        finalResult = result.text;
+        debugTools('Assistant message added to conversation');
+      }
 
-    //
-
+      // If there are tool calls, execute them and continue conversation
+      if (result.toolCalls !== undefined && result.toolCalls.length > 0) {
+        allToolCalls.push(...result.toolCalls);
+        debugTools('Executing %d tool calls', result.toolCalls.length);
 
-
-
-
-      content: `${toolResults}Based on this product catalog data, please provide specific product recommendations for a soccer-loving daughter. Include product names, prices, and reasons why each item would be suitable.`,
-    });
+        // Execute tools and create a simple follow-up prompt
+        const toolResults = await executeToolCalls(result.toolCalls, registeredTools);
+        debugTools('Tool execution completed, results length: %d', toolResults.length);
 
-
-
-
+        // Add the tool results as a user message and request a final response
+        messages.push({
+          role: 'user',
+          content: `${toolResults}Based on this product catalog data, please provide specific product recommendations for a soccer-loving daughter. Include product names, prices, and reasons why each item would be suitable.`,
+        });
+
+        // Continue the conversation to get AI's response to tool results
+        continue;
+      }
 
-
-
+      // If no tool calls, we're done
+      debugTools('No tool calls, conversation complete');
+      break;
+    } catch (error) {
+      extractAndLogError(error, provider, 'generateTextWithToolsAI');
+      throw error;
+    }
   }
 
+  return { finalResult, allToolCalls };
+}
+
+export async function generateTextWithToolsAI(
+  prompt: string,
+  provider: AIProvider,
+  options: AIOptions = {},
+): Promise<{ text: string; toolCalls?: unknown[] }> {
+  debugTools('generateTextWithToolsAI called - provider: %s', provider);
+  const finalOptions = { ...defaultOptions, ...options };
+  const model = finalOptions.model ?? getDefaultModel(provider);
+  const modelInstance = getModel(provider, model);
+
+  const registeredTools = finalOptions.includeTools === true ? getRegisteredToolsForAI() : {};
+  debugTools('Registered tools: %o', Object.keys(registeredTools));
+  const hasTools = Object.keys(registeredTools).length > 0;
+  debugTools('Has tools available: %s', hasTools);
+
+  // Build conversation messages
+  const messages: Array<{ role: 'user' | 'assistant'; content: string }> = [{ role: 'user', content: prompt }];
+
+  const { finalResult, allToolCalls } = await executeToolConversation(
+    modelInstance,
+    messages,
+    registeredTools,
+    hasTools,
+    finalOptions,
+    provider,
+  );
+
   return {
     text: finalResult,
     toolCalls: allToolCalls,
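
The inline conversation loop is extracted into executeToolConversation, registered tools are now typed as RegisteredToolForAI (a zod parameters schema, a description, and an optional execute function) rather than an anonymous record, and the stray console.log calls are replaced with debugTools. A sketch of a tool object matching that shape (the catalog tool is hypothetical; real tools are registered via the MCP server in mcp-server.ts):

import { z } from 'zod';

// Matches the RegisteredToolForAI type introduced in this diff
const searchCatalog = {
  parameters: z.object({ query: z.string() }),
  description: 'Search the product catalog',
  execute: async (args: Record<string, unknown>) => `Results for ${String(args.query)}`,
};
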
@@ -238,26 +419,27 @@ export async function generateTextWithToolsAI(
 
 async function executeToolCalls(
   toolCalls: unknown[],
-  registeredTools: Record<
-    string,
-    { execute?: (args: Record<string, unknown>) => Promise<string>; description?: string }
-  >,
+  registeredTools: Record<string, RegisteredToolForAI>,
 ): Promise<string> {
+  debugTools('Executing %d tool calls', toolCalls.length);
   let toolResults = '';
 
   for (const toolCall of toolCalls) {
     try {
       const toolCallObj = toolCall as { toolName: string; args: Record<string, unknown> };
+      debugTools('Executing tool: %s with args: %o', toolCallObj.toolName, toolCallObj.args);
       const tool = registeredTools[toolCallObj.toolName];
       if (tool?.execute) {
         const toolResult = await tool.execute(toolCallObj.args);
         toolResults += `Tool ${toolCallObj.toolName} returned: ${String(toolResult)}\n\n`;
+        debugTools('Tool %s executed successfully', toolCallObj.toolName);
       } else {
         toolResults += `Error: Tool ${toolCallObj.toolName} not found or missing execute function\n\n`;
+        debugTools('Tool %s not found or missing execute function', toolCallObj.toolName);
       }
     } catch (error) {
       const toolCallObj = toolCall as { toolName: string };
-
+      debugError('Tool execution error for %s: %O', toolCallObj.toolName, error);
       toolResults += `Error executing tool ${toolCallObj.toolName}: ${String(error)}\n\n`;
     }
   }
@@ -271,30 +453,44 @@ export async function generateTextWithImageAI(
   provider: AIProvider,
   options: AIOptions = {},
 ): Promise<string> {
+  debugAPI(
+    'generateTextWithImageAI called - provider: %s, textLength: %d, imageSize: %d',
+    provider,
+    text.length,
+    imageBase64.length,
+  );
   const finalOptions = { ...defaultOptions, ...options };
   const model = finalOptions.model ?? getDefaultModel(provider);
   const modelInstance = getModel(provider, model);
 
   if (provider !== AIProvider.OpenAI && provider !== AIProvider.XAI) {
+    debugError('Provider %s does not support image inputs', provider);
     throw new Error(`Provider ${provider} does not support image inputs`);
   }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  try {
+    debugAPI('Sending image+text to %s', provider);
+    const result = await generateText({
+      model: modelInstance,
+      messages: [
+        {
+          role: 'user',
+          content: [
+            { type: 'text', text },
+            { type: 'image', image: imageBase64 },
+          ],
+        },
+      ],
+      temperature: finalOptions.temperature,
+      maxTokens: finalOptions.maxTokens,
+    });
+
+    debugAPI('Image API call successful - response length: %d', result.text.length);
+    return result.text;
+  } catch (error) {
+    extractAndLogError(error, provider, 'generateTextWithImageAI');
+    throw error;
+  }
 }
 
 export function getAvailableProviders(): AIProvider[] {
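
generateTextWithImageAI still accepts images only for OpenAI and XAI, but unsupported providers now log through debugError before throwing, and the multimodal call gets the same try/catch treatment. A usage sketch (the text-then-image parameter order is inferred from the variable names in this hunk, and the file read is illustrative):

import { readFileSync } from 'node:fs';

const imageBase64 = readFileSync('photo.png').toString('base64'); // hypothetical local file
const description = await generateTextWithImageAI('Describe this image.', imageBase64, AIProvider.OpenAI);
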
@@ -304,6 +500,7 @@ export function getAvailableProviders(): AIProvider[] {
   if (config.anthropic != null) providers.push(AIProvider.Anthropic);
   if (config.google != null) providers.push(AIProvider.Google);
   if (config.xai != null) providers.push(AIProvider.XAI);
+  debugConfig('Available providers: %o', providers);
   return providers;
 }
 
@@ -314,6 +511,7 @@ function getEnhancedPrompt(prompt: string, lastError: AIToolValidationError): st
       ? JSON.stringify(lastError.validationDetails.cause.issues, null, 2)
       : lastError.message;
 
+  debugValidation('Enhancing prompt with validation error details: %o', errorDetails);
   return `${prompt}\\n\\n⚠️ IMPORTANT: Your previous response failed validation with the following errors:\\n${errorDetails}\\n\\nPlease fix these errors and ensure your response EXACTLY matches the required schema structure.`;
 }
 
@@ -355,35 +553,42 @@ function handleFailedRequest(
   attempt: number,
 ): { shouldRetry: boolean; enhancedError?: AIToolValidationError } {
   if (!lastError || !isSchemaError(lastError) || attempt >= maxRetries - 1) {
+    debugValidation(
+      'Not retrying - isLastError: %s, isSchemaError: %s, attempt: %d/%d',
+      !!lastError,
+      lastError ? isSchemaError(lastError) : false,
+      attempt + 1,
+      maxRetries,
+    );
     return { shouldRetry: false, enhancedError: lastError };
   }
 
-
+  debugValidation('Schema validation failed on attempt %d/%d, will retry', attempt + 1, maxRetries);
   const enhancedError = enhanceValidationError(lastError);
 
   return { shouldRetry: true, enhancedError };
 }
 
-export async function generateStructuredDataWithAI<T>(
+// Helper function to attempt structured data generation with retry logic
+async function attemptStructuredGeneration<T>(
   prompt: string,
   provider: AIProvider,
   options: StructuredAIOptions<T>,
+  registeredTools: Record<string, RegisteredToolForAI>,
+  hasTools: boolean,
 ): Promise<T> {
   const maxSchemaRetries = 3;
   let lastError: AIToolValidationError | undefined;
 
-  if (options.includeTools === true) await startServer();
-  const registeredTools = options.includeTools === true ? getRegisteredToolsForAI() : {};
-  console.log('registeredTools', registeredTools);
-  const hasTools = Object.keys(registeredTools).length > 0;
-  console.log('hasTools', hasTools);
-
   for (let attempt = 0; attempt < maxSchemaRetries; attempt++) {
     try {
+      debugValidation('Structured data generation attempt %d/%d', attempt + 1, maxSchemaRetries);
       const model = getModel(provider, options.model);
 
       const enhancedPrompt = attempt > 0 && lastError ? getEnhancedPrompt(prompt, lastError) : prompt;
-
+      if (attempt > 0) {
+        debugValidation('Using enhanced prompt for retry attempt %d', attempt + 1);
+      }
 
       const opts = {
         model,
@@ -398,9 +603,9 @@ export async function generateStructuredDataWithAI<T>(
          toolChoice: 'auto' as const,
        }),
      };
-
+      debugAPI('Generating structured object with schema: %s', options.schemaName ?? 'unnamed');
       const result = await generateObject(opts);
-
+      debugAPI('Structured object generated successfully');
       return result.object;
     } catch (error: unknown) {
       lastError =
@@ -421,16 +626,40 @@ export async function generateStructuredDataWithAI<T>(
   throw lastError;
 }
 
+export async function generateStructuredDataWithAI<T>(
+  prompt: string,
+  provider: AIProvider,
+  options: StructuredAIOptions<T>,
+): Promise<T> {
+  debugAPI('generateStructuredDataWithAI called - provider: %s, schema: %s', provider, options.schemaName ?? 'unnamed');
+
+  if (options.includeTools === true) {
+    debugTools('Tools requested, starting MCP server');
+    await startServer();
+  }
+  const registeredTools = options.includeTools === true ? getRegisteredToolsForAI() : {};
+  debugTools('Registered tools for structured data: %o', Object.keys(registeredTools));
+  const hasTools = Object.keys(registeredTools).length > 0;
+
+  return attemptStructuredGeneration(prompt, provider, options, registeredTools, hasTools);
+}
+
 export async function streamStructuredDataWithAI<T>(
   prompt: string,
   provider: AIProvider,
   options: StreamStructuredAIOptions<T>,
 ): Promise<T> {
+  debugStream(
+    'streamStructuredDataWithAI called - provider: %s, schema: %s',
+    provider,
+    options.schemaName ?? 'unnamed',
+  );
   const maxSchemaRetries = 3;
   let lastError: AIToolValidationError | undefined;
 
   for (let attempt = 0; attempt < maxSchemaRetries; attempt++) {
     try {
+      debugValidation('Stream structured data attempt %d/%d', attempt + 1, maxSchemaRetries);
       const model = getModel(provider, options.model);
 
       const enhancedPrompt = attempt > 0 && lastError ? getEnhancedPrompt(prompt, lastError) : prompt;
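
generateStructuredDataWithAI becomes a thin wrapper that starts the MCP server when tools are requested, gathers the registered tools, and delegates the retry loop to attemptStructuredGeneration; streamStructuredDataWithAI gains the same entry logging. A usage sketch (the schema field on StructuredAIOptions is an assumption; this diff itself only shows model, schemaName, includeTools, and onPartialObject):

import { z } from 'zod';

const Person = z.object({ name: z.string(), age: z.number() });

const person = await generateStructuredDataWithAI('Extract the person from: "Ada, 36"', AIProvider.OpenAI, {
  schema: Person, // assumed field; generateObject needs a schema
  schemaName: 'Person',
});
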
@@ -447,19 +676,26 @@ export async function streamStructuredDataWithAI<T>(
 
       // Stream partial objects if callback provided
       if (options.onPartialObject) {
+        debugStream('Starting partial object stream');
         void (async () => {
           try {
+            let partialCount = 0;
             for await (const partialObject of result.partialObjectStream) {
+              partialCount++;
+              debugStream('Partial object %d received', partialCount);
               options.onPartialObject?.(partialObject);
             }
+            debugStream('Partial object stream complete - total partials: %d', partialCount);
           } catch (streamError) {
-
+            debugError('Error in partial object stream: %O', streamError);
           }
         })();
       }
 
       // Return the final complete object
-
+      const finalObject = await result.object;
+      debugStream('Final structured object received');
+      return finalObject;
     } catch (error: unknown) {
       lastError =
         error instanceof Error