@auto-engineer/ai-gateway 0.11.15 → 0.11.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +1 -1
- package/CHANGELOG.md +6 -0
- package/dist/src/config.d.ts.map +1 -1
- package/dist/src/config.js +0 -2
- package/dist/src/config.js.map +1 -1
- package/dist/src/config.specs.js +1 -2
- package/dist/src/config.specs.js.map +1 -1
- package/dist/src/core/constants.d.ts +7 -0
- package/dist/src/core/constants.d.ts.map +1 -0
- package/dist/src/core/constants.js +8 -0
- package/dist/src/core/constants.js.map +1 -0
- package/dist/src/core/context.d.ts +7 -0
- package/dist/src/core/context.d.ts.map +1 -0
- package/dist/src/core/context.js +106 -0
- package/dist/src/core/context.js.map +1 -0
- package/dist/src/core/generators.d.ts +12 -0
- package/dist/src/core/generators.d.ts.map +1 -0
- package/dist/src/core/generators.js +310 -0
- package/dist/src/core/generators.js.map +1 -0
- package/dist/src/core/index.d.ts +8 -0
- package/dist/src/core/index.d.ts.map +1 -0
- package/dist/src/core/index.js +7 -0
- package/dist/src/core/index.js.map +1 -0
- package/dist/src/core/providers/custom.d.ts +3 -0
- package/dist/src/core/providers/custom.d.ts.map +1 -0
- package/dist/src/core/providers/custom.js +9 -0
- package/dist/src/core/providers/custom.js.map +1 -0
- package/dist/src/core/types.d.ts +73 -0
- package/dist/src/core/types.d.ts.map +1 -0
- package/dist/src/core/types.js +9 -0
- package/dist/src/core/types.js.map +1 -0
- package/dist/src/core/utils/errors.d.ts +3 -0
- package/dist/src/core/utils/errors.d.ts.map +1 -0
- package/dist/src/core/utils/errors.js +82 -0
- package/dist/src/core/utils/errors.js.map +1 -0
- package/dist/src/core/utils/log.d.ts +3 -0
- package/dist/src/core/utils/log.d.ts.map +1 -0
- package/dist/src/core/utils/log.js +52 -0
- package/dist/src/core/utils/log.js.map +1 -0
- package/dist/src/core/utils/validation.d.ts +9 -0
- package/dist/src/core/utils/validation.d.ts.map +1 -0
- package/dist/src/core/utils/validation.js +45 -0
- package/dist/src/core/utils/validation.js.map +1 -0
- package/dist/src/index-custom.specs.js +16 -12
- package/dist/src/index-custom.specs.js.map +1 -1
- package/dist/src/index.d.ts +1 -39
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js +1 -566
- package/dist/src/index.js.map +1 -1
- package/dist/src/index.specs.js +6 -3
- package/dist/src/index.specs.js.map +1 -1
- package/dist/src/node/config.d.ts +3 -0
- package/dist/src/node/config.d.ts.map +1 -0
- package/dist/src/node/config.js +72 -0
- package/dist/src/node/config.js.map +1 -0
- package/dist/src/node/index.d.ts +11 -0
- package/dist/src/node/index.d.ts.map +1 -0
- package/dist/src/node/index.js +10 -0
- package/dist/src/node/index.js.map +1 -0
- package/dist/src/node/mcp-server.d.ts +50 -0
- package/dist/src/node/mcp-server.d.ts.map +1 -0
- package/dist/src/node/mcp-server.js +176 -0
- package/dist/src/node/mcp-server.js.map +1 -0
- package/dist/src/node/wrappers.d.ts +16 -0
- package/dist/src/node/wrappers.d.ts.map +1 -0
- package/dist/src/node/wrappers.js +100 -0
- package/dist/src/node/wrappers.js.map +1 -0
- package/dist/src/providers/custom.specs.js +1 -1
- package/dist/src/providers/custom.specs.js.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +19 -3
- package/src/config.specs.ts +1 -2
- package/src/config.ts +0 -2
- package/src/core/constants.ts +8 -0
- package/src/core/context.ts +106 -0
- package/src/core/generators.ts +424 -0
- package/src/core/index.ts +29 -0
- package/src/core/providers/custom.ts +10 -0
- package/src/core/types.ts +81 -0
- package/src/core/utils/errors.ts +91 -0
- package/src/core/utils/log.ts +65 -0
- package/src/core/utils/validation.ts +69 -0
- package/src/index-custom.specs.ts +16 -12
- package/src/index.specs.ts +7 -4
- package/src/index.ts +1 -756
- package/src/node/config.ts +100 -0
- package/src/node/index.ts +60 -0
- package/src/node/mcp-server.ts +261 -0
- package/src/node/wrappers.ts +136 -0
- package/src/providers/custom.specs.ts +2 -2
package/package.json
CHANGED
|
@@ -3,21 +3,37 @@
|
|
|
3
3
|
"type": "module",
|
|
4
4
|
"main": "./dist/src/index.js",
|
|
5
5
|
"types": "./dist/src/index.d.ts",
|
|
6
|
+
"exports": {
|
|
7
|
+
".": {
|
|
8
|
+
"types": "./dist/src/index.d.ts",
|
|
9
|
+
"import": "./dist/src/index.js"
|
|
10
|
+
},
|
|
11
|
+
"./core": {
|
|
12
|
+
"types": "./dist/src/core/index.d.ts",
|
|
13
|
+
"import": "./dist/src/core/index.js"
|
|
14
|
+
},
|
|
15
|
+
"./node": {
|
|
16
|
+
"types": "./dist/src/node/index.d.ts",
|
|
17
|
+
"import": "./dist/src/node/index.js"
|
|
18
|
+
}
|
|
19
|
+
},
|
|
6
20
|
"dependencies": {
|
|
7
21
|
"@ai-sdk/anthropic": "^1.2.12",
|
|
8
22
|
"@ai-sdk/google": "^1.2.19",
|
|
9
23
|
"@ai-sdk/openai": "^1.3.22",
|
|
10
24
|
"@ai-sdk/xai": "^1.2.16",
|
|
11
|
-
"@modelcontextprotocol/sdk": "^1.3.0",
|
|
12
25
|
"ai": "^4.3.16",
|
|
13
26
|
"debug": "^4.4.0",
|
|
14
|
-
"dotenv": "^16.4.5",
|
|
15
27
|
"zod": "^3.25.67"
|
|
16
28
|
},
|
|
29
|
+
"optionalDependencies": {
|
|
30
|
+
"@modelcontextprotocol/sdk": "^1.3.0",
|
|
31
|
+
"dotenv": "^16.4.5"
|
|
32
|
+
},
|
|
17
33
|
"publishConfig": {
|
|
18
34
|
"access": "public"
|
|
19
35
|
},
|
|
20
|
-
"version": "0.11.
|
|
36
|
+
"version": "0.11.16",
|
|
21
37
|
"scripts": {
|
|
22
38
|
"build": "tsc && tsx ../../scripts/fix-esm-imports.ts",
|
|
23
39
|
"test": "vitest run --reporter=dot",
|
package/src/config.specs.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
|
2
|
-
import { configureAIProvider } from './
|
|
2
|
+
import { configureAIProvider } from './node';
|
|
3
3
|
|
|
4
4
|
// Mock environment variables
|
|
5
5
|
const originalEnv = process.env;
|
|
@@ -7,7 +7,6 @@ const originalEnv = process.env;
|
|
|
7
7
|
describe('AI Configuration with Custom Providers', () => {
|
|
8
8
|
beforeEach(() => {
|
|
9
9
|
vi.resetModules();
|
|
10
|
-
// Clear all AI-related environment variables for clean test state
|
|
11
10
|
process.env = {};
|
|
12
11
|
});
|
|
13
12
|
|
package/src/config.ts
CHANGED
|
@@ -30,14 +30,12 @@ export interface AIConfig {
|
|
|
30
30
|
custom?: CustomProviderConfig;
|
|
31
31
|
}
|
|
32
32
|
|
|
33
|
-
// Helper to log provider configuration
|
|
34
33
|
function logProviderConfig(providerName: string, apiKey: string | undefined): void {
|
|
35
34
|
if (apiKey !== undefined) {
|
|
36
35
|
debug('%s provider configured with API key ending in: ...%s', providerName, apiKey.slice(-4));
|
|
37
36
|
}
|
|
38
37
|
}
|
|
39
38
|
|
|
40
|
-
// Helper to build custom provider config
|
|
41
39
|
function buildCustomProviderConfig(): CustomProviderConfig | undefined {
|
|
42
40
|
const name = process.env.CUSTOM_PROVIDER_NAME;
|
|
43
41
|
const baseUrl = process.env.CUSTOM_PROVIDER_BASE_URL;
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
import { createOpenAI } from '@ai-sdk/openai';
|
|
2
|
+
import { createAnthropic } from '@ai-sdk/anthropic';
|
|
3
|
+
import { createGoogleGenerativeAI } from '@ai-sdk/google';
|
|
4
|
+
import { createXai } from '@ai-sdk/xai';
|
|
5
|
+
import { AIProvider, AIContext, AIConfig } from './types';
|
|
6
|
+
import { DEFAULT_MODELS } from './constants';
|
|
7
|
+
import { createCustomProvider } from './providers/custom';
|
|
8
|
+
import { makeLogger } from './utils/log';
|
|
9
|
+
|
|
10
|
+
const debugConfig = makeLogger('auto:ai-gateway:config');
|
|
11
|
+
|
|
12
|
+
export function createAIContext(config: AIConfig, defaultProvider?: AIProvider): AIContext {
|
|
13
|
+
return {
|
|
14
|
+
config,
|
|
15
|
+
defaultProvider,
|
|
16
|
+
};
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
export function getAvailableProviders(context: AIContext): AIProvider[] {
|
|
20
|
+
const providers: AIProvider[] = [];
|
|
21
|
+
if (context.config.anthropic != null) providers.push(AIProvider.Anthropic);
|
|
22
|
+
if (context.config.openai != null) providers.push(AIProvider.OpenAI);
|
|
23
|
+
if (context.config.google != null) providers.push(AIProvider.Google);
|
|
24
|
+
if (context.config.xai != null) providers.push(AIProvider.XAI);
|
|
25
|
+
if (context.config.custom != null) providers.push(AIProvider.Custom);
|
|
26
|
+
debugConfig('Available providers: %o', providers);
|
|
27
|
+
return providers;
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
export function getDefaultProvider(context: AIContext): AIProvider {
|
|
31
|
+
if (context.defaultProvider != null) {
|
|
32
|
+
return context.defaultProvider;
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
const available = getAvailableProviders(context);
|
|
36
|
+
if (available.length === 0) {
|
|
37
|
+
throw new Error('No AI providers configured in context');
|
|
38
|
+
}
|
|
39
|
+
return available[0];
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
export function getDefaultModel(provider: AIProvider, context: AIContext): string {
|
|
43
|
+
if (provider === AIProvider.Custom) {
|
|
44
|
+
const config = context.config.custom;
|
|
45
|
+
if (config == null) {
|
|
46
|
+
throw new Error('Custom provider not configured');
|
|
47
|
+
}
|
|
48
|
+
debugConfig('Selected custom provider default model %s for provider %s', config.defaultModel, provider);
|
|
49
|
+
return config.defaultModel;
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
const model =
|
|
53
|
+
DEFAULT_MODELS[provider] ??
|
|
54
|
+
(() => {
|
|
55
|
+
throw new Error(`Unknown provider: ${provider}`);
|
|
56
|
+
})();
|
|
57
|
+
debugConfig('Selected default model %s for provider %s', model, provider);
|
|
58
|
+
return model;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
const providerFactories = {
|
|
62
|
+
[AIProvider.OpenAI]: (modelName: string, context: AIContext) => {
|
|
63
|
+
const config = context.config.openai;
|
|
64
|
+
if (config == null) throw new Error('OpenAI provider not configured');
|
|
65
|
+
const openaiProvider = createOpenAI({ apiKey: config.apiKey });
|
|
66
|
+
return openaiProvider(modelName);
|
|
67
|
+
},
|
|
68
|
+
[AIProvider.Anthropic]: (modelName: string, context: AIContext) => {
|
|
69
|
+
const config = context.config.anthropic;
|
|
70
|
+
if (config == null) throw new Error('Anthropic provider not configured');
|
|
71
|
+
const anthropicProvider = createAnthropic({ apiKey: config.apiKey });
|
|
72
|
+
return anthropicProvider(modelName);
|
|
73
|
+
},
|
|
74
|
+
[AIProvider.Google]: (modelName: string, context: AIContext) => {
|
|
75
|
+
const config = context.config.google;
|
|
76
|
+
if (config == null) throw new Error('Google provider not configured');
|
|
77
|
+
const googleProvider = createGoogleGenerativeAI({ apiKey: config.apiKey });
|
|
78
|
+
return googleProvider(modelName);
|
|
79
|
+
},
|
|
80
|
+
[AIProvider.XAI]: (modelName: string, context: AIContext) => {
|
|
81
|
+
const config = context.config.xai;
|
|
82
|
+
if (config == null) throw new Error('XAI provider not configured');
|
|
83
|
+
const xaiProvider = createXai({ apiKey: config.apiKey });
|
|
84
|
+
return xaiProvider(modelName);
|
|
85
|
+
},
|
|
86
|
+
[AIProvider.Custom]: (modelName: string, context: AIContext) => {
|
|
87
|
+
const config = context.config.custom;
|
|
88
|
+
if (config == null) throw new Error('Custom provider not configured');
|
|
89
|
+
const customProvider = createCustomProvider(config);
|
|
90
|
+
return customProvider.languageModel(modelName);
|
|
91
|
+
},
|
|
92
|
+
};
|
|
93
|
+
|
|
94
|
+
function createProviderModel(provider: AIProvider, modelName: string, context: AIContext) {
|
|
95
|
+
const factory = providerFactories[provider];
|
|
96
|
+
if (factory == null) {
|
|
97
|
+
throw new Error(`Unknown provider: ${provider as string}`);
|
|
98
|
+
}
|
|
99
|
+
return factory(modelName, context);
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
export function getModel(provider: AIProvider, model: string | undefined, context: AIContext) {
|
|
103
|
+
const modelName = model ?? getDefaultModel(provider, context);
|
|
104
|
+
debugConfig('Creating model instance for provider %s with model %s', provider, modelName);
|
|
105
|
+
return createProviderModel(provider, modelName, context);
|
|
106
|
+
}
|
|
@@ -0,0 +1,424 @@
|
|
|
1
|
+
import { generateText as aiGenerateText, streamText as aiStreamText, generateObject, streamObject } from 'ai';
|
|
2
|
+
import {
|
|
3
|
+
AIContext,
|
|
4
|
+
AIOptions,
|
|
5
|
+
StructuredAIOptions,
|
|
6
|
+
StreamStructuredAIOptions,
|
|
7
|
+
AIProvider,
|
|
8
|
+
RegisteredToolForAI,
|
|
9
|
+
AIToolValidationError,
|
|
10
|
+
} from './types';
|
|
11
|
+
import { getModel, getDefaultProvider, getDefaultModel } from './context';
|
|
12
|
+
import { extractAndLogError } from './utils/errors';
|
|
13
|
+
import { getEnhancedPrompt, handleFailedRequest } from './utils/validation';
|
|
14
|
+
import { makeLogger } from './utils/log';
|
|
15
|
+
|
|
16
|
+
const debugAPI = makeLogger('auto:ai-gateway:api');
|
|
17
|
+
const debugStream = makeLogger('auto:ai-gateway:stream');
|
|
18
|
+
const debugTools = makeLogger('auto:ai-gateway:tools');
|
|
19
|
+
const debugValidation = makeLogger('auto:ai-gateway:validation');
|
|
20
|
+
const debugError = makeLogger('auto:ai-gateway:error');
|
|
21
|
+
|
|
22
|
+
// Fallback generation settings; every generator merges these under the
// caller-supplied options, so any key the caller provides overrides them.
const defaultOptions: AIOptions = {
  temperature: 0.7,
  maxTokens: 1000,
};
|
|
26
|
+
|
|
27
|
+
export async function generateText(context: AIContext, prompt: string, options: AIOptions = {}): Promise<string> {
|
|
28
|
+
const resolvedProvider = options.provider ?? getDefaultProvider(context);
|
|
29
|
+
debugAPI('generateText called - provider: %s, promptLength: %d', resolvedProvider, prompt.length);
|
|
30
|
+
const finalOptions = { ...defaultOptions, ...options };
|
|
31
|
+
const model = finalOptions.model ?? getDefaultModel(resolvedProvider, context);
|
|
32
|
+
const modelInstance = getModel(resolvedProvider, finalOptions.model, context);
|
|
33
|
+
|
|
34
|
+
try {
|
|
35
|
+
debugAPI('Making API call to %s with model %s', resolvedProvider, model);
|
|
36
|
+
debugAPI('Request params - temperature: %d, maxTokens: %d', finalOptions.temperature, finalOptions.maxTokens);
|
|
37
|
+
|
|
38
|
+
const result = await aiGenerateText({
|
|
39
|
+
model: modelInstance,
|
|
40
|
+
prompt,
|
|
41
|
+
temperature: finalOptions.temperature,
|
|
42
|
+
maxTokens: finalOptions.maxTokens,
|
|
43
|
+
});
|
|
44
|
+
|
|
45
|
+
debugAPI('API call successful - response length: %d, usage: %o', result.text.length, result.usage);
|
|
46
|
+
return result.text;
|
|
47
|
+
} catch (error) {
|
|
48
|
+
extractAndLogError(error, resolvedProvider, 'generateText');
|
|
49
|
+
throw error;
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
export async function* streamText(context: AIContext, prompt: string, options: AIOptions = {}): AsyncGenerator<string> {
|
|
54
|
+
const resolvedProvider = options.provider ?? getDefaultProvider(context);
|
|
55
|
+
debugStream('streamText called - provider: %s, promptLength: %d', resolvedProvider, prompt.length);
|
|
56
|
+
const finalOptions = { ...defaultOptions, ...options };
|
|
57
|
+
const modelInstance = getModel(resolvedProvider, finalOptions.model, context);
|
|
58
|
+
|
|
59
|
+
try {
|
|
60
|
+
debugStream('Starting stream from %s', resolvedProvider);
|
|
61
|
+
const stream = aiStreamText({
|
|
62
|
+
model: modelInstance,
|
|
63
|
+
prompt,
|
|
64
|
+
temperature: finalOptions.temperature,
|
|
65
|
+
maxTokens: finalOptions.maxTokens,
|
|
66
|
+
});
|
|
67
|
+
|
|
68
|
+
let totalChunks = 0;
|
|
69
|
+
let totalLength = 0;
|
|
70
|
+
for await (const chunk of stream.textStream) {
|
|
71
|
+
totalChunks++;
|
|
72
|
+
totalLength += chunk.length;
|
|
73
|
+
debugStream('Chunk %d received - size: %d bytes', totalChunks, chunk.length);
|
|
74
|
+
yield chunk;
|
|
75
|
+
}
|
|
76
|
+
debugStream('Stream completed - total chunks: %d, total length: %d', totalChunks, totalLength);
|
|
77
|
+
} catch (error) {
|
|
78
|
+
extractAndLogError(error, resolvedProvider, 'streamText');
|
|
79
|
+
throw error;
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
export async function generateTextStreaming(
|
|
84
|
+
context: AIContext,
|
|
85
|
+
prompt: string,
|
|
86
|
+
options: AIOptions = {},
|
|
87
|
+
): Promise<string> {
|
|
88
|
+
const resolvedProvider = options.provider ?? getDefaultProvider(context);
|
|
89
|
+
debugStream('generateTextStreaming called - provider: %s', resolvedProvider);
|
|
90
|
+
const finalOptions = { ...defaultOptions, ...options };
|
|
91
|
+
let collectedResult = '';
|
|
92
|
+
|
|
93
|
+
const stream = streamText(context, prompt, finalOptions);
|
|
94
|
+
|
|
95
|
+
let tokenCount = 0;
|
|
96
|
+
for await (const token of stream) {
|
|
97
|
+
tokenCount++;
|
|
98
|
+
collectedResult += token;
|
|
99
|
+
|
|
100
|
+
if (finalOptions.streamCallback) {
|
|
101
|
+
finalOptions.streamCallback(token);
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
debugStream('Streaming complete - total tokens: %d, result length: %d', tokenCount, collectedResult.length);
|
|
106
|
+
return collectedResult;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
export async function generateTextWithImage(
|
|
110
|
+
context: AIContext,
|
|
111
|
+
text: string,
|
|
112
|
+
imageBase64: string,
|
|
113
|
+
options: AIOptions = {},
|
|
114
|
+
): Promise<string> {
|
|
115
|
+
const resolvedProvider = options.provider ?? getDefaultProvider(context);
|
|
116
|
+
debugAPI(
|
|
117
|
+
'generateTextWithImage called - provider: %s, textLength: %d, imageSize: %d',
|
|
118
|
+
resolvedProvider,
|
|
119
|
+
text.length,
|
|
120
|
+
imageBase64.length,
|
|
121
|
+
);
|
|
122
|
+
const finalOptions = { ...defaultOptions, ...options };
|
|
123
|
+
const modelInstance = getModel(resolvedProvider, finalOptions.model, context);
|
|
124
|
+
|
|
125
|
+
if (resolvedProvider !== AIProvider.OpenAI && resolvedProvider !== AIProvider.XAI) {
|
|
126
|
+
debugError('Provider %s does not support image inputs', resolvedProvider);
|
|
127
|
+
throw new Error(`Provider ${resolvedProvider} does not support image inputs`);
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
try {
|
|
131
|
+
debugAPI('Sending image+text to %s', resolvedProvider);
|
|
132
|
+
const result = await aiGenerateText({
|
|
133
|
+
model: modelInstance,
|
|
134
|
+
messages: [
|
|
135
|
+
{
|
|
136
|
+
role: 'user',
|
|
137
|
+
content: [
|
|
138
|
+
{ type: 'text', text },
|
|
139
|
+
{ type: 'image', image: imageBase64 },
|
|
140
|
+
],
|
|
141
|
+
},
|
|
142
|
+
],
|
|
143
|
+
temperature: finalOptions.temperature,
|
|
144
|
+
maxTokens: finalOptions.maxTokens,
|
|
145
|
+
});
|
|
146
|
+
|
|
147
|
+
debugAPI('Image API call successful - response length: %d', result.text.length);
|
|
148
|
+
return result.text;
|
|
149
|
+
} catch (error) {
|
|
150
|
+
extractAndLogError(error, resolvedProvider, 'generateTextWithImage');
|
|
151
|
+
throw error;
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
// Runs generateObject with up to 3 attempts. On a failed attempt the error is
// passed through handleFailedRequest, which decides whether to retry and may
// enhance the error; retries re-prompt with getEnhancedPrompt so the model
// sees the prior validation failure. Throws the last (enhanced) error when
// retries are exhausted or a non-retryable failure occurs.
async function attemptStructuredGeneration<T>(
  context: AIContext,
  prompt: string,
  provider: AIProvider,
  options: StructuredAIOptions<T>,
  registeredTools: Record<string, RegisteredToolForAI>,
  hasTools: boolean,
): Promise<T> {
  const maxSchemaRetries = 3;
  let lastError: AIToolValidationError | undefined;

  for (let attempt = 0; attempt < maxSchemaRetries; attempt++) {
    try {
      debugValidation('Structured data generation attempt %d/%d', attempt + 1, maxSchemaRetries);
      const modelInstance = getModel(provider, options.model, context);

      // First attempt uses the raw prompt; retries embed the previous error.
      const enhancedPrompt = attempt > 0 && lastError ? getEnhancedPrompt(prompt, lastError) : prompt;
      if (attempt > 0) {
        debugValidation('Using enhanced prompt for retry attempt %d', attempt + 1);
      }

      const opts = {
        model: modelInstance,
        prompt: enhancedPrompt,
        schema: options.schema,
        schemaName: options.schemaName,
        schemaDescription: options.schemaDescription,
        temperature: options.temperature,
        maxTokens: options.maxTokens,
        // Tools are only attached when the caller registered at least one.
        ...(hasTools && {
          tools: registeredTools,
          toolChoice: 'auto' as const,
        }),
      };
      debugAPI('Generating structured object with schema: %s', options.schemaName ?? 'unnamed');
      const result = await generateObject(opts);
      debugAPI('Structured object generated successfully');
      return result.object;
    } catch (error: unknown) {
      // Normalize non-Error throwables before handing off to the retry logic.
      lastError =
        error instanceof Error
          ? (error as AIToolValidationError)
          : (new Error('An unknown error occurred') as AIToolValidationError);

      const { shouldRetry, enhancedError } = handleFailedRequest(lastError, maxSchemaRetries, attempt);
      lastError = enhancedError;

      if (!shouldRetry) {
        throw lastError;
      }
    }
  }

  // All attempts consumed; surface the final enhanced error.
  throw lastError;
}
|
|
210
|
+
|
|
211
|
+
export async function generateStructuredData<T>(
|
|
212
|
+
context: AIContext,
|
|
213
|
+
prompt: string,
|
|
214
|
+
options: StructuredAIOptions<T>,
|
|
215
|
+
registeredTools: Record<string, RegisteredToolForAI> = {},
|
|
216
|
+
): Promise<T> {
|
|
217
|
+
const resolvedProvider = options.provider ?? getDefaultProvider(context);
|
|
218
|
+
debugAPI(
|
|
219
|
+
'generateStructuredData called - provider: %s, schema: %s',
|
|
220
|
+
resolvedProvider,
|
|
221
|
+
options.schemaName ?? 'unnamed',
|
|
222
|
+
);
|
|
223
|
+
|
|
224
|
+
const hasTools = Object.keys(registeredTools).length > 0;
|
|
225
|
+
|
|
226
|
+
return attemptStructuredGeneration(context, prompt, resolvedProvider, options, registeredTools, hasTools);
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
// Streams a schema-validated object, delivering intermediate snapshots via
// options.onPartialObject while awaiting the final validated object. Uses the
// same 3-attempt retry scheme as attemptStructuredGeneration.
export async function streamStructuredData<T>(
  context: AIContext,
  prompt: string,
  options: StreamStructuredAIOptions<T>,
): Promise<T> {
  const resolvedProvider = options.provider ?? getDefaultProvider(context);
  debugStream(
    'streamStructuredData called - provider: %s, schema: %s',
    resolvedProvider,
    options.schemaName ?? 'unnamed',
  );
  const maxSchemaRetries = 3;
  let lastError: AIToolValidationError | undefined;

  for (let attempt = 0; attempt < maxSchemaRetries; attempt++) {
    try {
      debugValidation('Stream structured data attempt %d/%d', attempt + 1, maxSchemaRetries);
      const modelInstance = getModel(resolvedProvider, options.model, context);

      // Retries embed the prior validation error into the prompt.
      const enhancedPrompt = attempt > 0 && lastError ? getEnhancedPrompt(prompt, lastError) : prompt;

      const result = streamObject({
        model: modelInstance,
        prompt: enhancedPrompt,
        schema: options.schema,
        schemaName: options.schemaName,
        schemaDescription: options.schemaDescription,
        temperature: options.temperature,
        maxTokens: options.maxTokens,
      });

      if (options.onPartialObject) {
        debugStream('Starting partial object stream');
        // Fire-and-forget consumer: partial snapshots are forwarded as they
        // arrive; stream errors here are logged only — the awaited
        // result.object below is the authoritative success/failure signal.
        void (async () => {
          try {
            let partialCount = 0;
            for await (const partialObject of result.partialObjectStream) {
              partialCount++;
              debugStream('Partial object %d received', partialCount);
              options.onPartialObject?.(partialObject);
            }
            debugStream('Partial object stream complete - total partials: %d', partialCount);
          } catch (streamError) {
            debugError('Error in partial object stream: %O', streamError);
          }
        })();
      }

      const finalObject = await result.object;
      debugStream('Final structured object received');
      return finalObject;
    } catch (error: unknown) {
      // Normalize non-Error throwables before the retry decision.
      lastError =
        error instanceof Error
          ? (error as AIToolValidationError)
          : (new Error('An unknown error occurred') as AIToolValidationError);

      const { shouldRetry, enhancedError } = handleFailedRequest(lastError, maxSchemaRetries, attempt);
      lastError = enhancedError;

      if (!shouldRetry) {
        throw lastError;
      }
    }
  }

  // All attempts consumed; surface the final enhanced error.
  throw lastError;
}
|
|
297
|
+
|
|
298
|
+
// Drives a model/tool conversation loop: each round sends the accumulated
// messages, records any assistant text, executes requested tool calls, and
// feeds the tool outputs back as a user message until the model stops
// requesting tools or maxAttempts rounds elapse. NOTE: mutates the caller's
// `messages` array in place. Returns the last assistant text plus every
// tool call observed across rounds.
async function executeToolConversation(
  modelInstance: ReturnType<typeof getModel>,
  messages: Array<{ role: 'user' | 'assistant'; content: string }>,
  registeredTools: Record<string, RegisteredToolForAI>,
  hasTools: boolean,
  finalOptions: AIOptions & { temperature?: number; maxTokens?: number },
  provider: AIProvider,
): Promise<{ finalResult: string; allToolCalls: unknown[] }> {
  let finalResult = '';
  const allToolCalls: unknown[] = [];
  let attempts = 0;
  const maxAttempts = 5; // hard cap on model/tool round-trips

  while (attempts < maxAttempts) {
    attempts++;
    debugTools('Tool execution attempt %d/%d', attempts, maxAttempts);

    const opts = {
      model: modelInstance,
      messages,
      temperature: finalOptions.temperature,
      maxTokens: finalOptions.maxTokens,
      // Tools are only attached when the caller registered at least one.
      ...(hasTools && {
        tools: registeredTools,
        toolChoice: 'auto' as const,
      }),
    };
    // Avoid dumping full tool definitions into the debug log.
    debugTools('Request options: %o', { ...opts, tools: hasTools ? '[tools included]' : undefined });

    try {
      const result = await aiGenerateText(opts);
      debugTools('Result received - has text: %s, tool calls: %d', !!result.text, result.toolCalls?.length ?? 0);

      if (result.text) {
        messages.push({ role: 'assistant', content: result.text });
        finalResult = result.text;
        debugTools('Assistant message added to conversation');
      }

      if (result.toolCalls !== undefined && result.toolCalls.length > 0) {
        allToolCalls.push(...result.toolCalls);
        debugTools('Executing %d tool calls', result.toolCalls.length);

        const toolResults = await executeToolCalls(result.toolCalls, registeredTools);
        debugTools('Tool execution completed, results length: %d', toolResults.length);

        // Tool outputs are fed back as a user turn prompting continuation.
        messages.push({
          role: 'user',
          content: `${toolResults}\n\nUsing the tool outputs above, continue your response to the original request.`,
        });

        continue;
      }

      debugTools('No tool calls, conversation complete');
      break;
    } catch (error) {
      extractAndLogError(error, provider, 'generateTextWithTools');
      throw error;
    }
  }

  return { finalResult, allToolCalls };
}
|
|
362
|
+
|
|
363
|
+
async function executeToolCalls(
|
|
364
|
+
toolCalls: unknown[],
|
|
365
|
+
registeredTools: Record<string, RegisteredToolForAI>,
|
|
366
|
+
): Promise<string> {
|
|
367
|
+
debugTools('Executing %d tool calls', toolCalls.length);
|
|
368
|
+
let toolResults = '';
|
|
369
|
+
|
|
370
|
+
for (const toolCall of toolCalls) {
|
|
371
|
+
try {
|
|
372
|
+
const toolCallObj = toolCall as { toolName: string; args: Record<string, unknown> };
|
|
373
|
+
debugTools('Executing tool: %s with args: %o', toolCallObj.toolName, toolCallObj.args);
|
|
374
|
+
const tool = registeredTools[toolCallObj.toolName];
|
|
375
|
+
if (tool?.execute) {
|
|
376
|
+
const toolResult = await tool.execute(toolCallObj.args);
|
|
377
|
+
toolResults += `Tool ${toolCallObj.toolName} returned: ${String(toolResult)}\n\n`;
|
|
378
|
+
debugTools('Tool %s executed successfully', toolCallObj.toolName);
|
|
379
|
+
} else {
|
|
380
|
+
toolResults += `Error: Tool ${toolCallObj.toolName} not found or missing execute function\n\n`;
|
|
381
|
+
debugTools('Tool %s not found or missing execute function', toolCallObj.toolName);
|
|
382
|
+
}
|
|
383
|
+
} catch (error) {
|
|
384
|
+
const toolCallObj = toolCall as { toolName: string };
|
|
385
|
+
debugError('Tool execution error for %s: %O', toolCallObj.toolName, error);
|
|
386
|
+
toolResults += `Error executing tool ${toolCallObj.toolName}: ${String(error)}\n\n`;
|
|
387
|
+
}
|
|
388
|
+
}
|
|
389
|
+
|
|
390
|
+
return toolResults;
|
|
391
|
+
}
|
|
392
|
+
|
|
393
|
+
export async function generateTextWithTools(
|
|
394
|
+
context: AIContext,
|
|
395
|
+
prompt: string,
|
|
396
|
+
options: AIOptions = {},
|
|
397
|
+
registeredTools: Record<string, RegisteredToolForAI> = {},
|
|
398
|
+
): Promise<{ text: string; toolCalls?: unknown[] }> {
|
|
399
|
+
const resolvedProvider = options.provider ?? getDefaultProvider(context);
|
|
400
|
+
debugTools('generateTextWithTools called - provider: %s', resolvedProvider);
|
|
401
|
+
const finalOptions = { ...defaultOptions, ...options };
|
|
402
|
+
const model = finalOptions.model ?? getDefaultModel(resolvedProvider, context);
|
|
403
|
+
const modelInstance = getModel(resolvedProvider, model, context);
|
|
404
|
+
|
|
405
|
+
debugTools('Registered tools: %o', Object.keys(registeredTools));
|
|
406
|
+
const hasTools = Object.keys(registeredTools).length > 0;
|
|
407
|
+
debugTools('Has tools available: %s', hasTools);
|
|
408
|
+
|
|
409
|
+
const messages: Array<{ role: 'user' | 'assistant'; content: string }> = [{ role: 'user', content: prompt }];
|
|
410
|
+
|
|
411
|
+
const { finalResult, allToolCalls } = await executeToolConversation(
|
|
412
|
+
modelInstance,
|
|
413
|
+
messages,
|
|
414
|
+
registeredTools,
|
|
415
|
+
hasTools,
|
|
416
|
+
finalOptions,
|
|
417
|
+
resolvedProvider,
|
|
418
|
+
);
|
|
419
|
+
|
|
420
|
+
return {
|
|
421
|
+
text: finalResult,
|
|
422
|
+
toolCalls: allToolCalls,
|
|
423
|
+
};
|
|
424
|
+
}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
export { AIProvider } from './types';
|
|
2
|
+
export type {
|
|
3
|
+
CustomProviderConfig,
|
|
4
|
+
AIConfig,
|
|
5
|
+
AIContext,
|
|
6
|
+
AIOptions,
|
|
7
|
+
StructuredAIOptions,
|
|
8
|
+
StreamStructuredAIOptions,
|
|
9
|
+
AIToolValidationError,
|
|
10
|
+
RegisteredToolForAI,
|
|
11
|
+
} from './types';
|
|
12
|
+
|
|
13
|
+
export { DEFAULT_MODELS } from './constants';
|
|
14
|
+
|
|
15
|
+
export { createAIContext, getAvailableProviders, getDefaultProvider, getDefaultModel, getModel } from './context';
|
|
16
|
+
|
|
17
|
+
export {
|
|
18
|
+
generateText,
|
|
19
|
+
streamText,
|
|
20
|
+
generateTextStreaming,
|
|
21
|
+
generateTextWithImage,
|
|
22
|
+
generateStructuredData,
|
|
23
|
+
streamStructuredData,
|
|
24
|
+
generateTextWithTools,
|
|
25
|
+
} from './generators';
|
|
26
|
+
|
|
27
|
+
export { createCustomProvider } from './providers/custom';
|
|
28
|
+
|
|
29
|
+
export { z } from 'zod';
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import { createOpenAI } from '@ai-sdk/openai';
|
|
2
|
+
import { CustomProviderConfig } from '../types';
|
|
3
|
+
|
|
4
|
+
export function createCustomProvider(config: CustomProviderConfig) {
|
|
5
|
+
return createOpenAI({
|
|
6
|
+
name: config.name,
|
|
7
|
+
baseURL: config.baseUrl,
|
|
8
|
+
apiKey: config.apiKey,
|
|
9
|
+
});
|
|
10
|
+
}
|