@juspay/neurolink 1.5.0 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +85 -0
- package/LICENSE +21 -0
- package/README.md +4 -2
- package/dist/cli/commands/config.d.ts +35 -35
- package/dist/cli/index.js +63 -19
- package/dist/core/factory.js +12 -11
- package/dist/lib/core/factory.d.ts +40 -0
- package/dist/lib/core/factory.js +162 -0
- package/dist/lib/core/types.d.ts +111 -0
- package/dist/lib/core/types.js +68 -0
- package/dist/lib/index.d.ts +56 -0
- package/dist/lib/index.js +62 -0
- package/dist/lib/mcp/context-manager.d.ts +164 -0
- package/dist/lib/mcp/context-manager.js +273 -0
- package/dist/lib/mcp/factory.d.ts +144 -0
- package/dist/lib/mcp/factory.js +141 -0
- package/dist/lib/mcp/orchestrator.d.ts +170 -0
- package/dist/lib/mcp/orchestrator.js +372 -0
- package/dist/lib/mcp/registry.d.ts +188 -0
- package/dist/lib/mcp/registry.js +373 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +21 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +215 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.d.ts +10 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +303 -0
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +101 -0
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +428 -0
- package/dist/lib/neurolink.d.ts +53 -0
- package/dist/lib/neurolink.js +155 -0
- package/dist/lib/providers/amazonBedrock.d.ts +11 -0
- package/dist/lib/providers/amazonBedrock.js +256 -0
- package/dist/lib/providers/anthropic.d.ts +34 -0
- package/dist/lib/providers/anthropic.js +308 -0
- package/dist/lib/providers/azureOpenAI.d.ts +37 -0
- package/dist/lib/providers/azureOpenAI.js +339 -0
- package/dist/lib/providers/googleAIStudio.d.ts +30 -0
- package/dist/lib/providers/googleAIStudio.js +216 -0
- package/dist/lib/providers/googleVertexAI.d.ts +30 -0
- package/dist/lib/providers/googleVertexAI.js +409 -0
- package/dist/lib/providers/index.d.ts +30 -0
- package/dist/lib/providers/index.js +25 -0
- package/dist/lib/providers/openAI.d.ts +10 -0
- package/dist/lib/providers/openAI.js +169 -0
- package/dist/lib/utils/logger.d.ts +12 -0
- package/dist/lib/utils/logger.js +25 -0
- package/dist/lib/utils/providerUtils.d.ts +17 -0
- package/dist/lib/utils/providerUtils.js +73 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.js +11 -10
- package/dist/neurolink.js +13 -12
- package/dist/providers/amazonBedrock.js +22 -21
- package/dist/providers/anthropic.js +21 -20
- package/dist/providers/azureOpenAI.js +21 -20
- package/dist/providers/googleAIStudio.js +13 -12
- package/dist/providers/googleVertexAI.js +27 -26
- package/dist/providers/openAI.js +12 -11
- package/dist/utils/logger.d.ts +12 -0
- package/dist/utils/logger.js +25 -0
- package/dist/utils/providerUtils.d.ts +0 -3
- package/dist/utils/providerUtils.js +3 -2
- package/package.json +3 -17
|
@@ -0,0 +1,409 @@
|
|
|
1
|
+
import { createVertex } from '@ai-sdk/google-vertex';
|
|
2
|
+
// Cache for the optional Anthropic sub-module so the dynamic import is attempted only once.
let _createVertexAnthropic = null;
let _anthropicImportAttempted = false;

/**
 * Lazily resolves `createVertexAnthropic` from '@ai-sdk/google-vertex/anthropic'.
 *
 * The sub-module only ships with @ai-sdk/google-vertex ^2.2.0+, so the import
 * is attempted at most once; the outcome (factory function or null) is cached
 * and returned on every subsequent call.
 *
 * @returns The `createVertexAnthropic` factory, or null when unavailable.
 */
async function getCreateVertexAnthropic() {
    if (!_anthropicImportAttempted) {
        _anthropicImportAttempted = true;
        try {
            // Available in @ai-sdk/google-vertex ^2.2.0+ only.
            const { createVertexAnthropic } = await import('@ai-sdk/google-vertex/anthropic');
            _createVertexAnthropic = createVertexAnthropic;
            logger.debug('[GoogleVertexAI] Anthropic module successfully loaded');
        }
        catch {
            // Leave the cache as null; callers must fall back to Google models.
            logger.warn('[GoogleVertexAI] Anthropic module not available. Install @ai-sdk/google-vertex ^2.2.0 for Anthropic model support.');
        }
    }
    return _createVertexAnthropic;
}
|
|
24
|
+
import { streamText, generateText, Output } from 'ai';
|
|
25
|
+
import { logger } from '../utils/logger.js';
|
|
26
|
+
// Fallback system context applied whenever a caller supplies no system prompt.
const DEFAULT_SYSTEM_CONTEXT = {
    systemPrompt: 'You are a helpful AI assistant.',
};
|
|
30
|
+
// --- Environment-driven configuration helpers ---

// Required GCP project id; throws early so misconfiguration surfaces clearly.
const getGCPVertexBreezeProjectId = () => {
    const projectId = process.env.GOOGLE_VERTEX_PROJECT;
    if (!projectId) {
        throw new Error('GOOGLE_VERTEX_PROJECT environment variable is not set');
    }
    return projectId;
};

// Vertex region; falls back to us-east5 when unset or empty.
const getGCPVertexBreezeLocation = () => process.env.GOOGLE_VERTEX_LOCATION || 'us-east5';

// Path to a service-account JSON file (auth method 1).
const getGoogleApplicationCredentials = () => process.env.GOOGLE_APPLICATION_CREDENTIALS;

// Raw service-account JSON string (auth method 2).
const getGoogleServiceAccountKey = () => process.env.GOOGLE_SERVICE_ACCOUNT_KEY;

// Client-email half of the env-var credential pair (auth method 3).
const getGoogleClientEmail = () => process.env.GOOGLE_AUTH_CLIENT_EMAIL;

// Private-key half of the env-var credential pair (auth method 3).
const getGooglePrivateKey = () => process.env.GOOGLE_AUTH_PRIVATE_KEY;

// Model id override; falls back to a Claude Sonnet release.
const getVertexModelId = () => process.env.VERTEX_MODEL_ID || 'claude-sonnet-4@20250514';

// Predicates describing which of the three auth methods are configured.
const hasPrincipalAccountAuth = () => !!getGoogleApplicationCredentials();
const hasServiceAccountKeyAuth = () => !!getGoogleServiceAccountKey();
const hasServiceAccountEnvAuth = () => !!(getGoogleClientEmail() && getGooglePrivateKey());
const hasValidAuth = () => hasPrincipalAccountAuth() || hasServiceAccountKeyAuth() || hasServiceAccountEnvAuth();
|
|
68
|
+
// Setup environment for Google authentication.
//
// Normalizes auth methods 2 and 3 down to method 1 by materializing the
// credentials as a temporary JSON file and pointing
// GOOGLE_APPLICATION_CREDENTIALS at it. Method 1 (a pre-existing file path)
// needs no work here.
//
// NOTE(review): the temporary credential files are never deleted; consider
// removing them on process exit.

/**
 * Writes credential JSON to a unique temp file and exports its path via
 * GOOGLE_APPLICATION_CREDENTIALS. Throws on filesystem failure.
 * (Shared by auth methods 2 and 3, which previously duplicated this dance.)
 */
async function writeTempGoogleCredentials(contents, filePrefix) {
    // Dynamic imports keep this ESM-friendly and defer loading fs/path/os
    // until a temp file is actually needed.
    const { writeFileSync } = await import('fs');
    const { join } = await import('path');
    const { tmpdir } = await import('os');
    const tempFile = join(tmpdir(), `${filePrefix}-${Date.now()}.json`);
    writeFileSync(tempFile, contents);
    process.env.GOOGLE_APPLICATION_CREDENTIALS = tempFile;
}

const setupGoogleAuth = async () => {
    const functionTag = 'setupGoogleAuth';
    // Method 2: Service Account Key (JSON string) - create temporary file.
    if (hasServiceAccountKeyAuth() && !hasPrincipalAccountAuth()) {
        const serviceAccountKey = getGoogleServiceAccountKey();
        logger.debug(`[${functionTag}] Service account key auth (JSON string)`, {
            hasServiceAccountKey: !!serviceAccountKey,
            authMethod: 'service_account_key'
        });
        try {
            // Parse to validate JSON before persisting it anywhere.
            JSON.parse(serviceAccountKey);
            await writeTempGoogleCredentials(serviceAccountKey, 'gcp-credentials');
            logger.debug(`[${functionTag}] Created temporary credentials file`, {
                tempFile: '[CREATED]',
                authMethod: 'service_account_key_temp_file'
            });
        }
        catch (error) {
            logger.error(`[${functionTag}] Failed to parse service account key`, {
                error: error instanceof Error ? error.message : String(error)
            });
            throw new Error('Invalid GOOGLE_SERVICE_ACCOUNT_KEY format. Must be valid JSON.');
        }
    }
    // Method 3: Service Account Environment Variables - assemble a minimal
    // service-account document from the two env vars.
    if (hasServiceAccountEnvAuth() && !hasPrincipalAccountAuth() && !hasServiceAccountKeyAuth()) {
        const clientEmail = getGoogleClientEmail();
        const privateKey = getGooglePrivateKey();
        logger.debug(`[${functionTag}] Service account env auth (separate variables)`, {
            hasClientEmail: !!clientEmail,
            hasPrivateKey: !!privateKey,
            authMethod: 'service_account_env'
        });
        const serviceAccount = {
            type: 'service_account',
            project_id: getGCPVertexBreezeProjectId(),
            client_email: clientEmail,
            // Env vars commonly store literal "\n" sequences; restore real newlines.
            private_key: privateKey.replace(/\\n/g, '\n'),
            auth_uri: 'https://accounts.google.com/o/oauth2/auth',
            token_uri: 'https://oauth2.googleapis.com/token'
        };
        try {
            await writeTempGoogleCredentials(JSON.stringify(serviceAccount, null, 2), 'gcp-credentials-env');
            logger.debug(`[${functionTag}] Created temporary credentials file from env vars`, {
                tempFile: '[CREATED]',
                authMethod: 'service_account_env_temp_file'
            });
        }
        catch (error) {
            logger.error(`[${functionTag}] Failed to create service account file from env vars`, {
                error: error instanceof Error ? error.message : String(error)
            });
            throw new Error('Failed to create temporary service account file from environment variables.');
        }
    }
};
|
|
139
|
+
// Vertex AI setup with multiple authentication support.
//
// Builds the settings object handed to createVertex()/createVertexAnthropic().
// Runs setupGoogleAuth() first so that auth methods 2 and 3 are reduced to a
// GOOGLE_APPLICATION_CREDENTIALS file path; throws when nothing is configured.
const createVertexSettings = async () => {
    const functionTag = 'createVertexSettings';
    // Normalize auth methods 2/3 into method 1 before reading any settings.
    await setupGoogleAuth();
    const baseSettings = {
        project: getGCPVertexBreezeProjectId(),
        location: getGCPVertexBreezeLocation()
    };
    // Method 1: Principal Account Authentication (file path) - recommended
    // for production.
    if (hasPrincipalAccountAuth()) {
        logger.debug(`[${functionTag}] Principal account auth (file path)`, {
            credentialsPath: getGoogleApplicationCredentials() ? '[PROVIDED]' : '[NOT_PROVIDED]',
            authMethod: 'principal_account_file'
        });
        return baseSettings;
    }
    // Methods 2 & 3: setupGoogleAuth() has already materialized credentials
    // into GOOGLE_APPLICATION_CREDENTIALS.
    if (hasServiceAccountKeyAuth() || hasServiceAccountEnvAuth()) {
        logger.debug(`[${functionTag}] Alternative auth method configured`, {
            authMethod: hasServiceAccountKeyAuth() ? 'service_account_key' : 'service_account_env',
            credentialsSet: !!process.env.GOOGLE_APPLICATION_CREDENTIALS
        });
        return baseSettings;
    }
    // No valid authentication found: log the full picture, then fail loudly.
    logger.error(`[${functionTag}] No valid authentication method found`, {
        authMethod: 'none',
        hasPrincipalAccount: hasPrincipalAccountAuth(),
        hasServiceAccountKey: hasServiceAccountKeyAuth(),
        hasServiceAccountEnv: hasServiceAccountEnvAuth(),
        availableMethods: [
            'GOOGLE_APPLICATION_CREDENTIALS (file path)',
            'GOOGLE_SERVICE_ACCOUNT_KEY (JSON string)',
            'GOOGLE_AUTH_CLIENT_EMAIL + GOOGLE_AUTH_PRIVATE_KEY (env vars)'
        ]
    });
    throw new Error('No valid Google Vertex AI authentication found. Please provide one of:\n' +
        '1. GOOGLE_APPLICATION_CREDENTIALS (path to service account file)\n' +
        '2. GOOGLE_SERVICE_ACCOUNT_KEY (JSON string of service account)\n' +
        '3. GOOGLE_AUTH_CLIENT_EMAIL + GOOGLE_AUTH_PRIVATE_KEY (environment variables)');
};
|
|
182
|
+
// True when the model name refers to an Anthropic model: on Vertex AI every
// Anthropic model id contains "claude" (case-insensitive, anywhere in the id).
const isAnthropicModel = (modelName) => modelName.toLowerCase().includes('claude');
|
|
187
|
+
// Lazily-created, module-wide Vertex client. Built on first use so that
// authentication setup only runs when a model is actually requested.
let _vertex = null;

/**
 * Returns the shared Vertex provider instance, creating it on first call.
 */
async function getVertexInstance() {
    if (_vertex === null) {
        _vertex = createVertex(await createVertexSettings());
    }
    return _vertex;
}
|
|
196
|
+
// Google Vertex AI provider with enhanced error handling and Anthropic model support.
export class GoogleVertexAI {
    modelName;
    /**
     * Initializes a new instance of GoogleVertexAI.
     *
     * Construction never throws: validation problems are logged and surface
     * later in getModel() / streamText() / generateText().
     *
     * @param modelName - Optional model name to override the default from config
     */
    constructor(modelName) {
        const functionTag = 'GoogleVertexAI.constructor';
        this.modelName = modelName || getVertexModelId();
        try {
            logger.debug(`[${functionTag}] Initialization started`, {
                modelName: this.modelName,
                isAnthropic: isAnthropicModel(this.modelName)
            });
            const hasPrincipal = hasPrincipalAccountAuth();
            // NOTE(review): getGCPVertexBreezeProjectId() throws when the env var
            // is missing, which lands in the catch below and is only logged.
            logger.debug(`[${functionTag}] Authentication validation`, {
                hasPrincipalAccountAuth: hasPrincipal,
                projectId: getGCPVertexBreezeProjectId() || 'MISSING',
                location: getGCPVertexBreezeLocation() || 'MISSING'
            });
            if (hasPrincipal) {
                logger.debug(`[${functionTag}] Auth method selected`, {
                    authMethod: 'principal_account',
                    hasGoogleApplicationCredentials: !!getGoogleApplicationCredentials()
                });
            }
            else {
                logger.warn(`[${functionTag}] Auth method missing`, {
                    authMethod: 'none',
                    hasPrincipalAccountAuth: hasPrincipal
                });
            }
            logger.debug(`[${functionTag}] Initialization completed`, {
                modelName: this.modelName,
                isAnthropic: isAnthropicModel(this.modelName),
                authMethod: hasPrincipalAccountAuth() ? 'principal_account' : 'none',
                success: true
            });
        }
        catch (err) {
            // Deliberately swallowed: keep construction infallible.
            logger.error(`[${functionTag}] Initialization failed`, {
                message: 'Error in initializing Google Vertex AI',
                modelName: this.modelName,
                isAnthropic: isAnthropicModel(this.modelName),
                error: err instanceof Error ? err.message : String(err),
                stack: err instanceof Error ? err.stack : undefined
            });
        }
    }
    /**
     * Gets the appropriate model instance (Google or Anthropic).
     * Throws when an Anthropic model is requested but the optional
     * '@ai-sdk/google-vertex/anthropic' sub-module is unavailable.
     * @private
     */
    async getModel() {
        if (isAnthropicModel(this.modelName)) {
            logger.debug('GoogleVertexAI.getModel - Anthropic model selected', {
                modelName: this.modelName
            });
            const createVertexAnthropic = await getCreateVertexAnthropic();
            if (!createVertexAnthropic) {
                throw new Error(`Anthropic model "${this.modelName}" requested but @ai-sdk/google-vertex/anthropic is not available. ` +
                    'Please install @ai-sdk/google-vertex ^2.2.0 or use a Google model instead.');
            }
            const settings = await createVertexSettings();
            const vertexAnthropic = createVertexAnthropic(settings);
            return vertexAnthropic(this.modelName);
        }
        const vertex = await getVertexInstance();
        return vertex(this.modelName);
    }
    /**
     * Processes text using streaming approach with enhanced error handling callbacks.
     * @param optionsOrPrompt - A prompt string, or an options object ({ prompt, temperature, maxTokens, systemPrompt, schema })
     * @param analysisSchema - Optional Zod schema or Schema object for output validation (fallback when options.schema is absent)
     * @returns Promise resolving to a StreamTextResult
     * @throws Re-throws any failure so callers can trigger provider fallback
     */
    async streamText(optionsOrPrompt, analysisSchema) {
        const functionTag = 'GoogleVertexAI.streamText';
        const provider = 'vertex';
        let chunkCount = 0;
        // Parse parameters before the try block so the catch handler can safely
        // report prompt details. Previously `prompt` was declared inside the try
        // and the catch's `prompt.length` raised a ReferenceError that masked
        // the real failure.
        const options = typeof optionsOrPrompt === 'string'
            ? { prompt: optionsOrPrompt }
            : optionsOrPrompt;
        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
        // Schema precedence: options.schema wins over the positional fallback.
        const finalSchema = schema || analysisSchema;
        try {
            logger.debug(`[${functionTag}] Stream request started`, {
                provider,
                modelName: this.modelName,
                isAnthropic: isAnthropicModel(this.modelName),
                promptLength: prompt.length,
                temperature,
                maxTokens,
                hasSchema: !!finalSchema
            });
            const model = await this.getModel();
            const streamOptions = {
                model: model,
                prompt: prompt,
                system: systemPrompt,
                temperature,
                maxTokens,
                onError: (event) => {
                    const error = event.error;
                    const errorMessage = error instanceof Error ? error.message : String(error);
                    const errorStack = error instanceof Error ? error.stack : undefined;
                    logger.error(`[${functionTag}] Stream text error`, {
                        provider,
                        modelName: this.modelName,
                        error: errorMessage,
                        stack: errorStack,
                        promptLength: prompt.length,
                        chunkCount
                    });
                },
                onFinish: (event) => {
                    logger.debug(`[${functionTag}] Stream text finished`, {
                        provider,
                        modelName: this.modelName,
                        finishReason: event.finishReason,
                        usage: event.usage,
                        totalChunks: chunkCount,
                        promptLength: prompt.length,
                        responseLength: event.text?.length || 0
                    });
                },
                onChunk: (event) => {
                    chunkCount++;
                    logger.debug(`[${functionTag}] Stream text chunk`, {
                        provider,
                        modelName: this.modelName,
                        chunkNumber: chunkCount,
                        chunkLength: event.chunk.text?.length || 0,
                        chunkType: event.chunk.type
                    });
                }
            };
            // BUG FIX: previously this attached `analysisSchema` directly
            // (guarded by `if (analysisSchema)`), silently ignoring
            // `options.schema` even though `finalSchema` was computed above.
            // Now consistent with generateText().
            if (finalSchema) {
                streamOptions.experimental_output = Output.object({ schema: finalSchema });
            }
            const result = streamText(streamOptions);
            return result;
        }
        catch (err) {
            logger.error(`[${functionTag}] Exception`, {
                provider,
                modelName: this.modelName,
                message: 'Error in streaming text',
                err: String(err),
                promptLength: prompt.length
            });
            throw err; // Re-throw error to trigger fallback
        }
    }
    /**
     * Processes text using non-streaming approach with optional schema validation.
     * @param optionsOrPrompt - A prompt string, or an options object ({ prompt, temperature, maxTokens, systemPrompt, schema })
     * @param analysisSchema - Optional Zod schema or Schema object for output validation (fallback when options.schema is absent)
     * @returns Promise resolving to a GenerateTextResult
     * @throws Re-throws any failure so callers can trigger provider fallback
     */
    async generateText(optionsOrPrompt, analysisSchema) {
        const functionTag = 'GoogleVertexAI.generateText';
        const provider = 'vertex';
        // Parse parameters up front (mirrors streamText) - supports both a bare
        // prompt string and an options object.
        const options = typeof optionsOrPrompt === 'string'
            ? { prompt: optionsOrPrompt }
            : optionsOrPrompt;
        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
        // Schema precedence: options.schema wins over the positional fallback.
        const finalSchema = schema || analysisSchema;
        try {
            logger.debug(`[${functionTag}] Generate request started`, {
                provider,
                modelName: this.modelName,
                isAnthropic: isAnthropicModel(this.modelName),
                promptLength: prompt.length,
                temperature,
                maxTokens
            });
            const model = await this.getModel();
            const generateOptions = {
                model: model,
                prompt: prompt,
                system: systemPrompt,
                temperature,
                maxTokens
            };
            if (finalSchema) {
                generateOptions.experimental_output = Output.object({ schema: finalSchema });
            }
            const result = await generateText(generateOptions);
            logger.debug(`[${functionTag}] Generate text completed`, {
                provider,
                modelName: this.modelName,
                usage: result.usage,
                finishReason: result.finishReason,
                responseLength: result.text?.length || 0
            });
            return result;
        }
        catch (err) {
            logger.error(`[${functionTag}] Exception`, {
                provider,
                modelName: this.modelName,
                message: 'Error in generating text',
                err: String(err)
            });
            throw err; // Re-throw error to trigger fallback
        }
    }
}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Provider exports for Vercel AI SDK integration
|
|
3
|
+
* This file centralizes all AI provider classes for easy import and usage
|
|
4
|
+
*/
|
|
5
|
+
export { GoogleVertexAI } from './googleVertexAI.js';
|
|
6
|
+
export { AmazonBedrock } from './amazonBedrock.js';
|
|
7
|
+
export { OpenAI } from './openAI.js';
|
|
8
|
+
export { AnthropicProvider } from './anthropic.js';
|
|
9
|
+
export { AzureOpenAIProvider } from './azureOpenAI.js';
|
|
10
|
+
export { GoogleAIStudio } from './googleAIStudio.js';
|
|
11
|
+
export type { AIProvider } from '../core/types.js';
|
|
12
|
+
/**
|
|
13
|
+
* Provider registry for dynamic provider instantiation
|
|
14
|
+
*/
|
|
15
|
+
export declare const PROVIDERS: {
|
|
16
|
+
readonly vertex: "GoogleVertexAI";
|
|
17
|
+
readonly bedrock: "AmazonBedrock";
|
|
18
|
+
readonly openai: "OpenAI";
|
|
19
|
+
readonly anthropic: "AnthropicProvider";
|
|
20
|
+
readonly azure: "AzureOpenAIProvider";
|
|
21
|
+
readonly 'google-ai': "GoogleAIStudio";
|
|
22
|
+
};
|
|
23
|
+
/**
|
|
24
|
+
* Type for valid provider names
|
|
25
|
+
*/
|
|
26
|
+
export type ProviderName = keyof typeof PROVIDERS;
|
|
27
|
+
/**
|
|
28
|
+
* List of all available provider names
|
|
29
|
+
*/
|
|
30
|
+
export declare const AVAILABLE_PROVIDERS: ProviderName[];
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
/**
 * Provider exports for Vercel AI SDK integration.
 * Centralizes every AI provider class for easy import and usage.
 */
export { GoogleVertexAI } from './googleVertexAI.js';
export { AmazonBedrock } from './amazonBedrock.js';
export { OpenAI } from './openAI.js';
export { AnthropicProvider } from './anthropic.js';
export { AzureOpenAIProvider } from './azureOpenAI.js';
export { GoogleAIStudio } from './googleAIStudio.js';
/**
 * Registry mapping provider keys to exported class names,
 * used for dynamic provider instantiation.
 */
export const PROVIDERS = {
    vertex: 'GoogleVertexAI',
    bedrock: 'AmazonBedrock',
    openai: 'OpenAI',
    anthropic: 'AnthropicProvider',
    azure: 'AzureOpenAIProvider',
    'google-ai': 'GoogleAIStudio'
};
/**
 * Every provider key, derived from the registry so the two can never drift.
 */
export const AVAILABLE_PROVIDERS = Object.keys(PROVIDERS);
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { ZodType, ZodTypeDef } from 'zod';
|
|
2
|
+
import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
|
|
3
|
+
import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
|
|
4
|
+
export declare class OpenAI implements AIProvider {
|
|
5
|
+
private modelName;
|
|
6
|
+
private model;
|
|
7
|
+
constructor(modelName?: string | null);
|
|
8
|
+
streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
|
|
9
|
+
generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
|
|
10
|
+
}
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
import { openai } from '@ai-sdk/openai';
|
|
2
|
+
import { streamText, generateText, Output } from 'ai';
|
|
3
|
+
import { logger } from '../utils/logger.js';
|
|
4
|
+
// Fallback system context applied whenever a caller supplies no system prompt.
const DEFAULT_SYSTEM_CONTEXT = {
    systemPrompt: 'You are a helpful AI assistant.',
};

// --- Environment-driven configuration helpers ---

// Required API key; throws early so misconfiguration surfaces clearly.
const getOpenAIApiKey = () => {
    const apiKey = process.env.OPENAI_API_KEY;
    if (!apiKey) {
        throw new Error('OPENAI_API_KEY environment variable is not set');
    }
    return apiKey;
};

// Model id override; falls back to gpt-4o when unset or empty.
const getOpenAIModel = () => process.env.OPENAI_MODEL || 'gpt-4o';
|
|
19
|
+
// OpenAI provider with enhanced error handling.
export class OpenAI {
    modelName;
    model;
    /**
     * Initializes the OpenAI provider.
     * @param modelName - Optional model name; falls back to OPENAI_MODEL or 'gpt-4o'.
     * @throws When OPENAI_API_KEY is not configured.
     */
    constructor(modelName) {
        const functionTag = 'OpenAI.constructor';
        this.modelName = modelName || getOpenAIModel();
        try {
            logger.debug(`[${functionTag}] Function called`, { modelName: this.modelName });
            // Assigning the key back to the environment doubles as validation:
            // getOpenAIApiKey() throws when OPENAI_API_KEY is missing.
            process.env.OPENAI_API_KEY = getOpenAIApiKey();
            this.model = openai(this.modelName);
            logger.debug(`[${functionTag}] Function result`, {
                modelName: this.modelName,
                success: true
            });
        }
        catch (err) {
            // Log at error level so failures are visible without NEUROLINK_DEBUG
            // (previously debug-level, silently hiding init errors; now
            // consistent with the GoogleVertexAI provider).
            logger.error(`[${functionTag}] Exception`, {
                message: 'Error in initializing OpenAI',
                modelName: this.modelName,
                err: String(err)
            });
            throw err;
        }
    }
    /**
     * Processes text using streaming approach with error-handling callbacks.
     * @param optionsOrPrompt - A prompt string, or an options object ({ prompt, temperature, maxTokens, systemPrompt, schema })
     * @param analysisSchema - Optional Zod schema or Schema object (fallback when options.schema is absent)
     * @returns Promise resolving to a StreamTextResult
     * @throws Re-throws any failure so callers can trigger provider fallback
     */
    async streamText(optionsOrPrompt, analysisSchema) {
        const functionTag = 'OpenAI.streamText';
        const provider = 'openai';
        let chunkCount = 0;
        try {
            // Parse parameters - support both string and options object.
            const options = typeof optionsOrPrompt === 'string'
                ? { prompt: optionsOrPrompt }
                : optionsOrPrompt;
            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
            // Schema precedence: options.schema wins over the positional fallback.
            const finalSchema = schema || analysisSchema;
            logger.debug(`[${functionTag}] Stream text started`, {
                provider,
                modelName: this.modelName,
                promptLength: prompt.length,
                temperature,
                maxTokens
            });
            const streamOptions = {
                model: this.model,
                prompt: prompt,
                system: systemPrompt,
                temperature,
                maxTokens,
                onError: (event) => {
                    const error = event.error;
                    const errorMessage = error instanceof Error ? error.message : String(error);
                    const errorStack = error instanceof Error ? error.stack : undefined;
                    // Error level: stream failures must not be hidden behind the
                    // debug flag (matches GoogleVertexAI).
                    logger.error(`[${functionTag}] Stream text error`, {
                        provider,
                        modelName: this.modelName,
                        error: errorMessage,
                        stack: errorStack,
                        promptLength: prompt.length,
                        chunkCount
                    });
                },
                onFinish: (event) => {
                    logger.debug(`[${functionTag}] Stream text finished`, {
                        provider,
                        modelName: this.modelName,
                        finishReason: event.finishReason,
                        usage: event.usage,
                        totalChunks: chunkCount,
                        promptLength: prompt.length,
                        responseLength: event.text?.length || 0
                    });
                },
                onChunk: (event) => {
                    chunkCount++;
                    logger.debug(`[${functionTag}] Stream text chunk`, {
                        provider,
                        modelName: this.modelName,
                        chunkNumber: chunkCount,
                        chunkLength: event.chunk.text?.length || 0,
                        chunkType: event.chunk.type
                    });
                }
            };
            if (finalSchema) {
                streamOptions.experimental_output = Output.object({ schema: finalSchema });
            }
            const result = streamText(streamOptions);
            return result;
        }
        catch (err) {
            logger.error(`[${functionTag}] Exception`, {
                provider,
                modelName: this.modelName,
                message: 'Error in streaming text',
                err: String(err)
            });
            throw err; // Re-throw error to trigger fallback
        }
    }
    /**
     * Processes text using non-streaming approach with optional schema validation.
     * @param optionsOrPrompt - A prompt string, or an options object ({ prompt, temperature, maxTokens, systemPrompt, schema })
     * @param analysisSchema - Optional Zod schema or Schema object (fallback when options.schema is absent)
     * @returns Promise resolving to a GenerateTextResult
     * @throws Re-throws any failure so callers can trigger provider fallback
     */
    async generateText(optionsOrPrompt, analysisSchema) {
        const functionTag = 'OpenAI.generateText';
        const provider = 'openai';
        try {
            // Parse parameters - support both string and options object.
            const options = typeof optionsOrPrompt === 'string'
                ? { prompt: optionsOrPrompt }
                : optionsOrPrompt;
            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
            // Schema precedence: options.schema wins over the positional fallback.
            const finalSchema = schema || analysisSchema;
            logger.debug(`[${functionTag}] Generate text started`, {
                provider,
                modelName: this.modelName,
                promptLength: prompt.length,
                temperature,
                maxTokens
            });
            const generateOptions = {
                model: this.model,
                prompt: prompt,
                system: systemPrompt,
                temperature,
                maxTokens
            };
            if (finalSchema) {
                generateOptions.experimental_output = Output.object({ schema: finalSchema });
            }
            const result = await generateText(generateOptions);
            logger.debug(`[${functionTag}] Generate text completed`, {
                provider,
                modelName: this.modelName,
                usage: result.usage,
                finishReason: result.finishReason,
                responseLength: result.text?.length || 0
            });
            return result;
        }
        catch (err) {
            logger.error(`[${functionTag}] Exception`, {
                provider,
                modelName: this.modelName,
                message: 'Error in generating text',
                err: String(err)
            });
            throw err; // Re-throw error to trigger fallback
        }
    }
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* NeuroLink Logger Utility
|
|
3
|
+
*
|
|
4
|
+
* Provides conditional logging based on NEUROLINK_DEBUG environment variable
|
|
5
|
+
*/
|
|
6
|
+
export declare const logger: {
|
|
7
|
+
debug: (...args: any[]) => void;
|
|
8
|
+
info: (...args: any[]) => void;
|
|
9
|
+
warn: (...args: any[]) => void;
|
|
10
|
+
error: (...args: any[]) => void;
|
|
11
|
+
always: (...args: any[]) => void;
|
|
12
|
+
};
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* NeuroLink Logger Utility
|
|
3
|
+
*
|
|
4
|
+
* Provides conditional logging based on NEUROLINK_DEBUG environment variable
|
|
5
|
+
*/
|
|
6
|
+
export const logger = {
|
|
7
|
+
debug: (...args) => {
|
|
8
|
+
if (process.env.NEUROLINK_DEBUG === 'true') {
|
|
9
|
+
console.log(...args);
|
|
10
|
+
}
|
|
11
|
+
},
|
|
12
|
+
info: (...args) => {
|
|
13
|
+
// Completely disabled for clean CLI demo output
|
|
14
|
+
},
|
|
15
|
+
warn: (...args) => {
|
|
16
|
+
// Completely disabled for clean CLI demo output
|
|
17
|
+
},
|
|
18
|
+
error: (...args) => {
|
|
19
|
+
// Always show errors regardless of debug mode
|
|
20
|
+
console.error(...args);
|
|
21
|
+
},
|
|
22
|
+
always: (...args) => {
|
|
23
|
+
console.log(...args);
|
|
24
|
+
}
|
|
25
|
+
};
|