@juspay/neurolink 1.5.1 → 1.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/CHANGELOG.md +49 -0
  2. package/README.md +1 -1
  3. package/dist/cli/commands/config.d.ts +35 -35
  4. package/dist/cli/index.js +63 -19
  5. package/dist/core/factory.js +12 -11
  6. package/dist/lib/core/factory.d.ts +40 -0
  7. package/dist/lib/core/factory.js +162 -0
  8. package/dist/lib/core/types.d.ts +111 -0
  9. package/dist/lib/core/types.js +68 -0
  10. package/dist/lib/index.d.ts +56 -0
  11. package/dist/lib/index.js +62 -0
  12. package/dist/lib/mcp/context-manager.d.ts +164 -0
  13. package/dist/lib/mcp/context-manager.js +273 -0
  14. package/dist/lib/mcp/factory.d.ts +144 -0
  15. package/dist/lib/mcp/factory.js +141 -0
  16. package/dist/lib/mcp/orchestrator.d.ts +170 -0
  17. package/dist/lib/mcp/orchestrator.js +372 -0
  18. package/dist/lib/mcp/registry.d.ts +188 -0
  19. package/dist/lib/mcp/registry.js +373 -0
  20. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +21 -0
  21. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +215 -0
  22. package/dist/lib/mcp/servers/ai-providers/ai-core-server.d.ts +10 -0
  23. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +303 -0
  24. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +101 -0
  25. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +428 -0
  26. package/dist/lib/neurolink.d.ts +53 -0
  27. package/dist/lib/neurolink.js +155 -0
  28. package/dist/lib/providers/amazonBedrock.d.ts +11 -0
  29. package/dist/lib/providers/amazonBedrock.js +256 -0
  30. package/dist/lib/providers/anthropic.d.ts +34 -0
  31. package/dist/lib/providers/anthropic.js +308 -0
  32. package/dist/lib/providers/azureOpenAI.d.ts +37 -0
  33. package/dist/lib/providers/azureOpenAI.js +339 -0
  34. package/dist/lib/providers/googleAIStudio.d.ts +30 -0
  35. package/dist/lib/providers/googleAIStudio.js +216 -0
  36. package/dist/lib/providers/googleVertexAI.d.ts +30 -0
  37. package/dist/lib/providers/googleVertexAI.js +409 -0
  38. package/dist/lib/providers/index.d.ts +30 -0
  39. package/dist/lib/providers/index.js +25 -0
  40. package/dist/lib/providers/openAI.d.ts +10 -0
  41. package/dist/lib/providers/openAI.js +169 -0
  42. package/dist/lib/utils/logger.d.ts +12 -0
  43. package/dist/lib/utils/logger.js +25 -0
  44. package/dist/lib/utils/providerUtils.d.ts +17 -0
  45. package/dist/lib/utils/providerUtils.js +73 -0
  46. package/dist/mcp/servers/ai-providers/ai-core-server.js +11 -10
  47. package/dist/neurolink.js +13 -12
  48. package/dist/providers/amazonBedrock.js +22 -21
  49. package/dist/providers/anthropic.js +21 -20
  50. package/dist/providers/azureOpenAI.js +21 -20
  51. package/dist/providers/googleAIStudio.js +13 -12
  52. package/dist/providers/googleVertexAI.js +27 -26
  53. package/dist/providers/openAI.js +12 -11
  54. package/dist/utils/logger.d.ts +12 -0
  55. package/dist/utils/logger.js +25 -0
  56. package/dist/utils/providerUtils.d.ts +0 -3
  57. package/dist/utils/providerUtils.js +3 -2
  58. package/package.json +1 -1
@@ -0,0 +1,339 @@
1
+ /**
2
+ * Azure OpenAI Provider
3
+ *
4
+ * Enterprise-grade OpenAI integration through Microsoft Azure.
5
+ * Supports all OpenAI models with enhanced security and compliance.
6
+ */
7
+ import { AIProviderName } from '../core/types.js';
8
+ import { logger } from '../utils/logger.js';
9
+ export class AzureOpenAIProvider {
10
+ name = AIProviderName.AZURE;
11
+ apiKey;
12
+ endpoint;
13
+ deploymentId;
14
+ apiVersion;
15
+ constructor() {
16
+ this.apiKey = this.getApiKey();
17
+ this.endpoint = this.getEndpoint();
18
+ this.deploymentId = this.getDeploymentId();
19
+ this.apiVersion = process.env.AZURE_OPENAI_API_VERSION || '2024-02-15-preview';
20
+ logger.debug(`[AzureOpenAIProvider] Initialized with endpoint: ${this.endpoint}, deployment: ${this.deploymentId}`);
21
+ }
22
+ getApiKey() {
23
+ const apiKey = process.env.AZURE_OPENAI_API_KEY;
24
+ if (!apiKey) {
25
+ throw new Error('AZURE_OPENAI_API_KEY environment variable is required');
26
+ }
27
+ return apiKey;
28
+ }
29
+ getEndpoint() {
30
+ const endpoint = process.env.AZURE_OPENAI_ENDPOINT;
31
+ if (!endpoint) {
32
+ throw new Error('AZURE_OPENAI_ENDPOINT environment variable is required');
33
+ }
34
+ return endpoint.replace(/\/$/, ''); // Remove trailing slash
35
+ }
36
+ getDeploymentId() {
37
+ const deploymentId = process.env.AZURE_OPENAI_DEPLOYMENT_ID;
38
+ if (!deploymentId) {
39
+ throw new Error('AZURE_OPENAI_DEPLOYMENT_ID environment variable is required');
40
+ }
41
+ return deploymentId;
42
+ }
43
+ getApiUrl(stream = false) {
44
+ return `${this.endpoint}/openai/deployments/${this.deploymentId}/chat/completions?api-version=${this.apiVersion}`;
45
+ }
46
+ async makeRequest(body, stream = false) {
47
+ const url = this.getApiUrl(stream);
48
+ const headers = {
49
+ 'Content-Type': 'application/json',
50
+ 'api-key': this.apiKey
51
+ };
52
+ logger.debug(`[AzureOpenAIProvider.makeRequest] ${stream ? 'Streaming' : 'Non-streaming'} request to deployment: ${this.deploymentId}`);
53
+ logger.debug(`[AzureOpenAIProvider.makeRequest] Max tokens: ${body.max_tokens || 'default'}, Temperature: ${body.temperature || 'default'}`);
54
+ const response = await fetch(url, {
55
+ method: 'POST',
56
+ headers,
57
+ body: JSON.stringify(body)
58
+ });
59
+ if (!response.ok) {
60
+ const errorText = await response.text();
61
+ logger.error(`[AzureOpenAIProvider.makeRequest] API error ${response.status}: ${errorText}`);
62
+ throw new Error(`Azure OpenAI API error ${response.status}: ${errorText}`);
63
+ }
64
+ return response;
65
+ }
66
+ async generateText(optionsOrPrompt, schema) {
67
+ logger.debug('[AzureOpenAIProvider.generateText] Starting text generation');
68
+ // Parse parameters with backward compatibility
69
+ const options = typeof optionsOrPrompt === 'string'
70
+ ? { prompt: optionsOrPrompt }
71
+ : optionsOrPrompt;
72
+ const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are a helpful AI assistant.' } = options;
73
+ logger.debug(`[AzureOpenAIProvider.generateText] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}`);
74
+ const messages = [];
75
+ if (systemPrompt) {
76
+ messages.push({
77
+ role: 'system',
78
+ content: systemPrompt
79
+ });
80
+ }
81
+ messages.push({
82
+ role: 'user',
83
+ content: prompt
84
+ });
85
+ const requestBody = {
86
+ messages,
87
+ temperature,
88
+ max_tokens: maxTokens
89
+ };
90
+ try {
91
+ const response = await this.makeRequest(requestBody);
92
+ const data = await response.json();
93
+ logger.debug(`[AzureOpenAIProvider.generateText] Success. Generated ${data.usage.completion_tokens} tokens`);
94
+ const content = data.choices[0]?.message?.content || '';
95
+ return {
96
+ content,
97
+ provider: this.name,
98
+ model: data.model,
99
+ usage: {
100
+ promptTokens: data.usage.prompt_tokens,
101
+ completionTokens: data.usage.completion_tokens,
102
+ totalTokens: data.usage.total_tokens
103
+ },
104
+ finishReason: data.choices[0]?.finish_reason || 'stop'
105
+ };
106
+ }
107
+ catch (error) {
108
+ logger.error('[AzureOpenAIProvider.generateText] Error:', error);
109
+ throw error;
110
+ }
111
+ }
112
+ async streamText(optionsOrPrompt, schema) {
113
+ logger.debug('[AzureOpenAIProvider.streamText] Starting text streaming');
114
+ // Parse parameters with backward compatibility
115
+ const options = typeof optionsOrPrompt === 'string'
116
+ ? { prompt: optionsOrPrompt }
117
+ : optionsOrPrompt;
118
+ const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are a helpful AI assistant.' } = options;
119
+ logger.debug(`[AzureOpenAIProvider.streamText] Streaming prompt: "${prompt.substring(0, 100)}..."`);
120
+ const messages = [];
121
+ if (systemPrompt) {
122
+ messages.push({
123
+ role: 'system',
124
+ content: systemPrompt
125
+ });
126
+ }
127
+ messages.push({
128
+ role: 'user',
129
+ content: prompt
130
+ });
131
+ const requestBody = {
132
+ messages,
133
+ temperature,
134
+ max_tokens: maxTokens,
135
+ stream: true
136
+ };
137
+ try {
138
+ const response = await this.makeRequest(requestBody, true);
139
+ if (!response.body) {
140
+ throw new Error('No response body received');
141
+ }
142
+ // Return a StreamTextResult-like object
143
+ return {
144
+ textStream: this.createAsyncIterable(response.body),
145
+ text: '',
146
+ usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
147
+ finishReason: 'stop'
148
+ };
149
+ }
150
+ catch (error) {
151
+ logger.error('[AzureOpenAIProvider.streamText] Error:', error);
152
+ throw error;
153
+ }
154
+ }
155
+ async *createAsyncIterable(body) {
156
+ const reader = body.getReader();
157
+ const decoder = new TextDecoder();
158
+ let buffer = '';
159
+ try {
160
+ while (true) {
161
+ const { done, value } = await reader.read();
162
+ if (done)
163
+ break;
164
+ buffer += decoder.decode(value, { stream: true });
165
+ const lines = buffer.split('\n');
166
+ buffer = lines.pop() || '';
167
+ for (const line of lines) {
168
+ if (line.trim() === '')
169
+ continue;
170
+ if (line.startsWith('data: ')) {
171
+ const data = line.slice(6);
172
+ if (data.trim() === '[DONE]')
173
+ continue;
174
+ try {
175
+ const chunk = JSON.parse(data);
176
+ // Extract text content from chunk
177
+ if (chunk.choices?.[0]?.delta?.content) {
178
+ yield chunk.choices[0].delta.content;
179
+ }
180
+ }
181
+ catch (parseError) {
182
+ logger.warn('[AzureOpenAIProvider.createAsyncIterable] Failed to parse chunk:', parseError);
183
+ continue;
184
+ }
185
+ }
186
+ }
187
+ }
188
+ }
189
+ finally {
190
+ reader.releaseLock();
191
+ }
192
+ }
193
+ async *generateTextStream(optionsOrPrompt) {
194
+ logger.debug('[AzureOpenAIProvider.generateTextStream] Starting text streaming');
195
+ // Parse parameters with backward compatibility
196
+ const options = typeof optionsOrPrompt === 'string'
197
+ ? { prompt: optionsOrPrompt }
198
+ : optionsOrPrompt;
199
+ const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are a helpful AI assistant.' } = options;
200
+ logger.debug(`[AzureOpenAIProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
201
+ const messages = [];
202
+ if (systemPrompt) {
203
+ messages.push({
204
+ role: 'system',
205
+ content: systemPrompt
206
+ });
207
+ }
208
+ messages.push({
209
+ role: 'user',
210
+ content: prompt
211
+ });
212
+ const requestBody = {
213
+ messages,
214
+ temperature,
215
+ max_tokens: maxTokens,
216
+ stream: true
217
+ };
218
+ try {
219
+ const response = await this.makeRequest(requestBody, true);
220
+ if (!response.body) {
221
+ throw new Error('No response body received');
222
+ }
223
+ const reader = response.body.getReader();
224
+ const decoder = new TextDecoder();
225
+ let buffer = '';
226
+ try {
227
+ while (true) {
228
+ const { done, value } = await reader.read();
229
+ if (done)
230
+ break;
231
+ buffer += decoder.decode(value, { stream: true });
232
+ const lines = buffer.split('\n');
233
+ buffer = lines.pop() || '';
234
+ for (const line of lines) {
235
+ if (line.trim() === '')
236
+ continue;
237
+ if (line.startsWith('data: ')) {
238
+ const data = line.slice(6);
239
+ if (data.trim() === '[DONE]')
240
+ continue;
241
+ try {
242
+ const chunk = JSON.parse(data);
243
+ // Extract text content from chunk
244
+ if (chunk.choices?.[0]?.delta?.content) {
245
+ yield {
246
+ content: chunk.choices[0].delta.content,
247
+ provider: this.name,
248
+ model: chunk.model || this.deploymentId
249
+ };
250
+ }
251
+ }
252
+ catch (parseError) {
253
+ logger.warn('[AzureOpenAIProvider.generateTextStream] Failed to parse chunk:', parseError);
254
+ continue;
255
+ }
256
+ }
257
+ }
258
+ }
259
+ }
260
+ finally {
261
+ reader.releaseLock();
262
+ }
263
+ logger.debug('[AzureOpenAIProvider.generateTextStream] Streaming completed');
264
+ }
265
+ catch (error) {
266
+ logger.error('[AzureOpenAIProvider.generateTextStream] Error:', error);
267
+ throw error;
268
+ }
269
+ }
270
+ async testConnection() {
271
+ logger.debug('[AzureOpenAIProvider.testConnection] Testing connection to Azure OpenAI');
272
+ const startTime = Date.now();
273
+ try {
274
+ await this.generateText({
275
+ prompt: 'Hello',
276
+ maxTokens: 5
277
+ });
278
+ const responseTime = Date.now() - startTime;
279
+ logger.debug(`[AzureOpenAIProvider.testConnection] Connection test successful (${responseTime}ms)`);
280
+ return {
281
+ success: true,
282
+ responseTime
283
+ };
284
+ }
285
+ catch (error) {
286
+ const responseTime = Date.now() - startTime;
287
+ logger.error(`[AzureOpenAIProvider.testConnection] Connection test failed (${responseTime}ms):`, error);
288
+ return {
289
+ success: false,
290
+ error: error instanceof Error ? error.message : 'Unknown error',
291
+ responseTime
292
+ };
293
+ }
294
+ }
295
+ isConfigured() {
296
+ try {
297
+ this.getApiKey();
298
+ this.getEndpoint();
299
+ this.getDeploymentId();
300
+ return true;
301
+ }
302
+ catch {
303
+ return false;
304
+ }
305
+ }
306
+ getRequiredConfig() {
307
+ return ['AZURE_OPENAI_API_KEY', 'AZURE_OPENAI_ENDPOINT', 'AZURE_OPENAI_DEPLOYMENT_ID'];
308
+ }
309
+ getOptionalConfig() {
310
+ return ['AZURE_OPENAI_API_VERSION'];
311
+ }
312
+ getModels() {
313
+ return [
314
+ 'gpt-4',
315
+ 'gpt-4-turbo',
316
+ 'gpt-4-32k',
317
+ 'gpt-35-turbo',
318
+ 'gpt-35-turbo-16k'
319
+ ];
320
+ }
321
+ supportsStreaming() {
322
+ return true;
323
+ }
324
+ supportsSchema() {
325
+ return true; // Azure OpenAI supports JSON mode and function calling
326
+ }
327
+ getCapabilities() {
328
+ return [
329
+ 'text-generation',
330
+ 'streaming',
331
+ 'conversation',
332
+ 'system-prompts',
333
+ 'json-mode',
334
+ 'function-calling',
335
+ 'enterprise-security',
336
+ 'content-filtering'
337
+ ];
338
+ }
339
+ }
@@ -0,0 +1,30 @@
1
+ import type { ZodType, ZodTypeDef } from 'zod';
2
+ import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
3
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
4
export declare class GoogleAIStudio implements AIProvider {
    private modelName;
    /**
     * Initializes a new instance of GoogleAIStudio
     * @param modelName - Optional model name to override the default from config
     */
    constructor(modelName?: string | null);
    /**
     * Gets the appropriate model instance
     * @private
     */
    private getModel;
    /**
     * Processes text using streaming approach with enhanced error handling callbacks
     * @param optionsOrPrompt - The input prompt string, or a StreamTextOptions object
     * @param analysisSchema - Optional Zod schema or Schema object for output validation
     * @returns Promise resolving to StreamTextResult or null if operation fails
     */
    streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
    /**
     * Processes text using non-streaming approach with optional schema validation
     * @param optionsOrPrompt - The input prompt string, or a TextGenerationOptions object
     * @param analysisSchema - Optional Zod schema or Schema object for output validation
     * @returns Promise resolving to GenerateTextResult or null if operation fails
     */
    generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
}
@@ -0,0 +1,216 @@
1
+ import { createGoogleGenerativeAI } from '@ai-sdk/google';
2
+ import { streamText, generateText, Output } from 'ai';
3
+ import { logger } from '../utils/logger.js';
4
// Default system context applied when a caller supplies no system prompt.
const DEFAULT_SYSTEM_CONTEXT = {
    systemPrompt: 'You are a helpful AI assistant.'
};
// --- Configuration helpers ---
/**
 * Resolves the Google AI Studio API key from the environment.
 * Accepts GOOGLE_AI_API_KEY or GOOGLE_GENERATIVE_AI_API_KEY.
 * @throws {Error} when neither variable is set.
 */
const getGoogleAIApiKey = () => {
    const { GOOGLE_AI_API_KEY, GOOGLE_GENERATIVE_AI_API_KEY } = process.env;
    const apiKey = GOOGLE_AI_API_KEY || GOOGLE_GENERATIVE_AI_API_KEY;
    if (apiKey) {
        return apiKey;
    }
    throw new Error('GOOGLE_AI_API_KEY environment variable is not set');
};
/** Model id from GOOGLE_AI_MODEL, falling back to the default Gemini model. */
const getGoogleAIModelId = () => process.env.GOOGLE_AI_MODEL || 'gemini-1.5-pro-latest';
/** @returns true when either supported API-key environment variable is set. */
const hasValidAuth = () => Boolean(process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY);
// Lazily created, module-level client instance (created on first use).
let _google = null;
/** Returns the shared Google client, creating it on first call. */
function getGoogleInstance() {
    if (_google === null) {
        _google = createGoogleGenerativeAI({
            apiKey: getGoogleAIApiKey(),
            headers: {
                'X-Powered-By': 'NeuroLink'
            }
        });
    }
    return _google;
}
36
+ // Google AI Studio class with enhanced error handling
37
+ export class GoogleAIStudio {
38
+ modelName;
39
+ /**
40
+ * Initializes a new instance of GoogleAIStudio
41
+ * @param modelName - Optional model name to override the default from config
42
+ */
43
+ constructor(modelName) {
44
+ const functionTag = 'GoogleAIStudio.constructor';
45
+ this.modelName = modelName || getGoogleAIModelId();
46
+ try {
47
+ logger.debug(`[${functionTag}] Initialization started`, {
48
+ modelName: this.modelName,
49
+ hasApiKey: hasValidAuth()
50
+ });
51
+ logger.debug(`[${functionTag}] Initialization completed`, {
52
+ modelName: this.modelName,
53
+ success: true
54
+ });
55
+ }
56
+ catch (err) {
57
+ logger.error(`[${functionTag}] Initialization failed`, {
58
+ message: 'Error in initializing Google AI Studio',
59
+ modelName: this.modelName,
60
+ error: err instanceof Error ? err.message : String(err),
61
+ stack: err instanceof Error ? err.stack : undefined
62
+ });
63
+ }
64
+ }
65
+ /**
66
+ * Gets the appropriate model instance
67
+ * @private
68
+ */
69
+ getModel() {
70
+ logger.debug('GoogleAIStudio.getModel - Google AI model selected', {
71
+ modelName: this.modelName
72
+ });
73
+ const google = getGoogleInstance();
74
+ return google(this.modelName);
75
+ }
76
+ /**
77
+ * Processes text using streaming approach with enhanced error handling callbacks
78
+ * @param prompt - The input text prompt to analyze
79
+ * @param analysisSchema - Optional Zod schema or Schema object for output validation
80
+ * @returns Promise resolving to StreamTextResult or null if operation fails
81
+ */
82
+ async streamText(optionsOrPrompt, analysisSchema) {
83
+ const functionTag = 'GoogleAIStudio.streamText';
84
+ const provider = 'google-ai';
85
+ let chunkCount = 0;
86
+ try {
87
+ // Parse parameters - support both string and options object
88
+ const options = typeof optionsOrPrompt === 'string'
89
+ ? { prompt: optionsOrPrompt }
90
+ : optionsOrPrompt;
91
+ const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
92
+ // Use schema from options or fallback parameter
93
+ const finalSchema = schema || analysisSchema;
94
+ logger.debug(`[${functionTag}] Stream request started`, {
95
+ provider,
96
+ modelName: this.modelName,
97
+ promptLength: prompt.length,
98
+ temperature,
99
+ maxTokens,
100
+ hasSchema: !!finalSchema
101
+ });
102
+ const model = this.getModel();
103
+ const streamOptions = {
104
+ model: model,
105
+ prompt: prompt,
106
+ system: systemPrompt,
107
+ temperature,
108
+ maxTokens,
109
+ onError: (event) => {
110
+ const error = event.error;
111
+ const errorMessage = error instanceof Error ? error.message : String(error);
112
+ const errorStack = error instanceof Error ? error.stack : undefined;
113
+ logger.error(`[${functionTag}] Stream text error`, {
114
+ provider,
115
+ modelName: this.modelName,
116
+ error: errorMessage,
117
+ stack: errorStack,
118
+ promptLength: prompt.length,
119
+ chunkCount
120
+ });
121
+ },
122
+ onFinish: (event) => {
123
+ logger.debug(`[${functionTag}] Stream text finished`, {
124
+ provider,
125
+ modelName: this.modelName,
126
+ finishReason: event.finishReason,
127
+ usage: event.usage,
128
+ totalChunks: chunkCount,
129
+ promptLength: prompt.length,
130
+ responseLength: event.text?.length || 0
131
+ });
132
+ },
133
+ onChunk: (event) => {
134
+ chunkCount++;
135
+ logger.debug(`[${functionTag}] Stream text chunk`, {
136
+ provider,
137
+ modelName: this.modelName,
138
+ chunkNumber: chunkCount,
139
+ chunkLength: event.chunk.text?.length || 0,
140
+ chunkType: event.chunk.type
141
+ });
142
+ }
143
+ };
144
+ if (analysisSchema) {
145
+ streamOptions.experimental_output = Output.object({ schema: analysisSchema });
146
+ }
147
+ const result = streamText(streamOptions);
148
+ return result;
149
+ }
150
+ catch (err) {
151
+ logger.error(`[${functionTag}] Exception`, {
152
+ provider,
153
+ modelName: this.modelName,
154
+ message: 'Error in streaming text',
155
+ err: String(err),
156
+ promptLength: typeof optionsOrPrompt === 'string' ? optionsOrPrompt.length : optionsOrPrompt.prompt.length
157
+ });
158
+ throw err; // Re-throw error to trigger fallback
159
+ }
160
+ }
161
+ /**
162
+ * Processes text using non-streaming approach with optional schema validation
163
+ * @param prompt - The input text prompt to analyze
164
+ * @param analysisSchema - Optional Zod schema or Schema object for output validation
165
+ * @returns Promise resolving to GenerateTextResult or null if operation fails
166
+ */
167
+ async generateText(optionsOrPrompt, analysisSchema) {
168
+ const functionTag = 'GoogleAIStudio.generateText';
169
+ const provider = 'google-ai';
170
+ try {
171
+ // Parse parameters - support both string and options object
172
+ const options = typeof optionsOrPrompt === 'string'
173
+ ? { prompt: optionsOrPrompt }
174
+ : optionsOrPrompt;
175
+ const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
176
+ // Use schema from options or fallback parameter
177
+ const finalSchema = schema || analysisSchema;
178
+ logger.debug(`[${functionTag}] Generate request started`, {
179
+ provider,
180
+ modelName: this.modelName,
181
+ promptLength: prompt.length,
182
+ temperature,
183
+ maxTokens
184
+ });
185
+ const model = this.getModel();
186
+ const generateOptions = {
187
+ model: model,
188
+ prompt: prompt,
189
+ system: systemPrompt,
190
+ temperature,
191
+ maxTokens
192
+ };
193
+ if (finalSchema) {
194
+ generateOptions.experimental_output = Output.object({ schema: finalSchema });
195
+ }
196
+ const result = await generateText(generateOptions);
197
+ logger.debug(`[${functionTag}] Generate text completed`, {
198
+ provider,
199
+ modelName: this.modelName,
200
+ usage: result.usage,
201
+ finishReason: result.finishReason,
202
+ responseLength: result.text?.length || 0
203
+ });
204
+ return result;
205
+ }
206
+ catch (err) {
207
+ logger.error(`[${functionTag}] Exception`, {
208
+ provider,
209
+ modelName: this.modelName,
210
+ message: 'Error in generating text',
211
+ err: String(err)
212
+ });
213
+ throw err; // Re-throw error to trigger fallback
214
+ }
215
+ }
216
+ }
@@ -0,0 +1,30 @@
1
+ import type { ZodType, ZodTypeDef } from 'zod';
2
+ import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
3
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
4
export declare class GoogleVertexAI implements AIProvider {
    private modelName;
    /**
     * Initializes a new instance of GoogleVertexAI
     * @param modelName - Optional model name to override the default from config
     */
    constructor(modelName?: string | null);
    /**
     * Gets the appropriate model instance (Google or Anthropic)
     * @private
     */
    private getModel;
    /**
     * Processes text using streaming approach with enhanced error handling callbacks
     * @param optionsOrPrompt - The input prompt string, or a StreamTextOptions object
     * @param analysisSchema - Optional Zod schema or Schema object for output validation
     * @returns Promise resolving to StreamTextResult or null if operation fails
     */
    streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
    /**
     * Processes text using non-streaming approach with optional schema validation
     * @param optionsOrPrompt - The input prompt string, or a TextGenerationOptions object
     * @param analysisSchema - Optional Zod schema or Schema object for output validation
     * @returns Promise resolving to GenerateTextResult or null if operation fails
     */
    generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
}