@juspay/neurolink 5.1.0 → 5.3.0
This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- package/CHANGELOG.md +21 -9
- package/README.md +123 -126
- package/dist/agent/direct-tools.d.ts +6 -6
- package/dist/cli/commands/config.d.ts +3 -3
- package/dist/cli/commands/mcp.js +8 -7
- package/dist/cli/factories/command-factory.d.ts +4 -0
- package/dist/cli/factories/command-factory.js +63 -8
- package/dist/cli/index.js +87 -140
- package/dist/core/base-provider.d.ts +423 -0
- package/dist/core/base-provider.js +376 -0
- package/dist/core/constants.d.ts +2 -1
- package/dist/core/constants.js +2 -1
- package/dist/core/dynamic-models.d.ts +6 -6
- package/dist/core/evaluation.d.ts +19 -80
- package/dist/core/evaluation.js +185 -484
- package/dist/core/factory.d.ts +3 -3
- package/dist/core/factory.js +31 -91
- package/dist/core/service-registry.d.ts +47 -0
- package/dist/core/service-registry.js +112 -0
- package/dist/core/types.d.ts +8 -1
- package/dist/factories/compatibility-factory.js +1 -1
- package/dist/factories/provider-factory.d.ts +72 -0
- package/dist/factories/provider-factory.js +144 -0
- package/dist/factories/provider-registry.d.ts +38 -0
- package/dist/factories/provider-registry.js +107 -0
- package/dist/index.d.ts +4 -3
- package/dist/index.js +2 -4
- package/dist/lib/agent/direct-tools.d.ts +6 -6
- package/dist/lib/core/base-provider.d.ts +423 -0
- package/dist/lib/core/base-provider.js +376 -0
- package/dist/lib/core/constants.d.ts +2 -1
- package/dist/lib/core/constants.js +2 -1
- package/dist/lib/core/dynamic-models.d.ts +6 -6
- package/dist/lib/core/evaluation.d.ts +19 -80
- package/dist/lib/core/evaluation.js +185 -484
- package/dist/lib/core/factory.d.ts +3 -3
- package/dist/lib/core/factory.js +30 -91
- package/dist/lib/core/service-registry.d.ts +47 -0
- package/dist/lib/core/service-registry.js +112 -0
- package/dist/lib/core/types.d.ts +8 -1
- package/dist/lib/factories/compatibility-factory.js +1 -1
- package/dist/lib/factories/provider-factory.d.ts +72 -0
- package/dist/lib/factories/provider-factory.js +144 -0
- package/dist/lib/factories/provider-registry.d.ts +38 -0
- package/dist/lib/factories/provider-registry.js +107 -0
- package/dist/lib/index.d.ts +4 -3
- package/dist/lib/index.js +2 -4
- package/dist/lib/mcp/client.d.ts +1 -0
- package/dist/lib/mcp/client.js +1 -0
- package/dist/lib/mcp/config.js +28 -3
- package/dist/lib/mcp/context-manager.d.ts +1 -0
- package/dist/lib/mcp/context-manager.js +8 -4
- package/dist/lib/mcp/function-calling.d.ts +13 -0
- package/dist/lib/mcp/function-calling.js +134 -35
- package/dist/lib/mcp/initialize-tools.d.ts +1 -1
- package/dist/lib/mcp/initialize-tools.js +45 -1
- package/dist/lib/mcp/initialize.js +16 -6
- package/dist/lib/mcp/neurolink-mcp-client.d.ts +1 -0
- package/dist/lib/mcp/neurolink-mcp-client.js +21 -5
- package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +3 -1
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/unified-registry.d.ts +4 -0
- package/dist/lib/mcp/unified-registry.js +42 -9
- package/dist/lib/neurolink.d.ts +156 -117
- package/dist/lib/neurolink.js +619 -404
- package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
- package/dist/lib/providers/amazon-bedrock.js +143 -0
- package/dist/lib/providers/analytics-helper.js +7 -4
- package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/lib/providers/anthropic-baseprovider.js +114 -0
- package/dist/lib/providers/anthropic.d.ts +19 -43
- package/dist/lib/providers/anthropic.js +82 -306
- package/dist/lib/providers/azure-openai.d.ts +20 -0
- package/dist/lib/providers/azure-openai.js +89 -0
- package/dist/lib/providers/function-calling-provider.d.ts +64 -2
- package/dist/lib/providers/function-calling-provider.js +208 -9
- package/dist/lib/providers/google-ai-studio.d.ts +23 -0
- package/dist/lib/providers/google-ai-studio.js +107 -0
- package/dist/lib/providers/google-vertex.d.ts +47 -0
- package/dist/lib/providers/google-vertex.js +205 -0
- package/dist/lib/providers/huggingFace.d.ts +32 -25
- package/dist/lib/providers/huggingFace.js +97 -431
- package/dist/lib/providers/index.d.ts +9 -9
- package/dist/lib/providers/index.js +9 -9
- package/dist/lib/providers/mcp-provider.js +24 -5
- package/dist/lib/providers/mistral.d.ts +42 -0
- package/dist/lib/providers/mistral.js +160 -0
- package/dist/lib/providers/ollama.d.ts +52 -36
- package/dist/lib/providers/ollama.js +297 -520
- package/dist/lib/providers/openAI.d.ts +19 -18
- package/dist/lib/providers/openAI.js +76 -275
- package/dist/lib/sdk/tool-extension.d.ts +181 -0
- package/dist/lib/sdk/tool-extension.js +283 -0
- package/dist/lib/sdk/tool-registration.d.ts +95 -0
- package/dist/lib/sdk/tool-registration.js +167 -0
- package/dist/lib/services/streaming/streaming-manager.js +11 -10
- package/dist/lib/services/websocket/websocket-server.js +12 -11
- package/dist/lib/telemetry/telemetry-service.js +8 -7
- package/dist/lib/types/generate-types.d.ts +1 -0
- package/dist/lib/types/mcp-types.d.ts +116 -0
- package/dist/lib/types/mcp-types.js +5 -0
- package/dist/lib/types/stream-types.d.ts +30 -18
- package/dist/lib/types/universal-provider-options.d.ts +87 -0
- package/dist/lib/types/universal-provider-options.js +53 -0
- package/dist/mcp/client.d.ts +1 -0
- package/dist/mcp/client.js +1 -0
- package/dist/mcp/config.js +28 -3
- package/dist/mcp/context-manager.d.ts +1 -0
- package/dist/mcp/context-manager.js +8 -4
- package/dist/mcp/function-calling.d.ts +13 -0
- package/dist/mcp/function-calling.js +134 -35
- package/dist/mcp/initialize-tools.d.ts +1 -1
- package/dist/mcp/initialize-tools.js +45 -1
- package/dist/mcp/initialize.js +16 -6
- package/dist/mcp/neurolink-mcp-client.d.ts +1 -0
- package/dist/mcp/neurolink-mcp-client.js +21 -5
- package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.js +3 -1
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/unified-registry.d.ts +4 -0
- package/dist/mcp/unified-registry.js +42 -9
- package/dist/neurolink.d.ts +156 -117
- package/dist/neurolink.js +619 -404
- package/dist/providers/amazon-bedrock.d.ts +32 -0
- package/dist/providers/amazon-bedrock.js +143 -0
- package/dist/providers/analytics-helper.js +7 -4
- package/dist/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/providers/anthropic-baseprovider.js +114 -0
- package/dist/providers/anthropic.d.ts +19 -43
- package/dist/providers/anthropic.js +81 -305
- package/dist/providers/azure-openai.d.ts +20 -0
- package/dist/providers/azure-openai.js +89 -0
- package/dist/providers/function-calling-provider.d.ts +64 -2
- package/dist/providers/function-calling-provider.js +208 -9
- package/dist/providers/google-ai-studio.d.ts +23 -0
- package/dist/providers/google-ai-studio.js +108 -0
- package/dist/providers/google-vertex.d.ts +47 -0
- package/dist/providers/google-vertex.js +205 -0
- package/dist/providers/huggingFace.d.ts +32 -25
- package/dist/providers/huggingFace.js +96 -430
- package/dist/providers/index.d.ts +9 -9
- package/dist/providers/index.js +9 -9
- package/dist/providers/mcp-provider.js +24 -5
- package/dist/providers/mistral.d.ts +42 -0
- package/dist/providers/mistral.js +160 -0
- package/dist/providers/ollama.d.ts +52 -36
- package/dist/providers/ollama.js +297 -519
- package/dist/providers/openAI.d.ts +19 -18
- package/dist/providers/openAI.js +76 -276
- package/dist/sdk/tool-extension.d.ts +181 -0
- package/dist/sdk/tool-extension.js +283 -0
- package/dist/sdk/tool-registration.d.ts +95 -0
- package/dist/sdk/tool-registration.js +168 -0
- package/dist/services/streaming/streaming-manager.js +11 -10
- package/dist/services/websocket/websocket-server.js +12 -11
- package/dist/telemetry/telemetry-service.js +8 -7
- package/dist/types/generate-types.d.ts +1 -0
- package/dist/types/mcp-types.d.ts +116 -0
- package/dist/types/mcp-types.js +5 -0
- package/dist/types/stream-types.d.ts +30 -18
- package/dist/types/universal-provider-options.d.ts +87 -0
- package/dist/types/universal-provider-options.js +53 -0
- package/package.json +12 -5
- package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -93
- package/dist/lib/providers/agent-enhanced-provider.js +0 -605
- package/dist/lib/providers/amazonBedrock.d.ts +0 -28
- package/dist/lib/providers/amazonBedrock.js +0 -364
- package/dist/lib/providers/azureOpenAI.d.ts +0 -42
- package/dist/lib/providers/azureOpenAI.js +0 -347
- package/dist/lib/providers/googleAIStudio.d.ts +0 -42
- package/dist/lib/providers/googleAIStudio.js +0 -364
- package/dist/lib/providers/googleVertexAI.d.ts +0 -34
- package/dist/lib/providers/googleVertexAI.js +0 -547
- package/dist/lib/providers/mistralAI.d.ts +0 -37
- package/dist/lib/providers/mistralAI.js +0 -325
- package/dist/providers/agent-enhanced-provider.d.ts +0 -93
- package/dist/providers/agent-enhanced-provider.js +0 -606
- package/dist/providers/amazonBedrock.d.ts +0 -28
- package/dist/providers/amazonBedrock.js +0 -364
- package/dist/providers/azureOpenAI.d.ts +0 -42
- package/dist/providers/azureOpenAI.js +0 -348
- package/dist/providers/googleAIStudio.d.ts +0 -42
- package/dist/providers/googleAIStudio.js +0 -364
- package/dist/providers/googleVertexAI.d.ts +0 -34
- package/dist/providers/googleVertexAI.js +0 -547
- package/dist/providers/mistralAI.d.ts +0 -37
- package/dist/providers/mistralAI.js +0 -325
@@ -7,7 +7,7 @@ import { streamText as aiStreamText, generateText as aiGenerate, Output, } from
 import { getAvailableFunctionTools, executeFunctionCall, isFunctionCallingAvailable, } from "../mcp/function-calling.js";
 import { createExecutionContext } from "../mcp/context-manager.js";
 import { mcpLogger } from "../mcp/logging.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 /**
  * Enhanced provider that enables real function calling with MCP tools
  */
@@ -16,11 +16,22 @@ export class FunctionCallingProvider {
     enableFunctionCalling;
     sessionId;
     userId;
+    cachedToolsObject = null;
+    cachedToolMap = null;
+    cacheTimestamp = null;
+    cacheExpiryMs;
     constructor(baseProvider, options = {}) {
         this.baseProvider = baseProvider;
         this.enableFunctionCalling = options.enableFunctionCalling ?? true;
         this.sessionId = options.sessionId || `function-calling-${Date.now()}`;
         this.userId = options.userId || "function-calling-user";
+        // Configurable cache expiry: default 5 minutes, with environment override, then constructor option
+        const defaultExpiryMs = 5 * 60 * 1000; // 5 minutes
+        const envExpiryMs = process.env.NEUROLINK_CACHE_EXPIRY_MS
+            ? parseInt(process.env.NEUROLINK_CACHE_EXPIRY_MS, 10)
+            : undefined;
+        this.cacheExpiryMs =
+            options.cacheExpiryMs ?? envExpiryMs ?? defaultExpiryMs;
     }
     /**
      * PRIMARY METHOD: Stream content using AI (recommended for new code)
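The constructor above (from the reworked function-calling-provider.js) resolves the new tool-cache expiry in a fixed order: an explicit `cacheExpiryMs` option wins, then the `NEUROLINK_CACHE_EXPIRY_MS` environment variable, then a 5-minute default. A minimal sketch of that resolution order; `resolveCacheExpiry` is illustrative only and is not exported by the package:

```js
// Mirrors the expiry resolution shown in the constructor above
// (option > NEUROLINK_CACHE_EXPIRY_MS > 5-minute default).
function resolveCacheExpiry(options = {}) {
  const defaultExpiryMs = 5 * 60 * 1000; // 5 minutes
  const envExpiryMs = process.env.NEUROLINK_CACHE_EXPIRY_MS
    ? parseInt(process.env.NEUROLINK_CACHE_EXPIRY_MS, 10)
    : undefined;
  return options.cacheExpiryMs ?? envExpiryMs ?? defaultExpiryMs;
}

process.env.NEUROLINK_CACHE_EXPIRY_MS = "60000";
console.log(resolveCacheExpiry());                        // 60000 (env override)
console.log(resolveCacheExpiry({ cacheExpiryMs: 1000 })); // 1000 (constructor option wins)
```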
@@ -91,8 +102,30 @@ export class FunctionCallingProvider {
             }
             return result;
         }
-        // Get available function tools
-
+        // Get available function tools (with automatic cache invalidation)
+        let toolsObject, toolMap;
+        const now = Date.now();
+        const isCacheExpired = this.cacheTimestamp === null ||
+            now - this.cacheTimestamp > this.cacheExpiryMs;
+        if (this.cachedToolsObject && this.cachedToolMap && !isCacheExpired) {
+            toolsObject = this.cachedToolsObject;
+            toolMap = this.cachedToolMap;
+            mcpLogger.debug(`[${functionTag}] Using cached tools (${Math.round((now - this.cacheTimestamp) / 1000)}s old)`);
+        }
+        else {
+            if (isCacheExpired && this.cachedToolsObject) {
+                mcpLogger.debug(`[${functionTag}] Cache expired, refreshing tools`);
+            }
+            const result = await getAvailableFunctionTools();
+            toolsObject = result.toolsObject;
+            toolMap = result.toolMap;
+            // Cache the results for future use with timestamp
+            this.cachedToolsObject = toolsObject;
+            this.cachedToolMap = toolMap;
+            this.cacheTimestamp = now;
+            mcpLogger.debug(`[${functionTag}] Cached ${Object.keys(toolsObject).length} tools with expiry in ${this.cacheExpiryMs / 1000}s`);
+        }
+        const tools = Object.values(toolsObject);
         if (tools.length === 0) {
             mcpLogger.debug(`[${functionTag}] No tools available, using base provider`);
             const result = await this.baseProvider.generate(options, analysisSchema);
@@ -113,8 +146,8 @@ export class FunctionCallingProvider {
             aiProvider: this.baseProvider.constructor.name,
         });
         // Use the AI SDK's native function calling by calling generate directly
-        // We
-        const result = await this.
+        // We can now use the toolsObject directly instead of converting from array
+        const result = await this.generateWithToolsObject(options, toolsObject, toolMap, context, analysisSchema);
         if (!result) {
             return {
                 content: "No response generated",
@@ -150,14 +183,83 @@ export class FunctionCallingProvider {
         }
     }
     /**
-     * Generate text using AI SDK's
+     * Generate text with tools using the AI SDK's generate function (with tools object)
+     */
+    async generateWithToolsObject(options, toolsObject, toolMap, context, analysisSchema) {
+        const functionTag = "FunctionCallingProvider.generateWithToolsObject";
+        try {
+            // Use the toolsObject directly with proper execution wrapped
+            const toolsWithExecution = this.wrapToolsWithExecution(toolsObject, toolMap, context);
+            mcpLogger.debug(`[${functionTag}] Using tools object with ${Object.keys(toolsWithExecution).length} tools`);
+            // Get the model from base provider
+            const modelInfo = await this.getModelFromProvider();
+            if (!modelInfo) {
+                mcpLogger.warn(`[${functionTag}] Could not get model from provider, falling back to base provider`);
+                const result = await this.baseProvider.generate(options, analysisSchema);
+                if (!result) {
+                    return {
+                        content: "No response generated",
+                        provider: "function-calling",
+                        model: "unknown",
+                    };
+                }
+                return result;
+            }
+            // Use AI SDK's generate directly with tools
+            const generateOptions = {
+                model: modelInfo.model,
+                prompt: options.prompt,
+                system: options.systemPrompt || "You are a helpful AI assistant.",
+                temperature: options.temperature || 0.7,
+                maxTokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
+                tools: toolsWithExecution,
+                toolChoice: "auto", // Let the AI decide when to use tools
+                maxSteps: options.maxSteps ?? DEFAULT_MAX_STEPS, // Enable multi-turn tool execution
+            };
+            // Add experimental_output if schema is provided
+            if (analysisSchema) {
+                generateOptions.experimental_output = Output.object({
+                    schema: analysisSchema,
+                });
+            }
+            const result = await aiGenerate(generateOptions);
+            mcpLogger.debug(`[${functionTag}] AI SDK generate completed`, {
+                toolCalls: result.toolCalls?.length || 0,
+                finishReason: result.finishReason,
+                usage: result.usage,
+            });
+            return {
+                content: result.text,
+                provider: "function-calling",
+                model: "unknown",
+                usage: result.usage
+                    ? {
+                        inputTokens: result.usage.promptTokens,
+                        outputTokens: result.usage.completionTokens,
+                        totalTokens: result.usage.totalTokens,
+                    }
+                    : undefined,
+                responseTime: 0,
+                toolsUsed: result.toolCalls?.map((tc) => tc.toolName) || [],
+                toolExecutions: [],
+                enhancedWithTools: (result.toolCalls?.length || 0) > 0,
+                availableTools: [],
+            };
+        }
+        catch (error) {
+            mcpLogger.error(`[${functionTag}] Failed to generate text with tools:`, error);
+            throw error;
+        }
+    }
+    /**
+     * Generate text using AI SDK's native function calling (legacy array-based)
      */
     async generateWithTools(options, tools, toolMap, context, analysisSchema) {
         const functionTag = "FunctionCallingProvider.generateWithTools";
         try {
             // Convert our tools to AI SDK format with proper execution
             const toolsWithExecution = this.convertToAISDKTools(tools, toolMap, context);
-            mcpLogger.debug(`[${functionTag}] Calling AI SDK generate with ${Object.keys(toolsWithExecution).length} tools and maxSteps:
+            mcpLogger.debug(`[${functionTag}] Calling AI SDK generate with ${Object.keys(toolsWithExecution).length} tools and maxSteps: ${options.maxSteps ?? DEFAULT_MAX_STEPS}`);
             mcpLogger.debug(`[${functionTag}] Sanitized tool names:`, Object.keys(toolsWithExecution));
             // Log the first few tools to debug the issue
             const toolNames = Object.keys(toolsWithExecution);
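For consumers of the result, the new `generateWithToolsObject` maps the AI SDK's fields onto the package's result shape: `usage.promptTokens` becomes `inputTokens`, and `toolCalls` feeds both `toolsUsed` and `enhancedWithTools`. A hedged sketch of that mapping, using a hand-written literal in place of a live `aiGenerate()` result:

```js
// `sdkResult` is a hand-written stand-in for an actual AI SDK generate result.
const sdkResult = {
  text: "Done.",
  toolCalls: [{ toolName: "getUtcTime" }],
  usage: { promptTokens: 120, completionTokens: 45, totalTokens: 165 },
};

const mapped = {
  content: sdkResult.text,
  provider: "function-calling",
  usage: sdkResult.usage
    ? {
        inputTokens: sdkResult.usage.promptTokens,
        outputTokens: sdkResult.usage.completionTokens,
        totalTokens: sdkResult.usage.totalTokens,
      }
    : undefined,
  toolsUsed: sdkResult.toolCalls?.map((tc) => tc.toolName) || [],
  enhancedWithTools: (sdkResult.toolCalls?.length || 0) > 0,
};

console.log(mapped); // same shape the provider returns to callers
```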
@@ -187,7 +289,7 @@ export class FunctionCallingProvider {
                 maxTokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
                 tools: toolsWithExecution,
                 toolChoice: "auto", // Let the AI decide when to use tools
-                maxSteps:
+                maxSteps: options.maxSteps ?? DEFAULT_MAX_STEPS, // Enable multi-turn tool execution
             };
             // Add experimental_output if schema is provided
             if (analysisSchema) {
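Both generate paths now forward `maxSteps` (defaulting to `DEFAULT_MAX_STEPS`) to the AI SDK, which is what lets a tool call and the follow-up answer happen inside a single `generateText` invocation. A hedged, standalone sketch of that behaviour at the AI SDK level; the Gemini model id and the `getUtcTime` tool are placeholders, and the actual value of `DEFAULT_MAX_STEPS` lives in `../core/constants.js` and is not shown in this diff:

```js
import { generateText, tool } from "ai";
import { createGoogleGenerativeAI } from "@ai-sdk/google";
import { z } from "zod";

const google = createGoogleGenerativeAI({ apiKey: process.env.GOOGLE_AI_API_KEY });

const result = await generateText({
  model: google("gemini-1.5-flash"), // placeholder model id
  prompt: "What time is it in UTC right now?",
  tools: {
    getUtcTime: tool({
      description: "Returns the current UTC time as an ISO string",
      parameters: z.object({}),
      execute: async () => ({ now: new Date().toISOString() }),
    }),
  },
  toolChoice: "auto",
  maxSteps: 5, // multi-turn: tool call + follow-up answer in one generate call
});

console.log(result.text);
console.log(result.toolCalls?.map((tc) => tc.toolName)); // e.g. ["getUtcTime"]
```

With the AI SDK's default of a single step, a tool-calling response typically ends before a final text answer is produced, which is presumably why the provider now always passes an explicit `maxSteps`.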
@@ -270,7 +372,52 @@ export class FunctionCallingProvider {
         return sanitized;
     }
     /**
-     *
+     * Wrap tools with proper execution context (for object-based tools)
+     */
+    wrapToolsWithExecution(toolsObject, toolMap, context) {
+        const functionTag = "FunctionCallingProvider.wrapToolsWithExecution";
+        const wrappedTools = {};
+        for (const [toolName, tool] of Object.entries(toolsObject)) {
+            const toolInfo = toolMap.get(toolName);
+            const originalToolName = toolInfo ? toolInfo.toolName : toolName;
+            // Create a version with actual MCP execution
+            wrappedTools[toolName] = {
+                description: tool.description,
+                parameters: tool.parameters,
+                execute: async (args) => {
+                    // Debug logging only in debug mode
+                    if (process.env.NEUROLINK_DEBUG === "true") {
+                        const providerName = this.baseProvider.constructor.name;
+                        mcpLogger.debug(`Tool execution - Provider: ${providerName}`);
+                        mcpLogger.debug(`Tool: ${toolName} (original: ${originalToolName})`);
+                        mcpLogger.debug(`Args:`, args);
+                    }
+                    try {
+                        // Execute the actual MCP tool
+                        const result = await executeFunctionCall(toolName, args);
+                        if (process.env.NEUROLINK_DEBUG === "true") {
+                            mcpLogger.debug(`Tool result:`, result);
+                        }
+                        if (result.success) {
+                            return result.data || { success: true };
+                        }
+                        else {
+                            return { error: result.error || "Tool execution failed" };
+                        }
+                    }
+                    catch (error) {
+                        mcpLogger.error(`[${functionTag}] Tool execution error: ${toolName}`, error);
+                        return {
+                            error: error instanceof Error ? error.message : String(error),
+                        };
+                    }
+                },
+            };
+        }
+        return wrappedTools;
+    }
+    /**
+     * Convert our tools to AI SDK format with proper execution (legacy array-based)
      */
     convertToAISDKTools(tools, toolMap, context) {
         const functionTag = "FunctionCallingProvider.convertToAISDKTools";
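The wrapping contract above is simple: each tool keeps its `description` and `parameters`, and gains an `execute(args)` that resolves to the MCP result's `data` on success or an `{ error }` object on failure. A reduced sketch of that contract; `executeMcpTool` is a stand-in for the package's `executeFunctionCall`, not a real export:

```js
// Placeholder MCP call that always succeeds, used only to make the sketch runnable.
async function executeMcpTool(toolName, args) {
  return { success: true, data: { echoed: args } };
}

function wrapTool(toolName, tool) {
  return {
    description: tool.description,
    parameters: tool.parameters,
    execute: async (args) => {
      try {
        const result = await executeMcpTool(toolName, args);
        return result.success
          ? result.data || { success: true }
          : { error: result.error || "Tool execution failed" };
      } catch (error) {
        return { error: error instanceof Error ? error.message : String(error) };
      }
    },
  };
}

const wrapped = wrapTool("echo", { description: "Echo args", parameters: {} });
console.log(await wrapped.execute({ hello: "world" })); // { echoed: { hello: "world" } }
```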
@@ -394,6 +541,57 @@ These functions provide accurate, real-time data. Use them actively to enhance y
     /**
      * Alias for generate() - CLI-SDK consistency
      */
+    /**
+     * Clear cached tools - Cache Invalidation Strategy
+     *
+     * WHEN TO CALL clearToolsCache():
+     *
+     * 1. **MCP Server Changes**: When MCP servers are added, removed, or restarted
+     *    - After calling unifiedRegistry.addServer() or removeServer()
+     *    - When MCP server configurations change
+     *    - After MCP server restart or reconnection
+     *
+     * 2. **Tool Registration Changes**: When custom tools are modified
+     *    - After registering new SDK tools via registerTool()
+     *    - When tool implementations change
+     *    - After unregistering tools
+     *
+     * 3. **Provider Reinitialization**: When the provider context changes
+     *    - Before switching between different AI providers
+     *    - When session context changes significantly
+     *    - After provider authentication refresh
+     *
+     * 4. **Error Recovery**: When tool execution encounters systematic failures
+     *    - After MCP connection errors are resolved
+     *    - When tool discovery needs to be re-run
+     *    - During error recovery workflows
+     *
+     * 5. **Development/Testing**: During development and testing cycles
+     *    - Between test cases that modify tool availability
+     *    - When testing different tool configurations
+     *    - During hot reloading scenarios
+     *
+     * CACHE LIFECYCLE:
+     * - Cache is populated on first generate() call via getAvailableFunctionTools()
+     * - Cache persists across multiple generate() calls for performance
+     * - Cache is invalidated by calling this method
+     * - Next generate() call will rebuild cache from current tool state
+     *
+     * PERFORMANCE IMPACT:
+     * - Clearing cache forces tool discovery on next usage (~100-500ms overhead)
+     * - Recommended to clear cache proactively rather than reactively
+     * - Consider batching tool changes before clearing cache
+     *
+     * THREAD SAFETY:
+     * - This method is not thread-safe
+     * - Avoid calling during active generate() operations
+     * - Safe to call between separate AI generation requests
+     */
+    clearToolsCache() {
+        this.cachedToolsObject = null;
+        this.cachedToolMap = null;
+        this.cacheTimestamp = null;
+    }
     /**
      * Short alias for generate() - CLI-SDK consistency
      */
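Per the documentation added above, `clearToolsCache()` should be called whenever the tool surface changes (MCP servers added or removed, SDK tools registered or unregistered) so that the next `generate()` re-runs discovery. A hedged usage sketch with stubs standing in for the real provider and MCP registry:

```js
// Stubs only: in the package you would use the FunctionCallingProvider instance
// returned by createMCPAwareProviderV3 and your real registry / registerTool() calls.
const provider = {
  clearToolsCache() {
    console.log("tool cache cleared; next generate() will rediscover tools");
  },
  async generate(options) {
    return { content: `echo: ${options.prompt}` };
  },
};

async function addMcpServer(_config) {
  // stand-in for the unifiedRegistry.addServer() mentioned in the doc comment above
}

await addMcpServer({ name: "my-server" });
provider.clearToolsCache(); // invalidate proactively, as the doc comment recommends
console.log(await provider.generate({ prompt: "hello" }));
```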
@@ -422,6 +620,7 @@ export function createMCPAwareProviderV3(baseProvider, options = {}) {
         enableFunctionCalling: options.enableFunctionCalling,
         sessionId: options.sessionId,
         userId: options.userId,
+        cacheExpiryMs: options.cacheExpiryMs,
     });
     mcpLogger.debug(`[${functionTag}] Created MCP-aware provider with function calling`, {
         providerName: options.providerName,
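`createMCPAwareProviderV3` now forwards `cacheExpiryMs` through to `FunctionCallingProvider`. A hedged sketch of passing it via the factory; the deep import path is an assumption (check the package's `exports` map), and `baseProvider` is a minimal stub rather than a real provider:

```js
// Assumed import path; in practice this module may only be wired up internally.
import { createMCPAwareProviderV3 } from "@juspay/neurolink/dist/providers/function-calling-provider.js";

const baseProvider = {
  async generate(options) {
    return { content: `stub answer for: ${options.prompt}`, provider: "stub", model: "stub" };
  },
};

const provider = createMCPAwareProviderV3(baseProvider, {
  providerName: "stub",
  sessionId: "docs-demo",
  cacheExpiryMs: 60_000, // cache discovered tools for 1 minute instead of the 5-minute default
});

console.log(await provider.generate({ prompt: "What tools do you have?" }));
```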
@@ -0,0 +1,23 @@
+import type { ZodType, ZodTypeDef } from "zod";
+import { type Schema, type LanguageModelV1 } from "ai";
+import type { AIProviderName } from "../core/types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
+import { BaseProvider } from "../core/base-provider.js";
+/**
+ * Google AI Studio provider implementation using BaseProvider
+ * Migrated from original GoogleAIStudio class to new factory pattern
+ */
+export declare class GoogleAIStudioProvider extends BaseProvider {
+    constructor(modelName?: string, sdk?: any);
+    protected getProviderName(): AIProviderName;
+    protected getDefaultModel(): string;
+    /**
+     * 🔧 PHASE 2: Return AI SDK model instance for tool calling
+     */
+    protected getAISDKModel(): LanguageModelV1;
+    protected handleProviderError(error: any): Error;
+    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+    private getApiKey;
+    private validateStreamOptions;
+}
+export default GoogleAIStudioProvider;
@@ -0,0 +1,108 @@
+import { createGoogleGenerativeAI } from "@ai-sdk/google";
+import { streamText, Output } from "ai";
+import { GoogleAIModels } from "../core/types.js";
+import { BaseProvider } from "../core/base-provider.js";
+import { logger } from "../utils/logger.js";
+import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { createProxyFetch } from "../proxy/proxy-fetch.js";
+// Environment variable setup
+if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
+    process.env.GOOGLE_AI_API_KEY) {
+    process.env.GOOGLE_GENERATIVE_AI_API_KEY = process.env.GOOGLE_AI_API_KEY;
+}
+/**
+ * Google AI Studio provider implementation using BaseProvider
+ * Migrated from original GoogleAIStudio class to new factory pattern
+ */
+export class GoogleAIStudioProvider extends BaseProvider {
+    constructor(modelName, sdk) {
+        super(modelName, "google-ai", sdk);
+        logger.debug("GoogleAIStudioProvider initialized", {
+            model: this.modelName,
+            provider: this.providerName,
+            sdkProvided: !!sdk,
+        });
+    }
+    // ===================
+    // ABSTRACT METHOD IMPLEMENTATIONS
+    // ===================
+    getProviderName() {
+        return "google-ai";
+    }
+    getDefaultModel() {
+        return process.env.GOOGLE_AI_MODEL || GoogleAIModels.GEMINI_2_5_FLASH;
+    }
+    /**
+     * 🔧 PHASE 2: Return AI SDK model instance for tool calling
+     */
+    getAISDKModel() {
+        const apiKey = this.getApiKey();
+        const google = createGoogleGenerativeAI({ apiKey });
+        return google(this.modelName);
+    }
+    handleProviderError(error) {
+        if (error instanceof TimeoutError) {
+            return new Error(`Google AI request timed out: ${error.message}`);
+        }
+        if (error?.message?.includes("API_KEY_INVALID")) {
+            return new Error("Invalid Google AI API key. Please check your GOOGLE_AI_API_KEY environment variable.");
+        }
+        if (error?.message?.includes("RATE_LIMIT_EXCEEDED")) {
+            return new Error("Google AI rate limit exceeded. Please try again later.");
+        }
+        return new Error(`Google AI error: ${error?.message || "Unknown error"}`);
+    }
+    // executeGenerate removed - BaseProvider handles all generation with tools
+    async executeStream(options, analysisSchema) {
+        this.validateStreamOptions(options);
+        const apiKey = this.getApiKey();
+        const google = createGoogleGenerativeAI({ apiKey });
+        const model = google(this.modelName);
+        const timeout = this.getTimeout(options);
+        const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
+        try {
+            const result = await streamText({
+                model,
+                prompt: options.input.text,
+                system: options.systemPrompt,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+                tools: options.tools,
+                toolChoice: "auto",
+                abortSignal: timeoutController?.controller.signal,
+            });
+            timeoutController?.cleanup();
+            // Transform string stream to content object stream
+            const transformedStream = async function* () {
+                for await (const chunk of result.textStream) {
+                    yield { content: chunk };
+                }
+            };
+            return {
+                stream: transformedStream(),
+                provider: this.providerName,
+                model: this.modelName,
+            };
+        }
+        catch (error) {
+            throw this.handleProviderError(error);
+        }
+    }
+    // ===================
+    // HELPER METHODS
+    // ===================
+    getApiKey() {
+        const apiKey = process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY;
+        if (!apiKey) {
+            throw new Error("GOOGLE_AI_API_KEY or GOOGLE_GENERATIVE_AI_API_KEY environment variable is not set");
+        }
+        return apiKey;
+    }
+    validateStreamOptions(options) {
+        if (!options.input?.text || options.input.text.trim().length === 0) {
+            throw new Error("Input text is required and cannot be empty");
+        }
+    }
+}
+export default GoogleAIStudioProvider;
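A hedged usage sketch for the new `GoogleAIStudioProvider`. The deep import path is an assumption (check the package's `exports` map), and the public `stream()` call is assumed to be inherited from `BaseProvider` and to accept the same `StreamOptions` shape that `executeStream` consumes above (`{ input: { text }, systemPrompt, ... }`):

```js
// Assumed import path; the provider may also be re-exported from the package's index.
import { GoogleAIStudioProvider } from "@juspay/neurolink/dist/providers/google-ai-studio.js";

process.env.GOOGLE_AI_API_KEY ??= "your-api-key-here"; // required by getApiKey()

const provider = new GoogleAIStudioProvider(); // defaults to GOOGLE_AI_MODEL / Gemini 2.5 Flash

// Assumption: BaseProvider exposes a public stream() that wraps executeStream().
const { stream } = await provider.stream({
  input: { text: "Write a haiku about type systems." },
  systemPrompt: "You are a concise assistant.",
  temperature: 0.7,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.content); // chunks are { content } objects, as yielded above
}
```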
@@ -0,0 +1,47 @@
+import type { ZodType, ZodTypeDef } from "zod";
+import { type Schema, type LanguageModelV1 } from "ai";
+import type { AIProviderName } from "../core/types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
+import { BaseProvider } from "../core/base-provider.js";
+/**
+ * Google Vertex AI Provider v2 - BaseProvider Implementation
+ *
+ * PHASE 3.5: Simple BaseProvider wrap around existing @ai-sdk/google-vertex implementation
+ *
+ * Features:
+ * - Extends BaseProvider for shared functionality
+ * - Preserves existing Google Cloud authentication
+ * - Maintains Anthropic model support via dynamic imports
+ * - Uses pre-initialized Vertex instance for efficiency
+ * - Enhanced error handling with setup guidance
+ */
+export declare class GoogleVertexProvider extends BaseProvider {
+    private vertex;
+    private model;
+    private projectId;
+    private location;
+    private cachedAnthropicModel;
+    constructor(modelName?: string);
+    protected getProviderName(): AIProviderName;
+    protected getDefaultModel(): string;
+    /**
+     * Returns the Vercel AI SDK model instance for Google Vertex
+     * Handles both Google and Anthropic models
+     */
+    protected getAISDKModel(): Promise<LanguageModelV1>;
+    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+    protected handleProviderError(error: any): Error;
+    private validateStreamOptions;
+    /**
+     * Check if Anthropic models are available
+     * @returns Promise<boolean> indicating if Anthropic support is available
+     */
+    hasAnthropicSupport(): Promise<boolean>;
+    /**
+     * Create an Anthropic model instance if available
+     * @param modelName Anthropic model name (e.g., 'claude-3-sonnet@20240229')
+     * @returns LanguageModelV1 instance or null if not available
+     */
+    createAnthropicModel(modelName: string): Promise<LanguageModelV1 | null>;
+}
+export default GoogleVertexProvider;
@@ -0,0 +1,205 @@
+import { createVertex, } from "@ai-sdk/google-vertex";
+import { streamText, Output } from "ai";
+import { BaseProvider } from "../core/base-provider.js";
+import { logger } from "../utils/logger.js";
+import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+// Cache for anthropic module to avoid repeated imports
+let _createVertexAnthropic = null;
+let _anthropicImportAttempted = false;
+// Function to dynamically import anthropic support
+async function getCreateVertexAnthropic() {
+    if (_anthropicImportAttempted) {
+        return _createVertexAnthropic;
+    }
+    _anthropicImportAttempted = true;
+    try {
+        // Try to import the anthropic module - available in @ai-sdk/google-vertex ^2.2.0+
+        const anthropicModule = await import("@ai-sdk/google-vertex/anthropic");
+        _createVertexAnthropic = anthropicModule.createVertexAnthropic;
+        logger.debug("[GoogleVertexAI] Anthropic module successfully loaded");
+        return _createVertexAnthropic;
+    }
+    catch (error) {
+        // Anthropic module not available
+        logger.warn("[GoogleVertexAI] Anthropic module not available. Install @ai-sdk/google-vertex ^2.2.0 for Anthropic model support.");
+        return null;
+    }
+}
+// Configuration helpers
+const getVertexProjectId = () => {
+    const projectId = process.env.GOOGLE_CLOUD_PROJECT_ID ||
+        process.env.VERTEX_PROJECT_ID ||
+        process.env.GOOGLE_VERTEX_PROJECT;
+    if (!projectId) {
+        throw new Error(`❌ Google Vertex AI Provider Configuration Error\n\nMissing required environment variables: GOOGLE_CLOUD_PROJECT_ID or VERTEX_PROJECT_ID\n\n🔧 Step 1: Get Google Cloud Credentials\n1. Visit: https://console.cloud.google.com/\n2. Create or select a project\n3. Enable Vertex AI API\n4. Set up authentication\n\n🔧 Step 2: Set Environment Variables\nAdd to your .env file:\nGOOGLE_CLOUD_PROJECT_ID=your_project_id_here\nGOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json\n\n🔧 Step 3: Restart Application\nRestart your application to load the new environment variables.`);
+    }
+    return projectId;
+};
+const getVertexLocation = () => {
+    return (process.env.GOOGLE_CLOUD_LOCATION ||
+        process.env.VERTEX_LOCATION ||
+        process.env.GOOGLE_VERTEX_LOCATION ||
+        "us-central1");
+};
+const getDefaultVertexModel = () => {
+    return process.env.VERTEX_MODEL || "gemini-1.5-pro";
+};
+const hasGoogleCredentials = () => {
+    return !!(process.env.GOOGLE_APPLICATION_CREDENTIALS ||
+        process.env.GOOGLE_SERVICE_ACCOUNT_KEY ||
+        (process.env.GOOGLE_AUTH_CLIENT_EMAIL &&
+            process.env.GOOGLE_AUTH_PRIVATE_KEY));
+};
+/**
+ * Google Vertex AI Provider v2 - BaseProvider Implementation
+ *
+ * PHASE 3.5: Simple BaseProvider wrap around existing @ai-sdk/google-vertex implementation
+ *
+ * Features:
+ * - Extends BaseProvider for shared functionality
+ * - Preserves existing Google Cloud authentication
+ * - Maintains Anthropic model support via dynamic imports
+ * - Uses pre-initialized Vertex instance for efficiency
+ * - Enhanced error handling with setup guidance
+ */
+export class GoogleVertexProvider extends BaseProvider {
+    vertex;
+    model;
+    projectId;
+    location;
+    cachedAnthropicModel = null;
+    constructor(modelName) {
+        super(modelName, "vertex");
+        // Validate Google Cloud credentials
+        if (!hasGoogleCredentials()) {
+            throw new Error(`❌ Google Vertex AI Provider Configuration Error\n\nMissing Google Cloud authentication. One of the following is required:\n\n🔧 Option 1: Service Account Key File\nGOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json\n\n🔧 Option 2: Service Account Key (Base64)\nGOOGLE_SERVICE_ACCOUNT_KEY=base64_encoded_key\n\n🔧 Option 3: Individual Credentials\nGOOGLE_AUTH_CLIENT_EMAIL=your-service-account@project.iam.gserviceaccount.com\nGOOGLE_AUTH_PRIVATE_KEY=-----BEGIN PRIVATE KEY-----...\n\n🔧 Step 4: Restart Application\nRestart your application to load the new environment variables.`);
+        }
+        // Initialize Google Cloud configuration
+        this.projectId = getVertexProjectId();
+        this.location = getVertexLocation();
+        const vertexConfig = {
+            project: this.projectId,
+            location: this.location,
+        };
+        // Create Vertex provider instance
+        this.vertex = createVertex(vertexConfig);
+        // Pre-initialize model for efficiency
+        this.model = this.vertex(this.modelName || getDefaultVertexModel());
+        logger.debug("Google Vertex AI BaseProvider v2 initialized", {
+            modelName: this.modelName,
+            projectId: this.projectId,
+            location: this.location,
+            provider: this.providerName,
+        });
+    }
+    getProviderName() {
+        return "vertex";
+    }
+    getDefaultModel() {
+        return getDefaultVertexModel();
+    }
+    /**
+     * Returns the Vercel AI SDK model instance for Google Vertex
+     * Handles both Google and Anthropic models
+     */
+    async getAISDKModel() {
+        // Check if this is an Anthropic model
+        if (this.modelName && this.modelName.includes("claude")) {
+            // Return cached Anthropic model if available
+            if (this.cachedAnthropicModel) {
+                return this.cachedAnthropicModel;
+            }
+            // Create and cache new Anthropic model
+            const anthropicModel = await this.createAnthropicModel(this.modelName);
+            if (anthropicModel) {
+                this.cachedAnthropicModel = anthropicModel;
+                return anthropicModel;
+            }
+            // Fall back to regular model if Anthropic not available
+            logger.warn(`Anthropic model ${this.modelName} requested but not available, falling back to Google model`);
+        }
+        return this.model;
+    }
+    // executeGenerate removed - BaseProvider handles all generation with tools
+    async executeStream(options, analysisSchema) {
+        try {
+            this.validateStreamOptions(options);
+            const result = await streamText({
+                model: this.model,
+                prompt: options.input.text,
+                system: options.systemPrompt,
+                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+                temperature: options.temperature,
+            });
+            return {
+                stream: (async function* () {
+                    for await (const chunk of result.textStream) {
+                        yield { content: chunk };
+                    }
+                })(),
+                provider: this.providerName,
+                model: this.modelName,
+            };
+        }
+        catch (error) {
+            throw this.handleProviderError(error);
+        }
+    }
+    handleProviderError(error) {
+        if (error.name === "TimeoutError") {
+            return new TimeoutError(`Google Vertex AI request timed out. Consider increasing timeout or using a lighter model.`, this.defaultTimeout);
+        }
+        if (error.message?.includes("PERMISSION_DENIED")) {
+            return new Error(`❌ Google Vertex AI Permission Denied\n\nYour Google Cloud credentials don't have permission to access Vertex AI.\n\n🔧 Required Steps:\n1. Ensure your service account has Vertex AI User role\n2. Check if Vertex AI API is enabled in your project\n3. Verify your project ID is correct\n4. Confirm your location/region has Vertex AI available`);
+        }
+        if (error.message?.includes("NOT_FOUND")) {
+            return new Error(`❌ Google Vertex AI Model Not Found\n\n${error.message}\n\n🔧 Check:\n1. Model name is correct (e.g., 'gemini-1.5-pro')\n2. Model is available in your region (${this.location})\n3. Your project has access to the model\n4. Model supports your request parameters`);
+        }
+        if (error.message?.includes("QUOTA_EXCEEDED")) {
+            return new Error(`❌ Google Vertex AI Quota Exceeded\n\n${error.message}\n\n🔧 Solutions:\n1. Check your Vertex AI quotas in Google Cloud Console\n2. Request quota increase if needed\n3. Try a different model or reduce request frequency\n4. Consider using a different region`);
+        }
+        if (error.message?.includes("INVALID_ARGUMENT")) {
+            return new Error(`❌ Google Vertex AI Invalid Request\n\n${error.message}\n\n🔧 Check:\n1. Request parameters are within model limits\n2. Input text is properly formatted\n3. Temperature and other settings are valid\n4. Model supports your request type`);
+        }
+        return new Error(`❌ Google Vertex AI Provider Error\n\n${error.message || "Unknown error occurred"}\n\n🔧 Troubleshooting:\n1. Check Google Cloud credentials and permissions\n2. Verify project ID and location settings\n3. Ensure Vertex AI API is enabled\n4. Check network connectivity`);
+    }
+    validateStreamOptions(options) {
+        if (!options.input?.text?.trim()) {
+            throw new Error("Prompt is required for streaming");
+        }
+        if (options.maxTokens &&
+            (options.maxTokens < 1 || options.maxTokens > 8192)) {
+            throw new Error("maxTokens must be between 1 and 8192 for Google Vertex AI");
+        }
+        if (options.temperature &&
+            (options.temperature < 0 || options.temperature > 2)) {
+            throw new Error("temperature must be between 0 and 2");
+        }
+    }
+    /**
+     * Check if Anthropic models are available
+     * @returns Promise<boolean> indicating if Anthropic support is available
+     */
+    async hasAnthropicSupport() {
+        const createVertexAnthropic = await getCreateVertexAnthropic();
+        return createVertexAnthropic !== null;
+    }
+    /**
+     * Create an Anthropic model instance if available
+     * @param modelName Anthropic model name (e.g., 'claude-3-sonnet@20240229')
+     * @returns LanguageModelV1 instance or null if not available
+     */
+    async createAnthropicModel(modelName) {
+        const createVertexAnthropic = await getCreateVertexAnthropic();
+        if (!createVertexAnthropic) {
+            return null;
+        }
+        const vertexAnthropic = createVertexAnthropic({
+            project: this.projectId,
+            location: this.location,
+        });
+        return vertexAnthropic(modelName);
+    }
+}
+export default GoogleVertexProvider;
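A hedged configuration sketch for the new `GoogleVertexProvider`. The environment variables and method names come from the implementation above; the deep import path is an assumption (check the package's `exports` map):

```js
// Assumed import path; the provider may also be re-exported from the package's index.
import { GoogleVertexProvider } from "@juspay/neurolink/dist/providers/google-vertex.js";

// Required Google Cloud configuration (see the constructor's setup guidance above)
process.env.GOOGLE_CLOUD_PROJECT_ID ??= "my-gcp-project";
process.env.GOOGLE_APPLICATION_CREDENTIALS ??= "/path/to/service-account.json";
process.env.GOOGLE_CLOUD_LOCATION ??= "us-central1"; // default region

const provider = new GoogleVertexProvider("gemini-1.5-pro");

// Anthropic-on-Vertex models are only available when @ai-sdk/google-vertex
// ships the /anthropic entry point (^2.2.0+), as checked dynamically above.
if (await provider.hasAnthropicSupport()) {
  const claude = await provider.createAnthropicModel("claude-3-sonnet@20240229");
  console.log("Claude on Vertex available:", claude !== null);
} else {
  console.log("Anthropic module not installed; Gemini models only");
}
```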