@juspay/neurolink 5.0.0 → 5.2.0
This diff compares publicly available package versions as published to their respective registries and is provided for informational purposes only.
- package/CHANGELOG.md +20 -7
- package/README.md +160 -172
- package/dist/agent/direct-tools.d.ts +6 -6
- package/dist/chat/sse-handler.js +5 -4
- package/dist/chat/websocket-chat-handler.js +9 -9
- package/dist/cli/commands/config.d.ts +3 -3
- package/dist/cli/commands/mcp.js +9 -8
- package/dist/cli/commands/ollama.js +3 -3
- package/dist/cli/factories/command-factory.d.ts +18 -0
- package/dist/cli/factories/command-factory.js +183 -0
- package/dist/cli/index.js +105 -157
- package/dist/cli/utils/interactive-setup.js +2 -2
- package/dist/core/base-provider.d.ts +423 -0
- package/dist/core/base-provider.js +365 -0
- package/dist/core/constants.d.ts +1 -1
- package/dist/core/constants.js +1 -1
- package/dist/core/dynamic-models.d.ts +6 -6
- package/dist/core/evaluation.d.ts +19 -80
- package/dist/core/evaluation.js +185 -484
- package/dist/core/factory.d.ts +3 -3
- package/dist/core/factory.js +31 -91
- package/dist/core/service-registry.d.ts +47 -0
- package/dist/core/service-registry.js +112 -0
- package/dist/core/types.d.ts +49 -49
- package/dist/core/types.js +1 -0
- package/dist/factories/compatibility-factory.d.ts +20 -0
- package/dist/factories/compatibility-factory.js +69 -0
- package/dist/factories/provider-factory.d.ts +72 -0
- package/dist/factories/provider-factory.js +144 -0
- package/dist/factories/provider-generate-factory.d.ts +20 -0
- package/dist/factories/provider-generate-factory.js +87 -0
- package/dist/factories/provider-registry.d.ts +38 -0
- package/dist/factories/provider-registry.js +107 -0
- package/dist/index.d.ts +8 -5
- package/dist/index.js +5 -5
- package/dist/lib/agent/direct-tools.d.ts +6 -6
- package/dist/lib/chat/sse-handler.js +5 -4
- package/dist/lib/chat/websocket-chat-handler.js +9 -9
- package/dist/lib/core/base-provider.d.ts +423 -0
- package/dist/lib/core/base-provider.js +365 -0
- package/dist/lib/core/constants.d.ts +1 -1
- package/dist/lib/core/constants.js +1 -1
- package/dist/lib/core/dynamic-models.d.ts +6 -6
- package/dist/lib/core/evaluation.d.ts +19 -80
- package/dist/lib/core/evaluation.js +185 -484
- package/dist/lib/core/factory.d.ts +3 -3
- package/dist/lib/core/factory.js +30 -91
- package/dist/lib/core/service-registry.d.ts +47 -0
- package/dist/lib/core/service-registry.js +112 -0
- package/dist/lib/core/types.d.ts +49 -49
- package/dist/lib/core/types.js +1 -0
- package/dist/lib/factories/compatibility-factory.d.ts +20 -0
- package/dist/lib/factories/compatibility-factory.js +69 -0
- package/dist/lib/factories/provider-factory.d.ts +72 -0
- package/dist/lib/factories/provider-factory.js +144 -0
- package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
- package/dist/lib/factories/provider-generate-factory.js +87 -0
- package/dist/lib/factories/provider-registry.d.ts +38 -0
- package/dist/lib/factories/provider-registry.js +107 -0
- package/dist/lib/index.d.ts +8 -5
- package/dist/lib/index.js +5 -5
- package/dist/lib/mcp/client.js +5 -5
- package/dist/lib/mcp/config.js +28 -3
- package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
- package/dist/lib/mcp/external-client.js +2 -2
- package/dist/lib/mcp/factory.d.ts +1 -1
- package/dist/lib/mcp/factory.js +1 -1
- package/dist/lib/mcp/function-calling.js +1 -1
- package/dist/lib/mcp/initialize-tools.d.ts +1 -1
- package/dist/lib/mcp/initialize-tools.js +45 -1
- package/dist/lib/mcp/initialize.js +16 -6
- package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
- package/dist/lib/mcp/orchestrator.js +4 -4
- package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +8 -6
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/lib/mcp/unified-registry.d.ts +4 -0
- package/dist/lib/mcp/unified-registry.js +42 -9
- package/dist/lib/neurolink.d.ts +161 -174
- package/dist/lib/neurolink.js +723 -397
- package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
- package/dist/lib/providers/amazon-bedrock.js +143 -0
- package/dist/lib/providers/analytics-helper.js +7 -4
- package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/lib/providers/anthropic-baseprovider.js +114 -0
- package/dist/lib/providers/anthropic.d.ts +19 -39
- package/dist/lib/providers/anthropic.js +84 -378
- package/dist/lib/providers/azure-openai.d.ts +20 -0
- package/dist/lib/providers/azure-openai.js +89 -0
- package/dist/lib/providers/function-calling-provider.d.ts +14 -12
- package/dist/lib/providers/function-calling-provider.js +114 -64
- package/dist/lib/providers/google-ai-studio.d.ts +23 -0
- package/dist/lib/providers/google-ai-studio.js +107 -0
- package/dist/lib/providers/google-vertex.d.ts +47 -0
- package/dist/lib/providers/google-vertex.js +205 -0
- package/dist/lib/providers/huggingFace.d.ts +33 -27
- package/dist/lib/providers/huggingFace.js +103 -400
- package/dist/lib/providers/index.d.ts +9 -9
- package/dist/lib/providers/index.js +9 -9
- package/dist/lib/providers/mcp-provider.d.ts +13 -8
- package/dist/lib/providers/mcp-provider.js +63 -18
- package/dist/lib/providers/mistral.d.ts +42 -0
- package/dist/lib/providers/mistral.js +160 -0
- package/dist/lib/providers/ollama.d.ts +52 -35
- package/dist/lib/providers/ollama.js +297 -477
- package/dist/lib/providers/openAI.d.ts +21 -21
- package/dist/lib/providers/openAI.js +81 -245
- package/dist/lib/sdk/tool-extension.d.ts +181 -0
- package/dist/lib/sdk/tool-extension.js +283 -0
- package/dist/lib/sdk/tool-registration.d.ts +95 -0
- package/dist/lib/sdk/tool-registration.js +167 -0
- package/dist/lib/types/generate-types.d.ts +80 -0
- package/dist/lib/types/generate-types.js +1 -0
- package/dist/lib/types/mcp-types.d.ts +116 -0
- package/dist/lib/types/mcp-types.js +5 -0
- package/dist/lib/types/stream-types.d.ts +95 -0
- package/dist/lib/types/stream-types.js +1 -0
- package/dist/lib/types/universal-provider-options.d.ts +87 -0
- package/dist/lib/types/universal-provider-options.js +53 -0
- package/dist/lib/utils/providerUtils-fixed.js +1 -1
- package/dist/lib/utils/streaming-utils.d.ts +14 -2
- package/dist/lib/utils/streaming-utils.js +0 -3
- package/dist/mcp/client.js +5 -5
- package/dist/mcp/config.js +28 -3
- package/dist/mcp/dynamic-orchestrator.js +8 -8
- package/dist/mcp/external-client.js +2 -2
- package/dist/mcp/factory.d.ts +1 -1
- package/dist/mcp/factory.js +1 -1
- package/dist/mcp/function-calling.js +1 -1
- package/dist/mcp/initialize-tools.d.ts +1 -1
- package/dist/mcp/initialize-tools.js +45 -1
- package/dist/mcp/initialize.js +16 -6
- package/dist/mcp/neurolink-mcp-client.js +10 -10
- package/dist/mcp/orchestrator.js +4 -4
- package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/mcp/servers/ai-providers/ai-core-server.js +8 -6
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/mcp/unified-registry.d.ts +4 -0
- package/dist/mcp/unified-registry.js +42 -9
- package/dist/neurolink.d.ts +161 -174
- package/dist/neurolink.js +723 -397
- package/dist/providers/amazon-bedrock.d.ts +32 -0
- package/dist/providers/amazon-bedrock.js +143 -0
- package/dist/providers/analytics-helper.js +7 -4
- package/dist/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/providers/anthropic-baseprovider.js +114 -0
- package/dist/providers/anthropic.d.ts +19 -39
- package/dist/providers/anthropic.js +83 -377
- package/dist/providers/azure-openai.d.ts +20 -0
- package/dist/providers/azure-openai.js +89 -0
- package/dist/providers/function-calling-provider.d.ts +14 -12
- package/dist/providers/function-calling-provider.js +114 -64
- package/dist/providers/google-ai-studio.d.ts +23 -0
- package/dist/providers/google-ai-studio.js +108 -0
- package/dist/providers/google-vertex.d.ts +47 -0
- package/dist/providers/google-vertex.js +205 -0
- package/dist/providers/huggingFace.d.ts +33 -27
- package/dist/providers/huggingFace.js +102 -399
- package/dist/providers/index.d.ts +9 -9
- package/dist/providers/index.js +9 -9
- package/dist/providers/mcp-provider.d.ts +13 -8
- package/dist/providers/mcp-provider.js +63 -18
- package/dist/providers/mistral.d.ts +42 -0
- package/dist/providers/mistral.js +160 -0
- package/dist/providers/ollama.d.ts +52 -35
- package/dist/providers/ollama.js +297 -476
- package/dist/providers/openAI.d.ts +21 -21
- package/dist/providers/openAI.js +81 -246
- package/dist/sdk/tool-extension.d.ts +181 -0
- package/dist/sdk/tool-extension.js +283 -0
- package/dist/sdk/tool-registration.d.ts +95 -0
- package/dist/sdk/tool-registration.js +168 -0
- package/dist/types/generate-types.d.ts +80 -0
- package/dist/types/generate-types.js +1 -0
- package/dist/types/mcp-types.d.ts +116 -0
- package/dist/types/mcp-types.js +5 -0
- package/dist/types/stream-types.d.ts +95 -0
- package/dist/types/stream-types.js +1 -0
- package/dist/types/universal-provider-options.d.ts +87 -0
- package/dist/types/universal-provider-options.js +53 -0
- package/dist/utils/providerUtils-fixed.js +1 -1
- package/dist/utils/streaming-utils.d.ts +14 -2
- package/dist/utils/streaming-utils.js +0 -3
- package/package.json +15 -10
- package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -89
- package/dist/lib/providers/agent-enhanced-provider.js +0 -614
- package/dist/lib/providers/amazonBedrock.d.ts +0 -19
- package/dist/lib/providers/amazonBedrock.js +0 -334
- package/dist/lib/providers/azureOpenAI.d.ts +0 -39
- package/dist/lib/providers/azureOpenAI.js +0 -436
- package/dist/lib/providers/googleAIStudio.d.ts +0 -49
- package/dist/lib/providers/googleAIStudio.js +0 -333
- package/dist/lib/providers/googleVertexAI.d.ts +0 -38
- package/dist/lib/providers/googleVertexAI.js +0 -519
- package/dist/lib/providers/mistralAI.d.ts +0 -34
- package/dist/lib/providers/mistralAI.js +0 -294
- package/dist/providers/agent-enhanced-provider.d.ts +0 -89
- package/dist/providers/agent-enhanced-provider.js +0 -614
- package/dist/providers/amazonBedrock.d.ts +0 -19
- package/dist/providers/amazonBedrock.js +0 -334
- package/dist/providers/azureOpenAI.d.ts +0 -39
- package/dist/providers/azureOpenAI.js +0 -437
- package/dist/providers/googleAIStudio.d.ts +0 -49
- package/dist/providers/googleAIStudio.js +0 -333
- package/dist/providers/googleVertexAI.d.ts +0 -38
- package/dist/providers/googleVertexAI.js +0 -519
- package/dist/providers/mistralAI.d.ts +0 -34
- package/dist/providers/mistralAI.js +0 -294
@@ -3,7 +3,7 @@
  * Integrates MCP tools directly with AI SDK's function calling capabilities
  * This is the missing piece that enables true AI function calling!
  */
-import { generateText as
+import { generateText as aiGenerate, Output, } from "ai";
 import { getAvailableFunctionTools, executeFunctionCall, isFunctionCallingAvailable, } from "../mcp/function-calling.js";
 import { createExecutionContext } from "../mcp/context-manager.js";
 import { mcpLogger } from "../mcp/logging.js";
@@ -22,31 +22,88 @@ export class FunctionCallingProvider {
         this.sessionId = options.sessionId || `function-calling-${Date.now()}`;
         this.userId = options.userId || "function-calling-user";
     }
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "FunctionCallingProvider.stream";
+        const startTime = Date.now();
+        // Parse parameters - support both string and options object
+        const options = typeof optionsOrPrompt === "string"
+            ? { input: { text: optionsOrPrompt } }
+            : optionsOrPrompt;
+        // Validate input
+        if (!options?.input?.text ||
+            typeof options.input.text !== "string" ||
+            options.input.text.trim() === "") {
+            throw new Error("Stream options must include input.text as a non-empty string");
+        }
+        // Use base provider's stream implementation
+        const baseResult = await this.baseProvider.stream(options);
+        if (!baseResult) {
+            throw new Error("No stream response received from provider");
+        }
+        // Return the result with function-calling metadata
+        return {
+            ...baseResult,
+            provider: "function-calling",
+            model: options.model || "unknown",
+            metadata: {
+                streamId: `function-calling-${Date.now()}`,
+                startTime,
+            },
+        };
+    }
     /**
      * Generate text with real function calling support
      */
-    async
+    async generate(optionsOrPrompt, analysisSchema) {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const functionTag = "FunctionCallingProvider.
+        const functionTag = "FunctionCallingProvider.generate";
         // If function calling is disabled, use base provider
         if (!this.enableFunctionCalling) {
             mcpLogger.debug(`[${functionTag}] Function calling disabled, using base provider`);
-
+            const result = await this.baseProvider.generate(options, analysisSchema);
+            if (!result) {
+                return {
+                    content: "No response generated",
+                    provider: "function-calling",
+                    model: "unknown",
+                };
+            }
+            return result;
         }
         try {
             // Check if function calling is available
             const functionsAvailable = await isFunctionCallingAvailable();
             if (!functionsAvailable) {
                 mcpLogger.debug(`[${functionTag}] No functions available, using base provider`);
-
+                const result = await this.baseProvider.generate(options, analysisSchema);
+                if (!result) {
+                    return {
+                        content: "No response generated",
+                        provider: "function-calling",
+                        model: "unknown",
+                    };
+                }
+                return result;
             }
             // Get available function tools
             const { tools, toolMap } = await getAvailableFunctionTools();
             if (tools.length === 0) {
                 mcpLogger.debug(`[${functionTag}] No tools available, using base provider`);
-
+                const result = await this.baseProvider.generate(options, analysisSchema);
+                if (!result) {
+                    return {
+                        content: "No response generated",
+                        provider: "function-calling",
+                        model: "unknown",
+                    };
+                }
+                return result;
             }
             mcpLogger.debug(`[${functionTag}] Function calling enabled with ${tools.length} tools`);
             // Create execution context
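Taken together, the added lines above give FunctionCallingProvider a public `stream()` entry point alongside `generate()`. A minimal consumption sketch, assuming an already-constructed provider instance (its constructor options are not fully shown in this diff) and using only the option and result shapes visible above:

```ts
// Shapes inferred from the diff; assumptions, not the package's exported types.
interface StreamChunk { content: string }
interface FunctionCallingStreamResult {
  stream: AsyncIterable<StreamChunk>;
  provider: string;
  model: string;
  metadata?: { streamId: string; startTime: number };
}
interface StreamCapableProvider {
  stream(optionsOrPrompt: string | { input: { text: string }; model?: string }): Promise<FunctionCallingStreamResult>;
}

// Drain a stream() result the way the executeStream implementations in this
// release emit it: an async iterable of { content } chunks.
async function printStream(provider: StreamCapableProvider, prompt: string): Promise<void> {
  const result = await provider.stream({ input: { text: prompt } });
  for await (const chunk of result.stream) {
    process.stdout.write(chunk.content);
  }
  console.log(`\n[${result.provider}/${result.model}]`);
}
```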
@@ -55,11 +112,15 @@ export class FunctionCallingProvider {
                 userId: this.userId,
                 aiProvider: this.baseProvider.constructor.name,
             });
-            // Use the AI SDK's native function calling by calling
+            // Use the AI SDK's native function calling by calling generate directly
             // We need to get the underlying model from the base provider
-            const result = await this.
+            const result = await this.generateWithTools(options, tools, toolMap, context, analysisSchema);
             if (!result) {
-                return
+                return {
+                    content: "No response generated",
+                    provider: "function-calling",
+                    model: "unknown",
+                };
             }
             // Enhance result with function calling metadata
             const enhancedResult = {
@@ -77,18 +138,26 @@ export class FunctionCallingProvider {
         }
         catch (error) {
             mcpLogger.warn(`[${functionTag}] Function calling failed, using base provider:`, error);
-
+            const result = await this.baseProvider.generate(options, analysisSchema);
+            if (!result) {
+                return {
+                    content: "No response generated",
+                    provider: "function-calling",
+                    model: "unknown",
+                };
+            }
+            return result;
         }
     }
     /**
      * Generate text using AI SDK's native function calling
      */
-    async
-        const functionTag = "FunctionCallingProvider.
+    async generateWithTools(options, tools, toolMap, context, analysisSchema) {
+        const functionTag = "FunctionCallingProvider.generateWithTools";
         try {
             // Convert our tools to AI SDK format with proper execution
             const toolsWithExecution = this.convertToAISDKTools(tools, toolMap, context);
-            mcpLogger.debug(`[${functionTag}] Calling AI SDK
+            mcpLogger.debug(`[${functionTag}] Calling AI SDK generate with ${Object.keys(toolsWithExecution).length} tools and maxSteps: 5`);
             mcpLogger.debug(`[${functionTag}] Sanitized tool names:`, Object.keys(toolsWithExecution));
             // Log the first few tools to debug the issue
             const toolNames = Object.keys(toolsWithExecution);
@@ -99,9 +168,17 @@ export class FunctionCallingProvider {
             const modelInfo = await this.getModelFromProvider();
             if (!modelInfo) {
                 mcpLogger.warn(`[${functionTag}] Could not get model from provider, falling back to base provider`);
-
+                const result = await this.baseProvider.generate(options, analysisSchema);
+                if (!result) {
+                    return {
+                        content: "No response generated",
+                        provider: "function-calling",
+                        model: "unknown",
+                    };
+                }
+                return result;
             }
-            // Use AI SDK's
+            // Use AI SDK's generate directly with tools
             const generateOptions = {
                 model: modelInfo.model,
                 prompt: options.prompt,
@@ -118,13 +195,29 @@ export class FunctionCallingProvider {
                     schema: analysisSchema,
                 });
             }
-            const result = await
-            mcpLogger.debug(`[${functionTag}] AI SDK
+            const result = await aiGenerate(generateOptions);
+            mcpLogger.debug(`[${functionTag}] AI SDK generate completed`, {
                 toolCalls: result.toolCalls?.length || 0,
                 finishReason: result.finishReason,
                 usage: result.usage,
             });
-            return
+            return {
+                content: result.text,
+                provider: "function-calling",
+                model: "unknown",
+                usage: result.usage
+                    ? {
+                        inputTokens: result.usage.promptTokens,
+                        outputTokens: result.usage.completionTokens,
+                        totalTokens: result.usage.totalTokens,
+                    }
+                    : undefined,
+                responseTime: 0,
+                toolsUsed: result.toolCalls?.map((tc) => tc.toolName) || [],
+                toolExecutions: [],
+                enhancedWithTools: (result.toolCalls?.length || 0) > 0,
+                availableTools: [],
+            };
         }
         catch (error) {
             mcpLogger.error(`[${functionTag}] Failed to generate text with tools:`, error);
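The return block added in this hunk maps the Vercel AI SDK usage fields onto the package's own result shape. A small sketch of that normalization, with both interfaces written out here as assumptions inferred from the hunk:

```ts
// The AI SDK reports promptTokens/completionTokens; this package's result
// exposes inputTokens/outputTokens. Interfaces below are illustrative only.
interface AiSdkUsage { promptTokens: number; completionTokens: number; totalTokens: number }
interface NeurolinkUsage { inputTokens: number; outputTokens: number; totalTokens: number }

function normalizeUsage(usage?: AiSdkUsage): NeurolinkUsage | undefined {
  return usage
    ? { inputTokens: usage.promptTokens, outputTokens: usage.completionTokens, totalTokens: usage.totalTokens }
    : undefined;
}
```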
@@ -299,56 +392,13 @@ CRITICAL INSTRUCTIONS:
 These functions provide accurate, real-time data. Use them actively to enhance your responses.`;
     }
     /**
-     *
+     * Alias for generate() - CLI-SDK consistency
      */
-    async streamText(optionsOrPrompt, analysisSchema) {
-        const options = typeof optionsOrPrompt === "string"
-            ? { prompt: optionsOrPrompt }
-            : optionsOrPrompt;
-        const functionTag = "FunctionCallingProvider.streamText";
-        // If function calling is disabled, use base provider
-        if (!this.enableFunctionCalling) {
-            mcpLogger.debug(`[${functionTag}] Function calling disabled, using base provider`);
-            return this.baseProvider.streamText(options, analysisSchema);
-        }
-        try {
-            // Check if function calling is available
-            const functionsAvailable = await isFunctionCallingAvailable();
-            if (!functionsAvailable) {
-                mcpLogger.debug(`[${functionTag}] No functions available, using base provider`);
-                return this.baseProvider.streamText(options, analysisSchema);
-            }
-            // Get available function tools
-            const { tools } = await getAvailableFunctionTools();
-            if (tools.length === 0) {
-                mcpLogger.debug(`[${functionTag}] No tools available, using base provider`);
-                return this.baseProvider.streamText(options, analysisSchema);
-            }
-            mcpLogger.debug(`[${functionTag}] Streaming with ${tools.length} functions available`);
-            // Enhance system prompt
-            const enhancedSystemPrompt = this.createFunctionAwareSystemPrompt(options.systemPrompt, tools);
-            // Stream with enhanced prompt
-            return this.baseProvider.streamText({
-                ...options,
-                systemPrompt: enhancedSystemPrompt,
-            }, analysisSchema);
-        }
-        catch (error) {
-            mcpLogger.warn(`[${functionTag}] Function calling failed, using base provider:`, error);
-            return this.baseProvider.streamText(options, analysisSchema);
-        }
-    }
-    /**
-     * Alias for generateText() - CLI-SDK consistency
-     */
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
     /**
-     * Short alias for
+     * Short alias for generate() - CLI-SDK consistency
      */
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
 /**
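Net effect of this file's diff: the old `generateText()`/`streamText()` pair is removed, `generate()` becomes the primary method, and `gen()` stays as a short alias. A hedged migration sketch; only the method names come from the diff, the `provider` handle and its result shape are assumed:

```ts
// Hypothetical provider handle; the concrete type is not part of this diff.
declare const provider: {
  generate(prompt: string): Promise<{ content: string }>;
  gen(prompt: string): Promise<{ content: string }>;
};

// 5.0.0 (removed): await provider.generateText("Hello");
// 5.2.0: generate() is the primary method, gen() the short alias.
const full = await provider.generate("Hello");
const short = await provider.gen("Hello");
```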
@@ -0,0 +1,23 @@
+import type { ZodType, ZodTypeDef } from "zod";
+import { type Schema, type LanguageModelV1 } from "ai";
+import type { AIProviderName } from "../core/types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
+import { BaseProvider } from "../core/base-provider.js";
+/**
+ * Google AI Studio provider implementation using BaseProvider
+ * Migrated from original GoogleAIStudio class to new factory pattern
+ */
+export declare class GoogleAIStudioProvider extends BaseProvider {
+    constructor(modelName?: string, sdk?: any);
+    protected getProviderName(): AIProviderName;
+    protected getDefaultModel(): string;
+    /**
+     * 🔧 PHASE 2: Return AI SDK model instance for tool calling
+     */
+    protected getAISDKModel(): LanguageModelV1;
+    protected handleProviderError(error: any): Error;
+    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+    private getApiKey;
+    private validateStreamOptions;
+}
+export default GoogleAIStudioProvider;
@@ -0,0 +1,107 @@
+import { createGoogleGenerativeAI } from "@ai-sdk/google";
+import { streamText } from "ai";
+import { GoogleAIModels } from "../core/types.js";
+import { BaseProvider } from "../core/base-provider.js";
+import { logger } from "../utils/logger.js";
+import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+// Environment variable setup
+if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
+    process.env.GOOGLE_AI_API_KEY) {
+    process.env.GOOGLE_GENERATIVE_AI_API_KEY = process.env.GOOGLE_AI_API_KEY;
+}
+/**
+ * Google AI Studio provider implementation using BaseProvider
+ * Migrated from original GoogleAIStudio class to new factory pattern
+ */
+export class GoogleAIStudioProvider extends BaseProvider {
+    constructor(modelName, sdk) {
+        super(modelName, "google-ai", sdk);
+        logger.debug("GoogleAIStudioProvider initialized", {
+            model: this.modelName,
+            provider: this.providerName,
+            sdkProvided: !!sdk,
+        });
+    }
+    // ===================
+    // ABSTRACT METHOD IMPLEMENTATIONS
+    // ===================
+    getProviderName() {
+        return "google-ai";
+    }
+    getDefaultModel() {
+        return process.env.GOOGLE_AI_MODEL || GoogleAIModels.GEMINI_2_5_FLASH;
+    }
+    /**
+     * 🔧 PHASE 2: Return AI SDK model instance for tool calling
+     */
+    getAISDKModel() {
+        const apiKey = this.getApiKey();
+        const google = createGoogleGenerativeAI({ apiKey });
+        return google(this.modelName);
+    }
+    handleProviderError(error) {
+        if (error instanceof TimeoutError) {
+            return new Error(`Google AI request timed out: ${error.message}`);
+        }
+        if (error?.message?.includes("API_KEY_INVALID")) {
+            return new Error("Invalid Google AI API key. Please check your GOOGLE_AI_API_KEY environment variable.");
+        }
+        if (error?.message?.includes("RATE_LIMIT_EXCEEDED")) {
+            return new Error("Google AI rate limit exceeded. Please try again later.");
+        }
+        return new Error(`Google AI error: ${error?.message || "Unknown error"}`);
+    }
+    // executeGenerate removed - BaseProvider handles all generation with tools
+    async executeStream(options, analysisSchema) {
+        this.validateStreamOptions(options);
+        const apiKey = this.getApiKey();
+        const google = createGoogleGenerativeAI({ apiKey });
+        const model = google(this.modelName);
+        const timeout = this.getTimeout(options);
+        const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
+        try {
+            const result = await streamText({
+                model,
+                prompt: options.input.text,
+                system: options.systemPrompt,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+                tools: options.tools,
+                toolChoice: "auto",
+                abortSignal: timeoutController?.controller.signal,
+            });
+            timeoutController?.cleanup();
+            // Transform string stream to content object stream
+            const transformedStream = async function* () {
+                for await (const chunk of result.textStream) {
+                    yield { content: chunk };
+                }
+            };
+            return {
+                stream: transformedStream(),
+                provider: this.providerName,
+                model: this.modelName,
+            };
+        }
+        catch (error) {
+            throw this.handleProviderError(error);
+        }
+    }
+    // ===================
+    // HELPER METHODS
+    // ===================
+    getApiKey() {
+        const apiKey = process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY;
+        if (!apiKey) {
+            throw new Error("GOOGLE_AI_API_KEY or GOOGLE_GENERATIVE_AI_API_KEY environment variable is not set");
+        }
+        return apiKey;
+    }
+    validateStreamOptions(options) {
+        if (!options.input?.text || options.input.text.trim().length === 0) {
+            throw new Error("Input text is required and cannot be empty");
+        }
+    }
+}
+export default GoogleAIStudioProvider;
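For orientation, a hypothetical setup sketch for the new Google AI Studio provider, using only the environment fallback and constructor visible above; the deep-import path is an assumption based on this diff's file layout, not a documented entry point:

```ts
// Assumed deep-import path; not a documented entry point.
import GoogleAIStudioProvider from "@juspay/neurolink/dist/lib/providers/google-ai-studio.js";

// Either variable works: the module copies GOOGLE_AI_API_KEY into
// GOOGLE_GENERATIVE_AI_API_KEY, which @ai-sdk/google reads.
process.env.GOOGLE_AI_API_KEY ??= "<your-google-ai-studio-key>";

// Model falls back to GOOGLE_AI_MODEL, then GoogleAIModels.GEMINI_2_5_FLASH, when omitted.
const provider = new GoogleAIStudioProvider("gemini-2.5-flash");
```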
@@ -0,0 +1,47 @@
+import type { ZodType, ZodTypeDef } from "zod";
+import { type Schema, type LanguageModelV1 } from "ai";
+import type { AIProviderName } from "../core/types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
+import { BaseProvider } from "../core/base-provider.js";
+/**
+ * Google Vertex AI Provider v2 - BaseProvider Implementation
+ *
+ * PHASE 3.5: Simple BaseProvider wrap around existing @ai-sdk/google-vertex implementation
+ *
+ * Features:
+ * - Extends BaseProvider for shared functionality
+ * - Preserves existing Google Cloud authentication
+ * - Maintains Anthropic model support via dynamic imports
+ * - Uses pre-initialized Vertex instance for efficiency
+ * - Enhanced error handling with setup guidance
+ */
+export declare class GoogleVertexProvider extends BaseProvider {
+    private vertex;
+    private model;
+    private projectId;
+    private location;
+    private cachedAnthropicModel;
+    constructor(modelName?: string);
+    protected getProviderName(): AIProviderName;
+    protected getDefaultModel(): string;
+    /**
+     * Returns the Vercel AI SDK model instance for Google Vertex
+     * Handles both Google and Anthropic models
+     */
+    protected getAISDKModel(): Promise<LanguageModelV1>;
+    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+    protected handleProviderError(error: any): Error;
+    private validateStreamOptions;
+    /**
+     * Check if Anthropic models are available
+     * @returns Promise<boolean> indicating if Anthropic support is available
+     */
+    hasAnthropicSupport(): Promise<boolean>;
+    /**
+     * Create an Anthropic model instance if available
+     * @param modelName Anthropic model name (e.g., 'claude-3-sonnet@20240229')
+     * @returns LanguageModelV1 instance or null if not available
+     */
+    createAnthropicModel(modelName: string): Promise<LanguageModelV1 | null>;
+}
+export default GoogleVertexProvider;
@@ -0,0 +1,205 @@
+import { createVertex, } from "@ai-sdk/google-vertex";
+import { streamText } from "ai";
+import { BaseProvider } from "../core/base-provider.js";
+import { logger } from "../utils/logger.js";
+import { TimeoutError, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+// Cache for anthropic module to avoid repeated imports
+let _createVertexAnthropic = null;
+let _anthropicImportAttempted = false;
+// Function to dynamically import anthropic support
+async function getCreateVertexAnthropic() {
+    if (_anthropicImportAttempted) {
+        return _createVertexAnthropic;
+    }
+    _anthropicImportAttempted = true;
+    try {
+        // Try to import the anthropic module - available in @ai-sdk/google-vertex ^2.2.0+
+        const anthropicModule = await import("@ai-sdk/google-vertex/anthropic");
+        _createVertexAnthropic = anthropicModule.createVertexAnthropic;
+        logger.debug("[GoogleVertexAI] Anthropic module successfully loaded");
+        return _createVertexAnthropic;
+    }
+    catch (error) {
+        // Anthropic module not available
+        logger.warn("[GoogleVertexAI] Anthropic module not available. Install @ai-sdk/google-vertex ^2.2.0 for Anthropic model support.");
+        return null;
+    }
+}
+// Configuration helpers
+const getVertexProjectId = () => {
+    const projectId = process.env.GOOGLE_CLOUD_PROJECT_ID ||
+        process.env.VERTEX_PROJECT_ID ||
+        process.env.GOOGLE_VERTEX_PROJECT;
+    if (!projectId) {
+        throw new Error(`❌ Google Vertex AI Provider Configuration Error\n\nMissing required environment variables: GOOGLE_CLOUD_PROJECT_ID or VERTEX_PROJECT_ID\n\n🔧 Step 1: Get Google Cloud Credentials\n1. Visit: https://console.cloud.google.com/\n2. Create or select a project\n3. Enable Vertex AI API\n4. Set up authentication\n\n🔧 Step 2: Set Environment Variables\nAdd to your .env file:\nGOOGLE_CLOUD_PROJECT_ID=your_project_id_here\nGOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json\n\n🔧 Step 3: Restart Application\nRestart your application to load the new environment variables.`);
+    }
+    return projectId;
+};
+const getVertexLocation = () => {
+    return (process.env.GOOGLE_CLOUD_LOCATION ||
+        process.env.VERTEX_LOCATION ||
+        process.env.GOOGLE_VERTEX_LOCATION ||
+        "us-central1");
+};
+const getDefaultVertexModel = () => {
+    return process.env.VERTEX_MODEL || "gemini-1.5-pro";
+};
+const hasGoogleCredentials = () => {
+    return !!(process.env.GOOGLE_APPLICATION_CREDENTIALS ||
+        process.env.GOOGLE_SERVICE_ACCOUNT_KEY ||
+        (process.env.GOOGLE_AUTH_CLIENT_EMAIL &&
+            process.env.GOOGLE_AUTH_PRIVATE_KEY));
+};
+/**
+ * Google Vertex AI Provider v2 - BaseProvider Implementation
+ *
+ * PHASE 3.5: Simple BaseProvider wrap around existing @ai-sdk/google-vertex implementation
+ *
+ * Features:
+ * - Extends BaseProvider for shared functionality
+ * - Preserves existing Google Cloud authentication
+ * - Maintains Anthropic model support via dynamic imports
+ * - Uses pre-initialized Vertex instance for efficiency
+ * - Enhanced error handling with setup guidance
+ */
+export class GoogleVertexProvider extends BaseProvider {
+    vertex;
+    model;
+    projectId;
+    location;
+    cachedAnthropicModel = null;
+    constructor(modelName) {
+        super(modelName, "vertex");
+        // Validate Google Cloud credentials
+        if (!hasGoogleCredentials()) {
+            throw new Error(`❌ Google Vertex AI Provider Configuration Error\n\nMissing Google Cloud authentication. One of the following is required:\n\n🔧 Option 1: Service Account Key File\nGOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json\n\n🔧 Option 2: Service Account Key (Base64)\nGOOGLE_SERVICE_ACCOUNT_KEY=base64_encoded_key\n\n🔧 Option 3: Individual Credentials\nGOOGLE_AUTH_CLIENT_EMAIL=your-service-account@project.iam.gserviceaccount.com\nGOOGLE_AUTH_PRIVATE_KEY=-----BEGIN PRIVATE KEY-----...\n\n🔧 Step 4: Restart Application\nRestart your application to load the new environment variables.`);
+        }
+        // Initialize Google Cloud configuration
+        this.projectId = getVertexProjectId();
+        this.location = getVertexLocation();
+        const vertexConfig = {
+            project: this.projectId,
+            location: this.location,
+        };
+        // Create Vertex provider instance
+        this.vertex = createVertex(vertexConfig);
+        // Pre-initialize model for efficiency
+        this.model = this.vertex(this.modelName || getDefaultVertexModel());
+        logger.debug("Google Vertex AI BaseProvider v2 initialized", {
+            modelName: this.modelName,
+            projectId: this.projectId,
+            location: this.location,
+            provider: this.providerName,
+        });
+    }
+    getProviderName() {
+        return "vertex";
+    }
+    getDefaultModel() {
+        return getDefaultVertexModel();
+    }
+    /**
+     * Returns the Vercel AI SDK model instance for Google Vertex
+     * Handles both Google and Anthropic models
+     */
+    async getAISDKModel() {
+        // Check if this is an Anthropic model
+        if (this.modelName && this.modelName.includes("claude")) {
+            // Return cached Anthropic model if available
+            if (this.cachedAnthropicModel) {
+                return this.cachedAnthropicModel;
+            }
+            // Create and cache new Anthropic model
+            const anthropicModel = await this.createAnthropicModel(this.modelName);
+            if (anthropicModel) {
+                this.cachedAnthropicModel = anthropicModel;
+                return anthropicModel;
+            }
+            // Fall back to regular model if Anthropic not available
+            logger.warn(`Anthropic model ${this.modelName} requested but not available, falling back to Google model`);
+        }
+        return this.model;
+    }
+    // executeGenerate removed - BaseProvider handles all generation with tools
+    async executeStream(options, analysisSchema) {
+        try {
+            this.validateStreamOptions(options);
+            const result = await streamText({
+                model: this.model,
+                prompt: options.input.text,
+                system: options.systemPrompt,
+                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+                temperature: options.temperature,
+            });
+            return {
+                stream: (async function* () {
+                    for await (const chunk of result.textStream) {
+                        yield { content: chunk };
+                    }
+                })(),
+                provider: this.providerName,
+                model: this.modelName,
+            };
+        }
+        catch (error) {
+            throw this.handleProviderError(error);
+        }
+    }
+    handleProviderError(error) {
+        if (error.name === "TimeoutError") {
+            return new TimeoutError(`Google Vertex AI request timed out. Consider increasing timeout or using a lighter model.`, this.defaultTimeout);
+        }
+        if (error.message?.includes("PERMISSION_DENIED")) {
+            return new Error(`❌ Google Vertex AI Permission Denied\n\nYour Google Cloud credentials don't have permission to access Vertex AI.\n\n🔧 Required Steps:\n1. Ensure your service account has Vertex AI User role\n2. Check if Vertex AI API is enabled in your project\n3. Verify your project ID is correct\n4. Confirm your location/region has Vertex AI available`);
+        }
+        if (error.message?.includes("NOT_FOUND")) {
+            return new Error(`❌ Google Vertex AI Model Not Found\n\n${error.message}\n\n🔧 Check:\n1. Model name is correct (e.g., 'gemini-1.5-pro')\n2. Model is available in your region (${this.location})\n3. Your project has access to the model\n4. Model supports your request parameters`);
+        }
+        if (error.message?.includes("QUOTA_EXCEEDED")) {
+            return new Error(`❌ Google Vertex AI Quota Exceeded\n\n${error.message}\n\n🔧 Solutions:\n1. Check your Vertex AI quotas in Google Cloud Console\n2. Request quota increase if needed\n3. Try a different model or reduce request frequency\n4. Consider using a different region`);
+        }
+        if (error.message?.includes("INVALID_ARGUMENT")) {
+            return new Error(`❌ Google Vertex AI Invalid Request\n\n${error.message}\n\n🔧 Check:\n1. Request parameters are within model limits\n2. Input text is properly formatted\n3. Temperature and other settings are valid\n4. Model supports your request type`);
+        }
+        return new Error(`❌ Google Vertex AI Provider Error\n\n${error.message || "Unknown error occurred"}\n\n🔧 Troubleshooting:\n1. Check Google Cloud credentials and permissions\n2. Verify project ID and location settings\n3. Ensure Vertex AI API is enabled\n4. Check network connectivity`);
+    }
+    validateStreamOptions(options) {
+        if (!options.input?.text?.trim()) {
+            throw new Error("Prompt is required for streaming");
+        }
+        if (options.maxTokens &&
+            (options.maxTokens < 1 || options.maxTokens > 8192)) {
+            throw new Error("maxTokens must be between 1 and 8192 for Google Vertex AI");
+        }
+        if (options.temperature &&
+            (options.temperature < 0 || options.temperature > 2)) {
+            throw new Error("temperature must be between 0 and 2");
+        }
+    }
+    /**
+     * Check if Anthropic models are available
+     * @returns Promise<boolean> indicating if Anthropic support is available
+     */
+    async hasAnthropicSupport() {
+        const createVertexAnthropic = await getCreateVertexAnthropic();
+        return createVertexAnthropic !== null;
+    }
+    /**
+     * Create an Anthropic model instance if available
+     * @param modelName Anthropic model name (e.g., 'claude-3-sonnet@20240229')
+     * @returns LanguageModelV1 instance or null if not available
+     */
+    async createAnthropicModel(modelName) {
+        const createVertexAnthropic = await getCreateVertexAnthropic();
+        if (!createVertexAnthropic) {
+            return null;
+        }
+        const vertexAnthropic = createVertexAnthropic({
+            project: this.projectId,
+            location: this.location,
+        });
+        return vertexAnthropic(modelName);
+    }
+}
+export default GoogleVertexProvider;
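Finally, a hypothetical configuration sketch for the new Vertex provider, limited to the environment variables and public methods that appear in the code above; the import path is again an assumption drawn from this diff's file layout:

```ts
// Assumed deep-import path; not a documented entry point.
import GoogleVertexProvider from "@juspay/neurolink/dist/lib/providers/google-vertex.js";

// Credentials are checked by hasGoogleCredentials() in the constructor.
process.env.GOOGLE_CLOUD_PROJECT_ID ??= "my-gcp-project";
process.env.GOOGLE_APPLICATION_CREDENTIALS ??= "/path/to/service-account.json";

const vertex = new GoogleVertexProvider("gemini-1.5-pro");

// Claude model names route through the optional @ai-sdk/google-vertex/anthropic module.
if (await vertex.hasAnthropicSupport()) {
  const claude = await vertex.createAnthropicModel("claude-3-sonnet@20240229");
  console.log("Anthropic model created:", claude !== null);
}
```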