@juspay/neurolink 7.10.2 → 7.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/config/types.d.ts +14 -0
- package/dist/config/types.js +6 -0
- package/dist/core/baseProvider.d.ts +45 -340
- package/dist/core/baseProvider.js +205 -30
- package/dist/core/types.d.ts +4 -0
- package/dist/factories/providerFactory.js +1 -1
- package/dist/factories/providerRegistry.js +8 -8
- package/dist/lib/config/types.d.ts +14 -0
- package/dist/lib/config/types.js +6 -0
- package/dist/lib/core/baseProvider.d.ts +45 -340
- package/dist/lib/core/baseProvider.js +205 -30
- package/dist/lib/core/types.d.ts +4 -0
- package/dist/lib/factories/providerFactory.js +1 -1
- package/dist/lib/factories/providerRegistry.js +8 -8
- package/dist/lib/mcp/servers/agent/directToolsServer.js +80 -68
- package/dist/lib/mcp/toolRegistry.js +8 -2
- package/dist/lib/neurolink.js +20 -0
- package/dist/lib/providers/amazonBedrock.d.ts +0 -1
- package/dist/lib/providers/amazonBedrock.js +0 -13
- package/dist/lib/providers/anthropic.js +8 -25
- package/dist/lib/providers/googleAiStudio.d.ts +0 -1
- package/dist/lib/providers/googleAiStudio.js +10 -15
- package/dist/lib/providers/googleVertex.d.ts +0 -1
- package/dist/lib/providers/googleVertex.js +17 -24
- package/dist/lib/providers/huggingFace.d.ts +0 -1
- package/dist/lib/providers/huggingFace.js +0 -8
- package/dist/lib/providers/litellm.d.ts +0 -1
- package/dist/lib/providers/litellm.js +0 -8
- package/dist/lib/providers/mistral.d.ts +9 -24
- package/dist/lib/providers/mistral.js +44 -82
- package/dist/lib/providers/ollama.d.ts +0 -1
- package/dist/lib/providers/ollama.js +0 -12
- package/dist/lib/providers/openAI.d.ts +2 -3
- package/dist/lib/providers/openAI.js +12 -20
- package/dist/lib/providers/openaiCompatible.d.ts +0 -1
- package/dist/lib/providers/openaiCompatible.js +0 -8
- package/dist/lib/utils/toolUtils.d.ts +32 -0
- package/dist/lib/utils/toolUtils.js +60 -0
- package/dist/mcp/servers/agent/directToolsServer.js +80 -68
- package/dist/mcp/toolRegistry.js +8 -2
- package/dist/neurolink.js +20 -0
- package/dist/providers/amazonBedrock.d.ts +0 -1
- package/dist/providers/amazonBedrock.js +0 -13
- package/dist/providers/anthropic.js +8 -25
- package/dist/providers/googleAiStudio.d.ts +0 -1
- package/dist/providers/googleAiStudio.js +10 -15
- package/dist/providers/googleVertex.d.ts +0 -1
- package/dist/providers/googleVertex.js +17 -24
- package/dist/providers/huggingFace.d.ts +0 -1
- package/dist/providers/huggingFace.js +0 -8
- package/dist/providers/litellm.d.ts +0 -1
- package/dist/providers/litellm.js +0 -8
- package/dist/providers/mistral.d.ts +9 -24
- package/dist/providers/mistral.js +44 -82
- package/dist/providers/ollama.d.ts +0 -1
- package/dist/providers/ollama.js +0 -12
- package/dist/providers/openAI.d.ts +2 -3
- package/dist/providers/openAI.js +12 -20
- package/dist/providers/openaiCompatible.d.ts +0 -1
- package/dist/providers/openaiCompatible.js +0 -8
- package/dist/utils/toolUtils.d.ts +32 -0
- package/dist/utils/toolUtils.js +60 -0
- package/package.json +1 -1
package/dist/lib/factories/providerRegistry.js
CHANGED
@@ -24,17 +24,17 @@ export class ProviderRegistry {
         // Register providers with dynamic import factory functions
         const { ProviderFactory } = await import("./providerFactory.js");
         // Register Google AI Studio Provider (our validated baseline)
-        ProviderFactory.registerProvider(AIProviderName.GOOGLE_AI, async (modelName,
+        ProviderFactory.registerProvider(AIProviderName.GOOGLE_AI, async (modelName, _providerName, sdk) => {
            const { GoogleAIStudioProvider } = await import("../providers/googleAiStudio.js");
            return new GoogleAIStudioProvider(modelName, sdk);
        }, GoogleAIModels.GEMINI_2_5_FLASH, ["googleAiStudio", "google", "gemini", "google-ai"]);
        // Register OpenAI provider
-        ProviderFactory.registerProvider(AIProviderName.OPENAI, async (modelName,
+        ProviderFactory.registerProvider(AIProviderName.OPENAI, async (modelName, _providerName, sdk) => {
            const { OpenAIProvider } = await import("../providers/openAI.js");
-            return new OpenAIProvider(modelName);
+            return new OpenAIProvider(modelName, sdk);
        }, OpenAIModels.GPT_4O_MINI, ["gpt", "chatgpt"]);
        // Register Anthropic provider
-        ProviderFactory.registerProvider(AIProviderName.ANTHROPIC, async (modelName,
+        ProviderFactory.registerProvider(AIProviderName.ANTHROPIC, async (modelName, _providerName, sdk) => {
            const { AnthropicProvider } = await import("../providers/anthropic.js");
            return new AnthropicProvider(modelName, sdk);
        }, "claude-3-5-sonnet-20241022", ["claude", "anthropic"]);
@@ -62,7 +62,7 @@ export class ProviderRegistry {
            return new HuggingFaceProvider(modelName);
        }, process.env.HUGGINGFACE_MODEL || "microsoft/DialoGPT-medium", ["huggingface", "hf"]);
        // Register Mistral AI provider
-        ProviderFactory.registerProvider(AIProviderName.MISTRAL, async (modelName,
+        ProviderFactory.registerProvider(AIProviderName.MISTRAL, async (modelName, _providerName, sdk) => {
            const { MistralProvider } = await import("../providers/mistral.js");
            return new MistralProvider(modelName, sdk);
        }, "mistral-large-latest", ["mistral"]);
@@ -72,18 +72,18 @@ export class ProviderRegistry {
            return new OllamaProvider(modelName);
        }, process.env.OLLAMA_MODEL || "llama3.1:8b", ["ollama", "local"]);
        // Register LiteLLM provider
-        ProviderFactory.registerProvider(AIProviderName.LITELLM, async (modelName,
+        ProviderFactory.registerProvider(AIProviderName.LITELLM, async (modelName, _providerName, sdk) => {
            const { LiteLLMProvider } = await import("../providers/litellm.js");
            return new LiteLLMProvider(modelName, sdk);
        }, process.env.LITELLM_MODEL || "openai/gpt-4o-mini", ["litellm"]);
        // Register OpenAI Compatible provider
-        ProviderFactory.registerProvider(AIProviderName.OPENAI_COMPATIBLE, async (modelName,
+        ProviderFactory.registerProvider(AIProviderName.OPENAI_COMPATIBLE, async (modelName, _providerName, sdk) => {
            const { OpenAICompatibleProvider } = await import("../providers/openaiCompatible.js");
            return new OpenAICompatibleProvider(modelName, sdk);
        }, process.env.OPENAI_COMPATIBLE_MODEL || undefined, // Enable auto-discovery when no model specified
        ["openai-compatible", "openrouter", "vllm", "compatible"]);
        // Register Amazon SageMaker provider
-        ProviderFactory.registerProvider(AIProviderName.SAGEMAKER, async (modelName,
+        ProviderFactory.registerProvider(AIProviderName.SAGEMAKER, async (modelName, _providerName, _sdk) => {
            const { AmazonSageMakerProvider } = await import("../providers/amazonSagemaker.js");
            return new AmazonSageMakerProvider(modelName);
        }, process.env.SAGEMAKER_MODEL || "sagemaker-model", ["sagemaker", "aws-sagemaker"]);
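Every registration above follows the same four-argument shape: a provider key, an async factory that lazily imports the provider class, a default model, and a list of lookup aliases. The change in this release threads the third factory parameter (`sdk`) through to providers that accept it. As a hedged illustration of that shape — the enum member `MY_PROVIDER`, the module path, and the aliases below are invented for the example; only the `registerProvider` call signature comes from the code above:

    // Hypothetical registration, not part of the package:
    ProviderFactory.registerProvider(AIProviderName.MY_PROVIDER /* invented member */, async (modelName, _providerName, sdk) => {
        // Lazy dynamic import keeps startup cheap; the class loads on first use.
        const { MyProvider } = await import("../providers/myProvider.js"); // invented module
        return new MyProvider(modelName, sdk); // forward the SDK instance, as OpenAI/Mistral now do
    }, "my-default-model", ["mine", "my-provider"]);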
package/dist/lib/mcp/servers/agent/directToolsServer.js
CHANGED
@@ -5,6 +5,7 @@
 import { createMCPServer } from "../../factory.js";
 import { directAgentTools } from "../../../agent/directTools.js";
 import { logger } from "../../../utils/logger.js";
+import { shouldDisableBuiltinTools } from "../../../utils/toolUtils.js";
 /**
  * Direct Tools Server - Agent direct tools for immediate use
  */
@@ -17,58 +18,77 @@ export const directToolsServer = createMCPServer({
 });
 /**
  * Wrap each direct tool and register it with the server
+ * Only register if built-in tools are not disabled
  */
-… (old lines 21–49: the previous unconditional registration block; only "toolSpec &&" survived this extraction)
+if (!shouldDisableBuiltinTools()) {
+    Object.entries(directAgentTools).forEach(([toolName, toolDef]) => {
+        // The toolDef is a Vercel AI SDK Tool object
+        // Extract properties from the Tool object
+        const toolSpec = toolDef._spec || toolDef;
+        const description = typeof toolSpec === "object" &&
+            toolSpec &&
+            "description" in toolSpec &&
+            typeof toolSpec.description === "string"
+            ? toolSpec.description
+            : `Direct tool: ${toolName}`;
+        const inputSchema = typeof toolSpec === "object" && toolSpec && "parameters" in toolSpec
+            ? toolSpec.parameters
+            : undefined;
+        const execute = typeof toolSpec === "object" && toolSpec && "execute" in toolSpec
+            ? toolSpec.execute
+            : undefined;
+        directToolsServer.registerTool({
+            name: toolName,
+            description: description,
+            category: getToolCategory(toolName),
+            inputSchema: inputSchema,
+            isImplemented: true,
+            execute: async (params, context) => {
+                const startTime = Date.now();
+                try {
+                    logger.debug(`[Direct Tools] Executing ${toolName} with params:`, params);
+                    // Execute the direct tool
+                    if (!execute || typeof execute !== "function") {
+                        throw new Error(`Tool ${toolName} has no execute function`);
+                    }
+                    const result = await execute(params);
+                    // Convert direct tool result to ToolResult format
+                    if (result?.success) {
+                        return {
+                            success: true,
+                            data: result.data || result,
+                            usage: {
+                                executionTime: Date.now() - startTime,
+                            },
+                            metadata: {
+                                toolName,
+                                serverId: "neurolink-direct",
+                                sessionId: context.sessionId,
+                            },
+                        };
+                    }
+                    else {
+                        return {
+                            success: false,
+                            data: null,
+                            error: String(result?.error) || "Unknown error",
+                            usage: {
+                                executionTime: Date.now() - startTime,
+                            },
+                            metadata: {
+                                toolName,
+                                serverId: "neurolink-direct",
+                                sessionId: context.sessionId,
+                            },
+                        };
+                    }
                 }
-        … (old lines 51–52 not preserved in this extraction)
-        if (result?.success) {
-            return {
-                success: true,
-                data: result.data || result,
-                usage: {
-                    executionTime: Date.now() - startTime,
-                },
-                metadata: {
-                    toolName,
-                    serverId: "neurolink-direct",
-                    sessionId: context.sessionId,
-                },
-            };
-        }
-        else {
+                catch (error) {
+                    logger.error(`[Direct Tools] Error executing ${toolName}:`, error);
                    return {
                        success: false,
                        data: null,
-                error:
+                        error: error instanceof Error ? error.message : String(error),
                        usage: {
                            executionTime: Date.now() - startTime,
                        },
@@ -79,26 +99,13 @@ Object.entries(directAgentTools).forEach(([toolName, toolDef]) => {
                        },
                    };
                }
-        }
-        … (old line 83 not preserved in this extraction)
-        logger.error(`[Direct Tools] Error executing ${toolName}:`, error);
-        return {
-            success: false,
-            data: null,
-            error: error instanceof Error ? error.message : String(error),
-            usage: {
-                executionTime: Date.now() - startTime,
-            },
-            metadata: {
-                toolName,
-                serverId: "neurolink-direct",
-                sessionId: context.sessionId,
-            },
-        };
-        }
-    },
+            },
+        });
    });
-}
+}
+else {
+    logger.info("[Direct Tools] Built-in tools disabled via configuration");
+}
@@ -117,5 +124,10 @@ function getToolCategory(toolName) {
        return "utility";
    }
 }
-// Log successful registration
-… (old line 121 not preserved in this extraction)
+// Log successful registration or disable status
+if (!shouldDisableBuiltinTools()) {
+    logger.info(`[Direct Tools] Registered ${Object.keys(directAgentTools).length} direct tools`);
+}
+else {
+    logger.info("[Direct Tools] 0 direct tools registered (disabled via environment variable)");
+}
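Both this server and the tool registry below gate registration on `shouldDisableBuiltinTools()`. The helper itself lives in the new `dist/utils/toolUtils.js` (+60 lines), which is not shown in this excerpt; a minimal sketch of such a guard, assuming it reads an environment flag (the variable name below is an assumption — the log message above only says "disabled via environment variable"):

    // Hypothetical sketch; the real toolUtils.js is not included in this diff excerpt.
    export function shouldDisableBuiltinTools() {
        const flag = process.env.NEUROLINK_DISABLE_BUILTIN_TOOLS; // assumed variable name
        if (!flag) {
            return false; // default: built-in tools stay registered
        }
        return ["1", "true", "yes"].includes(flag.trim().toLowerCase());
    }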
package/dist/lib/mcp/toolRegistry.js
CHANGED
@@ -5,6 +5,7 @@
 import { MCPRegistry } from "./registry.js";
 import { registryLogger } from "../utils/logger.js";
 import { randomUUID } from "crypto";
+import { shouldDisableBuiltinTools } from "../utils/toolUtils.js";
 import { directAgentTools } from "../agent/directTools.js";
 export class MCPToolRegistry extends MCPRegistry {
    tools = new Map();
@@ -12,8 +13,13 @@ export class MCPToolRegistry extends MCPRegistry {
    toolExecutionStats = new Map();
    constructor() {
        super();
-        //
-        … (old line 16 not preserved in this extraction)
+        // 🔧 CONDITIONAL: Only auto-register direct tools if not disabled via configuration
+        if (!shouldDisableBuiltinTools()) {
+            this.registerDirectTools();
+        }
+        else {
+            registryLogger.debug("Built-in direct tools disabled via configuration");
+        }
    }
    /**
     * Register all direct tools from directAgentTools
package/dist/lib/neurolink.js
CHANGED
@@ -337,6 +337,11 @@ export class NeuroLink {
        // Create provider and generate
        const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
        this);
+        // Enable tool execution for the provider using BaseProvider method
+        provider.setupToolExecutor({
+            customTools: this.customTools,
+            executeTool: this.executeTool.bind(this),
+        }, functionTag);
        const result = await provider.generate({
            ...options,
            systemPrompt: enhancedSystemPrompt,
@@ -410,6 +415,11 @@ export class NeuroLink {
        logger.debug(`[${functionTag}] Attempting provider: ${providerName}`);
        const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
        this);
+        // Enable tool execution for direct provider generation using BaseProvider method
+        provider.setupToolExecutor({
+            customTools: this.customTools,
+            executeTool: this.executeTool.bind(this),
+        }, functionTag);
        const result = await provider.generate(options);
        const responseTime = Date.now() - startTime;
        if (!result) {
@@ -599,6 +609,11 @@ export class NeuroLink {
        });
        // Create provider using the same factory pattern as generate
        const provider = await AIProviderFactory.createBestProvider(providerName, options.model, true, this);
+        // Enable tool execution for streaming using BaseProvider method
+        provider.setupToolExecutor({
+            customTools: this.customTools,
+            executeTool: this.executeTool.bind(this),
+        }, functionTag);
        // Create clean options for provider (remove factoryConfig)
        const cleanOptions = createCleanStreamOptions(enhancedOptions);
        // Call the provider's stream method with clean options
@@ -650,6 +665,11 @@ export class NeuroLink {
        // Use factory to create provider without MCP
        const provider = await AIProviderFactory.createBestProvider(providerName, options.model, false, // Disable MCP for fallback
        this);
+        // Enable tool execution for fallback streaming using BaseProvider method
+        provider.setupToolExecutor({
+            customTools: this.customTools,
+            executeTool: this.executeTool.bind(this),
+        }, functionTag);
        // Create clean options for fallback provider (remove factoryConfig)
        const cleanOptions = createCleanStreamOptions(enhancedOptions);
        const streamResult = await provider.stream(cleanOptions);
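All four call sites wire the same executor object into the provider, so the contract implied by them is a single `setupToolExecutor(executor, functionTag)` method on BaseProvider. A hedged sketch of that contract — the real implementation is part of the baseProvider.js changes (+205 lines) not shown here, and the stored-field names are assumptions:

    // Sketch of the implied BaseProvider method, inferred from the call sites above.
    class BaseProviderSketch {
        setupToolExecutor(executor, functionTag) {
            // executor.customTools: the NeuroLink instance's registered custom tools
            // executor.executeTool: bound dispatcher so tool calls route back
            // through the owning NeuroLink instance
            this.toolExecutor = executor;   // assumed field name
            this.executorTag = functionTag; // presumably used for logging
        }
    }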
package/dist/lib/providers/amazonBedrock.d.ts
CHANGED
@@ -27,6 +27,5 @@ export declare class AmazonBedrockProvider extends BaseProvider {
    protected getAISDKModel(): LanguageModelV1;
    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
    protected handleProviderError(error: unknown): Error;
-    private validateStreamOptions;
 }
 export default AmazonBedrockProvider;
package/dist/lib/providers/amazonBedrock.js
CHANGED
@@ -116,18 +116,5 @@ export class AmazonBedrockProvider extends BaseProvider {
        }
        return new Error(`❌ Amazon Bedrock Provider Error\n\n${errorMessage || "Unknown error occurred"}\n\n🔧 Troubleshooting:\n1. Check AWS credentials and permissions\n2. Verify model availability\n3. Check network connectivity`);
    }
-    validateStreamOptions(options) {
-        if (!options.input?.text?.trim()) {
-            throw new Error("Prompt is required for streaming");
-        }
-        if (options.maxTokens &&
-            (options.maxTokens < 1 || options.maxTokens > 4096)) {
-            throw new Error("maxTokens must be between 1 and 4096 for Amazon Bedrock");
-        }
-        if (options.temperature &&
-            (options.temperature < 0 || options.temperature > 1)) {
-            throw new Error("temperature must be between 0 and 1");
-        }
-    }
 }
 export default AmazonBedrockProvider;
package/dist/lib/providers/anthropic.js
CHANGED
@@ -3,7 +3,7 @@ import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
 // Configuration helpers - now using consolidated utility
 const getAnthropicApiKey = () => {
@@ -87,52 +87,35 @@ export class AnthropicProvider extends BaseProvider {
    }
    // executeGenerate removed - BaseProvider handles all generation with tools
    async executeStream(options, analysisSchema) {
-        … (old line 90 not preserved in this extraction)
-        const validationOptions = {
-            prompt: options.input.text,
-            systemPrompt: options.systemPrompt,
-            temperature: options.temperature,
-            maxTokens: options.maxTokens,
-        };
-        this.validateOptions(validationOptions);
+        this.validateStreamOptions(options);
        const timeout = this.getTimeout(options);
        const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
        try {
            // ✅ Get tools for streaming (same as generate method)
            const shouldUseTools = !options.disableTools && this.supportsTools();
            const tools = shouldUseTools ? await this.getAllTools() : {};
-            // 🔧 CRITICAL FIX: Vercel AI SDK streamText() hangs with tools and maxSteps > 1
-            // For stream-focused SDK, we need reliable streaming, so avoid the hanging case
-            if (shouldUseTools && Object.keys(tools).length > 0) {
-                throw new Error("Vercel AI SDK streamText() limitation with tools - falling back to synthetic streaming");
-            }
            const result = await streamText({
                model: this.model,
                prompt: options.input.text,
                system: options.systemPrompt || undefined,
                temperature: options.temperature,
                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
-                tools
-                maxSteps:
-                toolChoice: "none",
+                tools,
+                maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+                toolChoice: shouldUseTools ? "auto" : "none",
                abortSignal: timeoutController?.controller.signal,
            });
            timeoutController?.cleanup();
-            … (old line 121 not preserved in this extraction)
-            const transformedStream = async function* () {
-                for await (const chunk of result.textStream) {
-                    yield { content: chunk };
-                }
-            };
+            const transformedStream = this.createTextStream(result);
            // ✅ Note: Vercel AI SDK's streamText() method limitations with tools
            // The streamText() function doesn't provide the same tool result access as generateText()
-            //
+            // Full tool support is now available with real streaming
            const toolCalls = [];
            const toolResults = [];
            const usage = await result.usage;
            const finishReason = await result.finishReason;
            return {
-                stream: transformedStream
+                stream: transformedStream,
                provider: this.providerName,
                model: this.modelName,
                toolCalls, // ✅ Include tool calls in stream result
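The hand-rolled async generator removed here is replaced by `this.createTextStream(result)`, a helper the other providers below adopt as well. Judging from the code it replaces, the shared BaseProvider helper presumably does exactly what the inline generators did; a sketch under that assumption (whether it returns the generator function or an already-started iterator is not visible in this excerpt):

    // Sketch inferred from the removed inline generators; the actual
    // baseProvider.js implementation is not shown in this diff.
    createTextStream(result) {
        return (async function* () {
            for await (const chunk of result.textStream) {
                yield { content: chunk }; // wrap raw text in the { content } chunk shape
            }
        })();
    }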
package/dist/lib/providers/googleAiStudio.d.ts
CHANGED
@@ -18,6 +18,5 @@ export declare class GoogleAIStudioProvider extends BaseProvider {
    protected handleProviderError(error: unknown): Error;
    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
    private getApiKey;
-    private validateStreamOptions;
 }
 export default GoogleAIStudioProvider;
package/dist/lib/providers/googleAiStudio.js
CHANGED
@@ -4,7 +4,7 @@ import { GoogleAIModels } from "../core/types.js";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 // Environment variable setup
 if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
@@ -73,30 +73,30 @@ export class GoogleAIStudioProvider extends BaseProvider {
        const timeout = this.getTimeout(options);
        const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
        try {
+            // Get tools consistently with generate method
+            const shouldUseTools = !options.disableTools && this.supportsTools();
+            const tools = shouldUseTools ? await this.getAllTools() : {};
            const result = await streamText({
                model,
                prompt: options.input.text,
                system: options.systemPrompt,
                temperature: options.temperature,
                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
-                tools
-                … (old line 83 not preserved in this extraction)
+                tools,
+                maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+                toolChoice: shouldUseTools ? "auto" : "none",
                abortSignal: timeoutController?.controller.signal,
            });
            timeoutController?.cleanup();
-            // Transform string stream to content object stream
-            const transformedStream =
-                for await (const chunk of result.textStream) {
-                    yield { content: chunk };
-                }
-            };
+            // Transform string stream to content object stream using BaseProvider method
+            const transformedStream = this.createTextStream(result);
            // Create analytics promise that resolves after stream completion
            const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
                requestId: `google-ai-stream-${Date.now()}`,
                streamingMode: true,
            });
            return {
-                stream: transformedStream
+                stream: transformedStream,
                provider: this.providerName,
                model: this.modelName,
                analytics: analyticsPromise,
@@ -121,10 +121,5 @@ export class GoogleAIStudioProvider extends BaseProvider {
        }
        return apiKey;
    }
-    validateStreamOptions(options) {
-        if (!options.input?.text || options.input.text.trim().length === 0) {
-            throw new Error("Input text is required and cannot be empty");
-        }
-    }
 }
 export default GoogleAIStudioProvider;
package/dist/lib/providers/googleVertex.d.ts
CHANGED
@@ -40,7 +40,6 @@ export declare class GoogleVertexProvider extends BaseProvider {
    private getModel;
    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
    protected handleProviderError(error: unknown): Error;
-    private validateStreamOptions;
    /**
     * Memory-safe cache management for model configurations
     * Implements LRU eviction to prevent memory leaks in long-running processes
package/dist/lib/providers/googleVertex.js
CHANGED
@@ -2,8 +2,8 @@ import { createVertex, } from "@ai-sdk/google-vertex";
 import { streamText, Output, } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
-import { TimeoutError } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { ModelConfigurationManager } from "../core/modelConfiguration.js";
 import { validateApiKey, createVertexProjectConfig, createGoogleAuthConfig, } from "../utils/providerConfig.js";
 // Cache for anthropic module to avoid repeated imports
@@ -184,6 +184,9 @@ export class GoogleVertexProvider extends BaseProvider {
    async executeStream(options, analysisSchema) {
        const functionTag = "GoogleVertexProvider.executeStream";
        let chunkCount = 0;
+        // Add timeout controller for consistency with other providers
+        const timeout = this.getTimeout(options);
+        const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
        try {
            this.validateStreamOptions(options);
            logger.debug(`${functionTag}: Starting stream request`, {
@@ -200,6 +203,9 @@ export class GoogleVertexProvider extends BaseProvider {
            const maxTokens = shouldSetMaxTokens
                ? options.maxTokens || DEFAULT_MAX_TOKENS
                : undefined;
+            // Get tools consistently with generate method (using BaseProvider pattern)
+            const shouldUseTools = !options.disableTools && this.supportsTools();
+            const tools = shouldUseTools ? await this.getAllTools() : {};
            // Build complete stream options with proper typing
            let streamOptions = {
                model: model,
@@ -207,6 +213,10 @@ export class GoogleVertexProvider extends BaseProvider {
                system: options.systemPrompt,
                temperature: options.temperature,
                ...(maxTokens && { maxTokens }),
+                tools,
+                maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+                toolChoice: shouldUseTools ? "auto" : "none",
+                abortSignal: timeoutController?.controller.signal,
                onError: (event) => {
                    const error = event.error;
                    const errorMessage = error instanceof Error ? error.message : String(error);
@@ -243,17 +253,17 @@ export class GoogleVertexProvider extends BaseProvider {
                }
            }
            const result = streamText(streamOptions);
+            timeoutController?.cleanup();
+            // Transform string stream to content object stream using BaseProvider method
+            const transformedStream = this.createTextStream(result);
            return {
-                stream:
-                    for await (const chunk of result.textStream) {
-                        yield { content: chunk };
-                    }
-                })(),
+                stream: transformedStream,
                provider: this.providerName,
                model: this.modelName,
            };
        }
        catch (error) {
+            timeoutController?.cleanup();
            logger.error(`${functionTag}: Exception`, {
                provider: this.providerName,
                modelName: this.modelName,
@@ -287,23 +297,6 @@ export class GoogleVertexProvider extends BaseProvider {
        }
        return new Error(`❌ Google Vertex AI Provider Error\n\n${message}\n\nTroubleshooting:\n1. Check Google Cloud credentials and permissions\n2. Verify project ID and location settings\n3. Ensure Vertex AI API is enabled\n4. Check network connectivity`);
    }
-    validateStreamOptions(options) {
-        if (!options.input?.text?.trim()) {
-            throw new Error("Prompt is required for streaming");
-        }
-        // Use cached model configuration for validation performance
-        const modelName = this.modelName || getDefaultVertexModel();
-        const shouldValidateMaxTokens = this.shouldSetMaxTokensCached(modelName);
-        if (shouldValidateMaxTokens &&
-            options.maxTokens &&
-            (options.maxTokens < 1 || options.maxTokens > 8192)) {
-            throw new Error("maxTokens must be between 1 and 8192 for Google Vertex AI");
-        }
-        if (options.temperature &&
-            (options.temperature < 0 || options.temperature > 2)) {
-            throw new Error("temperature must be between 0 and 2");
-        }
-    }
    /**
     * Memory-safe cache management for model configurations
     * Implements LRU eviction to prevent memory leaks in long-running processes
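Vertex streaming also gains the timeout wiring the other providers already had: create a controller before the call, hand its abort signal to streamText, and clean up on both the success and error paths. The contract of `createTimeoutController` can be inferred from its usage (`timeoutController?.controller.signal`, `timeoutController?.cleanup()`); a sketch under that assumption — the real utils/timeout.js (with its TimeoutError class) is not shown in this excerpt:

    // Hedged sketch of the helper's shape, inferred from call sites only.
    export function createTimeoutController(timeoutMs, providerName, operation) {
        if (!timeoutMs) {
            return undefined; // callers use optional chaining, so "no timeout" may yield undefined
        }
        const controller = new AbortController();
        const timer = setTimeout(() => {
            controller.abort(new Error(`${providerName} ${operation} timed out after ${timeoutMs}ms`));
        }, timeoutMs);
        return {
            controller,                         // signal is handed to streamText
            cleanup: () => clearTimeout(timer), // cancel the timer once the call settles
        };
    }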
package/dist/lib/providers/huggingFace.js
CHANGED
@@ -272,14 +272,6 @@ Available tools will be provided in the function calling format. Use them when t
    getAISDKModel() {
        return this.model;
    }
-    // ===================
-    // PRIVATE VALIDATION METHODS
-    // ===================
-    validateStreamOptions(options) {
-        if (!options.input?.text || options.input.text.trim().length === 0) {
-            throw new Error("Input text is required and cannot be empty");
-        }
-    }
 }
 // Export for factory registration
 export default HuggingFaceProvider;
package/dist/lib/providers/litellm.js
CHANGED
@@ -264,12 +264,4 @@ export class LiteLLMProvider extends BaseProvider {
            throw error;
        }
    }
-    // ===================
-    // PRIVATE VALIDATION METHODS
-    // ===================
-    validateStreamOptions(options) {
-        if (!options.input?.text || options.input.text.trim().length === 0) {
-            throw new Error("Input text is required and cannot be empty");
-        }
-    }
 }