@juspay/neurolink 5.0.0 → 5.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +20 -7
- package/README.md +160 -172
- package/dist/agent/direct-tools.d.ts +6 -6
- package/dist/chat/sse-handler.js +5 -4
- package/dist/chat/websocket-chat-handler.js +9 -9
- package/dist/cli/commands/config.d.ts +3 -3
- package/dist/cli/commands/mcp.js +9 -8
- package/dist/cli/commands/ollama.js +3 -3
- package/dist/cli/factories/command-factory.d.ts +18 -0
- package/dist/cli/factories/command-factory.js +183 -0
- package/dist/cli/index.js +105 -157
- package/dist/cli/utils/interactive-setup.js +2 -2
- package/dist/core/base-provider.d.ts +423 -0
- package/dist/core/base-provider.js +365 -0
- package/dist/core/constants.d.ts +1 -1
- package/dist/core/constants.js +1 -1
- package/dist/core/dynamic-models.d.ts +6 -6
- package/dist/core/evaluation.d.ts +19 -80
- package/dist/core/evaluation.js +185 -484
- package/dist/core/factory.d.ts +3 -3
- package/dist/core/factory.js +31 -91
- package/dist/core/service-registry.d.ts +47 -0
- package/dist/core/service-registry.js +112 -0
- package/dist/core/types.d.ts +49 -49
- package/dist/core/types.js +1 -0
- package/dist/factories/compatibility-factory.d.ts +20 -0
- package/dist/factories/compatibility-factory.js +69 -0
- package/dist/factories/provider-factory.d.ts +72 -0
- package/dist/factories/provider-factory.js +144 -0
- package/dist/factories/provider-generate-factory.d.ts +20 -0
- package/dist/factories/provider-generate-factory.js +87 -0
- package/dist/factories/provider-registry.d.ts +38 -0
- package/dist/factories/provider-registry.js +107 -0
- package/dist/index.d.ts +8 -5
- package/dist/index.js +5 -5
- package/dist/lib/agent/direct-tools.d.ts +6 -6
- package/dist/lib/chat/sse-handler.js +5 -4
- package/dist/lib/chat/websocket-chat-handler.js +9 -9
- package/dist/lib/core/base-provider.d.ts +423 -0
- package/dist/lib/core/base-provider.js +365 -0
- package/dist/lib/core/constants.d.ts +1 -1
- package/dist/lib/core/constants.js +1 -1
- package/dist/lib/core/dynamic-models.d.ts +6 -6
- package/dist/lib/core/evaluation.d.ts +19 -80
- package/dist/lib/core/evaluation.js +185 -484
- package/dist/lib/core/factory.d.ts +3 -3
- package/dist/lib/core/factory.js +30 -91
- package/dist/lib/core/service-registry.d.ts +47 -0
- package/dist/lib/core/service-registry.js +112 -0
- package/dist/lib/core/types.d.ts +49 -49
- package/dist/lib/core/types.js +1 -0
- package/dist/lib/factories/compatibility-factory.d.ts +20 -0
- package/dist/lib/factories/compatibility-factory.js +69 -0
- package/dist/lib/factories/provider-factory.d.ts +72 -0
- package/dist/lib/factories/provider-factory.js +144 -0
- package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
- package/dist/lib/factories/provider-generate-factory.js +87 -0
- package/dist/lib/factories/provider-registry.d.ts +38 -0
- package/dist/lib/factories/provider-registry.js +107 -0
- package/dist/lib/index.d.ts +8 -5
- package/dist/lib/index.js +5 -5
- package/dist/lib/mcp/client.js +5 -5
- package/dist/lib/mcp/config.js +28 -3
- package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
- package/dist/lib/mcp/external-client.js +2 -2
- package/dist/lib/mcp/factory.d.ts +1 -1
- package/dist/lib/mcp/factory.js +1 -1
- package/dist/lib/mcp/function-calling.js +1 -1
- package/dist/lib/mcp/initialize-tools.d.ts +1 -1
- package/dist/lib/mcp/initialize-tools.js +45 -1
- package/dist/lib/mcp/initialize.js +16 -6
- package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
- package/dist/lib/mcp/orchestrator.js +4 -4
- package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +8 -6
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/lib/mcp/unified-registry.d.ts +4 -0
- package/dist/lib/mcp/unified-registry.js +42 -9
- package/dist/lib/neurolink.d.ts +161 -174
- package/dist/lib/neurolink.js +723 -397
- package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
- package/dist/lib/providers/amazon-bedrock.js +143 -0
- package/dist/lib/providers/analytics-helper.js +7 -4
- package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/lib/providers/anthropic-baseprovider.js +114 -0
- package/dist/lib/providers/anthropic.d.ts +19 -39
- package/dist/lib/providers/anthropic.js +84 -378
- package/dist/lib/providers/azure-openai.d.ts +20 -0
- package/dist/lib/providers/azure-openai.js +89 -0
- package/dist/lib/providers/function-calling-provider.d.ts +14 -12
- package/dist/lib/providers/function-calling-provider.js +114 -64
- package/dist/lib/providers/google-ai-studio.d.ts +23 -0
- package/dist/lib/providers/google-ai-studio.js +107 -0
- package/dist/lib/providers/google-vertex.d.ts +47 -0
- package/dist/lib/providers/google-vertex.js +205 -0
- package/dist/lib/providers/huggingFace.d.ts +33 -27
- package/dist/lib/providers/huggingFace.js +103 -400
- package/dist/lib/providers/index.d.ts +9 -9
- package/dist/lib/providers/index.js +9 -9
- package/dist/lib/providers/mcp-provider.d.ts +13 -8
- package/dist/lib/providers/mcp-provider.js +63 -18
- package/dist/lib/providers/mistral.d.ts +42 -0
- package/dist/lib/providers/mistral.js +160 -0
- package/dist/lib/providers/ollama.d.ts +52 -35
- package/dist/lib/providers/ollama.js +297 -477
- package/dist/lib/providers/openAI.d.ts +21 -21
- package/dist/lib/providers/openAI.js +81 -245
- package/dist/lib/sdk/tool-extension.d.ts +181 -0
- package/dist/lib/sdk/tool-extension.js +283 -0
- package/dist/lib/sdk/tool-registration.d.ts +95 -0
- package/dist/lib/sdk/tool-registration.js +167 -0
- package/dist/lib/types/generate-types.d.ts +80 -0
- package/dist/lib/types/generate-types.js +1 -0
- package/dist/lib/types/mcp-types.d.ts +116 -0
- package/dist/lib/types/mcp-types.js +5 -0
- package/dist/lib/types/stream-types.d.ts +95 -0
- package/dist/lib/types/stream-types.js +1 -0
- package/dist/lib/types/universal-provider-options.d.ts +87 -0
- package/dist/lib/types/universal-provider-options.js +53 -0
- package/dist/lib/utils/providerUtils-fixed.js +1 -1
- package/dist/lib/utils/streaming-utils.d.ts +14 -2
- package/dist/lib/utils/streaming-utils.js +0 -3
- package/dist/mcp/client.js +5 -5
- package/dist/mcp/config.js +28 -3
- package/dist/mcp/dynamic-orchestrator.js +8 -8
- package/dist/mcp/external-client.js +2 -2
- package/dist/mcp/factory.d.ts +1 -1
- package/dist/mcp/factory.js +1 -1
- package/dist/mcp/function-calling.js +1 -1
- package/dist/mcp/initialize-tools.d.ts +1 -1
- package/dist/mcp/initialize-tools.js +45 -1
- package/dist/mcp/initialize.js +16 -6
- package/dist/mcp/neurolink-mcp-client.js +10 -10
- package/dist/mcp/orchestrator.js +4 -4
- package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/mcp/servers/ai-providers/ai-core-server.js +8 -6
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/mcp/unified-registry.d.ts +4 -0
- package/dist/mcp/unified-registry.js +42 -9
- package/dist/neurolink.d.ts +161 -174
- package/dist/neurolink.js +723 -397
- package/dist/providers/amazon-bedrock.d.ts +32 -0
- package/dist/providers/amazon-bedrock.js +143 -0
- package/dist/providers/analytics-helper.js +7 -4
- package/dist/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/providers/anthropic-baseprovider.js +114 -0
- package/dist/providers/anthropic.d.ts +19 -39
- package/dist/providers/anthropic.js +83 -377
- package/dist/providers/azure-openai.d.ts +20 -0
- package/dist/providers/azure-openai.js +89 -0
- package/dist/providers/function-calling-provider.d.ts +14 -12
- package/dist/providers/function-calling-provider.js +114 -64
- package/dist/providers/google-ai-studio.d.ts +23 -0
- package/dist/providers/google-ai-studio.js +108 -0
- package/dist/providers/google-vertex.d.ts +47 -0
- package/dist/providers/google-vertex.js +205 -0
- package/dist/providers/huggingFace.d.ts +33 -27
- package/dist/providers/huggingFace.js +102 -399
- package/dist/providers/index.d.ts +9 -9
- package/dist/providers/index.js +9 -9
- package/dist/providers/mcp-provider.d.ts +13 -8
- package/dist/providers/mcp-provider.js +63 -18
- package/dist/providers/mistral.d.ts +42 -0
- package/dist/providers/mistral.js +160 -0
- package/dist/providers/ollama.d.ts +52 -35
- package/dist/providers/ollama.js +297 -476
- package/dist/providers/openAI.d.ts +21 -21
- package/dist/providers/openAI.js +81 -246
- package/dist/sdk/tool-extension.d.ts +181 -0
- package/dist/sdk/tool-extension.js +283 -0
- package/dist/sdk/tool-registration.d.ts +95 -0
- package/dist/sdk/tool-registration.js +168 -0
- package/dist/types/generate-types.d.ts +80 -0
- package/dist/types/generate-types.js +1 -0
- package/dist/types/mcp-types.d.ts +116 -0
- package/dist/types/mcp-types.js +5 -0
- package/dist/types/stream-types.d.ts +95 -0
- package/dist/types/stream-types.js +1 -0
- package/dist/types/universal-provider-options.d.ts +87 -0
- package/dist/types/universal-provider-options.js +53 -0
- package/dist/utils/providerUtils-fixed.js +1 -1
- package/dist/utils/streaming-utils.d.ts +14 -2
- package/dist/utils/streaming-utils.js +0 -3
- package/package.json +15 -10
- package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -89
- package/dist/lib/providers/agent-enhanced-provider.js +0 -614
- package/dist/lib/providers/amazonBedrock.d.ts +0 -19
- package/dist/lib/providers/amazonBedrock.js +0 -334
- package/dist/lib/providers/azureOpenAI.d.ts +0 -39
- package/dist/lib/providers/azureOpenAI.js +0 -436
- package/dist/lib/providers/googleAIStudio.d.ts +0 -49
- package/dist/lib/providers/googleAIStudio.js +0 -333
- package/dist/lib/providers/googleVertexAI.d.ts +0 -38
- package/dist/lib/providers/googleVertexAI.js +0 -519
- package/dist/lib/providers/mistralAI.d.ts +0 -34
- package/dist/lib/providers/mistralAI.js +0 -294
- package/dist/providers/agent-enhanced-provider.d.ts +0 -89
- package/dist/providers/agent-enhanced-provider.js +0 -614
- package/dist/providers/amazonBedrock.d.ts +0 -19
- package/dist/providers/amazonBedrock.js +0 -334
- package/dist/providers/azureOpenAI.d.ts +0 -39
- package/dist/providers/azureOpenAI.js +0 -437
- package/dist/providers/googleAIStudio.d.ts +0 -49
- package/dist/providers/googleAIStudio.js +0 -333
- package/dist/providers/googleVertexAI.d.ts +0 -38
- package/dist/providers/googleVertexAI.js +0 -519
- package/dist/providers/mistralAI.d.ts +0 -34
- package/dist/providers/mistralAI.js +0 -294
|
@@ -1,405 +1,111 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
* Direct integration with Anthropic's Claude models via their native API.
|
|
5
|
-
* Supports Claude 3.5 Sonnet, Claude 3.5 Haiku, and Claude 3 Opus.
|
|
6
|
-
*/
|
|
7
|
-
import { AIProviderName } from "../core/types.js";
|
|
1
|
+
import { anthropic } from "@ai-sdk/anthropic";
|
|
2
|
+
import { streamText, Output } from "ai";
|
|
3
|
+
import { BaseProvider } from "../core/base-provider.js";
|
|
8
4
|
import { logger } from "../utils/logger.js";
|
|
9
5
|
import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
|
|
10
6
|
import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
7
|
+
// Configuration helpers
|
|
8
|
+
const getAnthropicApiKey = () => {
|
|
9
|
+
const apiKey = process.env.ANTHROPIC_API_KEY;
|
|
10
|
+
if (!apiKey) {
|
|
11
|
+
throw new Error(`❌ Anthropic Provider Configuration Error\n\nMissing required environment variable: ANTHROPIC_API_KEY\n\n🔧 Step 1: Get Anthropic API Key\n1. Visit: https://console.anthropic.com/\n2. Sign in or create an account\n3. Go to API Keys section\n4. Create a new API key\n\n🔧 Step 2: Set Environment Variable\nAdd to your .env file:\nANTHROPIC_API_KEY=your_api_key_here\n\n🔧 Step 3: Restart Application\nRestart your application to load the new environment variables.`);
|
|
12
|
+
}
|
|
13
|
+
return apiKey;
|
|
14
|
+
};
|
|
15
|
+
const getDefaultAnthropicModel = () => {
|
|
16
|
+
return process.env.ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022";
|
|
17
|
+
};
|
|
18
|
+
/**
|
|
19
|
+
* Anthropic Provider v2 - BaseProvider Implementation
|
|
20
|
+
* Fixed syntax and enhanced with proper error handling
|
|
21
|
+
*/
|
|
22
|
+
export class AnthropicProvider extends BaseProvider {
|
|
23
|
+
model;
|
|
24
|
+
constructor(modelName, sdk) {
|
|
25
|
+
super(modelName, "anthropic", sdk);
|
|
26
|
+
// Initialize Anthropic model with API key validation
|
|
27
|
+
const apiKey = getAnthropicApiKey();
|
|
28
|
+
this.model = anthropic(this.modelName || getDefaultAnthropicModel());
|
|
29
|
+
logger.debug("Anthropic Provider v2 initialized", {
|
|
30
|
+
modelName: this.modelName,
|
|
31
|
+
provider: this.providerName,
|
|
32
|
+
});
|
|
26
33
|
}
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
if (!apiKey) {
|
|
30
|
-
throw new Error("ANTHROPIC_API_KEY environment variable is required");
|
|
31
|
-
}
|
|
32
|
-
return apiKey;
|
|
34
|
+
getProviderName() {
|
|
35
|
+
return "anthropic";
|
|
33
36
|
}
|
|
34
|
-
|
|
35
|
-
return
|
|
37
|
+
getDefaultModel() {
|
|
38
|
+
return getDefaultAnthropicModel();
|
|
36
39
|
}
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
"anthropic-version": "2023-06-01",
|
|
43
|
-
"anthropic-dangerous-direct-browser-access": "true", // Required for browser usage
|
|
44
|
-
};
|
|
45
|
-
logger.debug(`[AnthropicProvider.makeRequest] ${stream ? "Streaming" : "Non-streaming"} request to ${url}`);
|
|
46
|
-
logger.debug(`[AnthropicProvider.makeRequest] Model: ${body.model}, Max tokens: ${body.max_tokens}`);
|
|
47
|
-
const proxyFetch = createProxyFetch();
|
|
48
|
-
const response = await proxyFetch(url, {
|
|
49
|
-
method: "POST",
|
|
50
|
-
headers,
|
|
51
|
-
body: JSON.stringify(body),
|
|
52
|
-
signal, // Add abort signal for timeout support
|
|
53
|
-
});
|
|
54
|
-
if (!response.ok) {
|
|
55
|
-
const errorText = await response.text();
|
|
56
|
-
logger.error(`[AnthropicProvider.makeRequest] API error ${response.status}: ${errorText}`);
|
|
57
|
-
throw new Error(`Anthropic API error ${response.status}: ${errorText}`);
|
|
58
|
-
}
|
|
59
|
-
return response;
|
|
60
|
-
}
|
|
61
|
-
async generateText(optionsOrPrompt, schema) {
|
|
62
|
-
const functionTag = "AnthropicProvider.generateText";
|
|
63
|
-
const provider = "anthropic";
|
|
64
|
-
const startTime = Date.now();
|
|
65
|
-
logger.debug(`[${functionTag}] Starting text generation`);
|
|
66
|
-
// Parse parameters with backward compatibility
|
|
67
|
-
const options = typeof optionsOrPrompt === "string"
|
|
68
|
-
? { prompt: optionsOrPrompt }
|
|
69
|
-
: optionsOrPrompt;
|
|
70
|
-
const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, "generate"), enableAnalytics = false, enableEvaluation = false, context, } = options;
|
|
71
|
-
logger.debug(`[${functionTag}] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}, Timeout: ${timeout}`);
|
|
72
|
-
const requestBody = {
|
|
73
|
-
model: this.getModel(),
|
|
74
|
-
max_tokens: maxTokens,
|
|
75
|
-
messages: [
|
|
76
|
-
{
|
|
77
|
-
role: "user",
|
|
78
|
-
content: prompt,
|
|
79
|
-
},
|
|
80
|
-
],
|
|
81
|
-
temperature,
|
|
82
|
-
system: systemPrompt,
|
|
83
|
-
};
|
|
84
|
-
// Create timeout controller if timeout is specified
|
|
85
|
-
const timeoutController = createTimeoutController(timeout, provider, "generate");
|
|
86
|
-
try {
|
|
87
|
-
const response = await this.makeRequest("messages", requestBody, false, timeoutController?.controller.signal);
|
|
88
|
-
const data = await response.json();
|
|
89
|
-
// Clean up timeout if successful
|
|
90
|
-
timeoutController?.cleanup();
|
|
91
|
-
logger.debug(`[${functionTag}] Success. Generated ${data.usage.output_tokens} tokens`);
|
|
92
|
-
const content = data.content.map((block) => block.text).join("");
|
|
93
|
-
const result = {
|
|
94
|
-
content,
|
|
95
|
-
provider: this.name,
|
|
96
|
-
model: data.model,
|
|
97
|
-
usage: {
|
|
98
|
-
promptTokens: data.usage.input_tokens,
|
|
99
|
-
completionTokens: data.usage.output_tokens,
|
|
100
|
-
totalTokens: data.usage.input_tokens + data.usage.output_tokens,
|
|
101
|
-
},
|
|
102
|
-
finishReason: data.stop_reason,
|
|
103
|
-
};
|
|
104
|
-
// Add analytics if enabled
|
|
105
|
-
if (options.enableAnalytics) {
|
|
106
|
-
result.analytics = createAnalytics(provider, this.defaultModel, result, Date.now() - startTime, options.context);
|
|
107
|
-
}
|
|
108
|
-
// Add evaluation if enabled
|
|
109
|
-
if (options.enableEvaluation) {
|
|
110
|
-
result.evaluation = await evaluateResponse(prompt, result.content, options.context);
|
|
111
|
-
}
|
|
112
|
-
return result;
|
|
113
|
-
}
|
|
114
|
-
catch (error) {
|
|
115
|
-
// Always cleanup timeout
|
|
116
|
-
timeoutController?.cleanup();
|
|
117
|
-
// Log timeout errors specifically
|
|
118
|
-
if (error instanceof TimeoutError) {
|
|
119
|
-
logger.error(`[${functionTag}] Timeout error`, {
|
|
120
|
-
provider,
|
|
121
|
-
timeout: error.timeout,
|
|
122
|
-
message: error.message,
|
|
123
|
-
});
|
|
124
|
-
}
|
|
125
|
-
else if (error?.name === "AbortError") {
|
|
126
|
-
// Convert AbortError to TimeoutError
|
|
127
|
-
const timeoutError = new TimeoutError(`${provider} generate operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, "generate");
|
|
128
|
-
logger.error(`[${functionTag}] Timeout error`, {
|
|
129
|
-
provider,
|
|
130
|
-
timeout: timeoutController?.timeoutMs,
|
|
131
|
-
message: timeoutError.message,
|
|
132
|
-
});
|
|
133
|
-
throw timeoutError;
|
|
134
|
-
}
|
|
135
|
-
else {
|
|
136
|
-
logger.error(`[${functionTag}] Error:`, error);
|
|
137
|
-
}
|
|
138
|
-
throw error;
|
|
139
|
-
}
|
|
40
|
+
/**
|
|
41
|
+
* Returns the Vercel AI SDK model instance for Anthropic
|
|
42
|
+
*/
|
|
43
|
+
getAISDKModel() {
|
|
44
|
+
return this.model;
|
|
140
45
|
}
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
logger.debug(`[${functionTag}] Starting text streaming`);
|
|
145
|
-
// Parse parameters with backward compatibility
|
|
146
|
-
const options = typeof optionsOrPrompt === "string"
|
|
147
|
-
? { prompt: optionsOrPrompt }
|
|
148
|
-
: optionsOrPrompt;
|
|
149
|
-
const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, "stream"), } = options;
|
|
150
|
-
logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
|
|
151
|
-
const requestBody = {
|
|
152
|
-
model: this.getModel(),
|
|
153
|
-
max_tokens: maxTokens,
|
|
154
|
-
messages: [
|
|
155
|
-
{
|
|
156
|
-
role: "user",
|
|
157
|
-
content: prompt,
|
|
158
|
-
},
|
|
159
|
-
],
|
|
160
|
-
temperature,
|
|
161
|
-
system: systemPrompt,
|
|
162
|
-
stream: true,
|
|
163
|
-
};
|
|
164
|
-
// Create timeout controller if timeout is specified
|
|
165
|
-
const timeoutController = createTimeoutController(timeout, provider, "stream");
|
|
166
|
-
try {
|
|
167
|
-
const response = await this.makeRequest("messages", requestBody, true, timeoutController?.controller.signal);
|
|
168
|
-
if (!response.body) {
|
|
169
|
-
throw new Error("No response body received");
|
|
170
|
-
}
|
|
171
|
-
// Return a StreamTextResult-like object with timeout signal
|
|
172
|
-
return {
|
|
173
|
-
textStream: this.createAsyncIterable(response.body, timeoutController?.controller.signal),
|
|
174
|
-
text: "",
|
|
175
|
-
usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
|
|
176
|
-
finishReason: "end_turn",
|
|
177
|
-
// Store timeout controller for external cleanup if needed
|
|
178
|
-
_timeoutController: timeoutController,
|
|
179
|
-
};
|
|
46
|
+
handleProviderError(error) {
|
|
47
|
+
if (error instanceof TimeoutError) {
|
|
48
|
+
return new Error(`Anthropic request timed out: ${error.message}`);
|
|
180
49
|
}
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
// Log timeout errors specifically
|
|
185
|
-
if (error instanceof TimeoutError) {
|
|
186
|
-
logger.error(`[${functionTag}] Timeout error`, {
|
|
187
|
-
provider,
|
|
188
|
-
timeout: error.timeout,
|
|
189
|
-
message: error.message,
|
|
190
|
-
});
|
|
191
|
-
}
|
|
192
|
-
else if (error?.name === "AbortError") {
|
|
193
|
-
// Convert AbortError to TimeoutError
|
|
194
|
-
const timeoutError = new TimeoutError(`${provider} stream operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, "stream");
|
|
195
|
-
logger.error(`[${functionTag}] Timeout error`, {
|
|
196
|
-
provider,
|
|
197
|
-
timeout: timeoutController?.timeoutMs,
|
|
198
|
-
message: timeoutError.message,
|
|
199
|
-
});
|
|
200
|
-
throw timeoutError;
|
|
201
|
-
}
|
|
202
|
-
else {
|
|
203
|
-
logger.error(`[${functionTag}] Error:`, error);
|
|
204
|
-
}
|
|
205
|
-
throw error;
|
|
206
|
-
}
|
|
207
|
-
}
|
|
208
|
-
async *createAsyncIterable(body, signal) {
|
|
209
|
-
const reader = body.getReader();
|
|
210
|
-
const decoder = new TextDecoder();
|
|
211
|
-
let buffer = "";
|
|
212
|
-
try {
|
|
213
|
-
while (true) {
|
|
214
|
-
// Check if aborted
|
|
215
|
-
if (signal?.aborted) {
|
|
216
|
-
throw new Error("AbortError");
|
|
217
|
-
}
|
|
218
|
-
const { done, value } = await reader.read();
|
|
219
|
-
if (done) {
|
|
220
|
-
break;
|
|
221
|
-
}
|
|
222
|
-
buffer += decoder.decode(value, { stream: true });
|
|
223
|
-
const lines = buffer.split("\n");
|
|
224
|
-
buffer = lines.pop() || "";
|
|
225
|
-
for (const line of lines) {
|
|
226
|
-
if (line.trim() === "") {
|
|
227
|
-
continue;
|
|
228
|
-
}
|
|
229
|
-
if (line.startsWith("data: ")) {
|
|
230
|
-
const data = line.slice(6);
|
|
231
|
-
if (data.trim() === "[DONE]") {
|
|
232
|
-
continue;
|
|
233
|
-
}
|
|
234
|
-
try {
|
|
235
|
-
const chunk = JSON.parse(data);
|
|
236
|
-
// Extract text content from different chunk types
|
|
237
|
-
if (chunk.type === "content_block_delta" && chunk.delta?.text) {
|
|
238
|
-
yield chunk.delta.text;
|
|
239
|
-
}
|
|
240
|
-
}
|
|
241
|
-
catch (parseError) {
|
|
242
|
-
logger.warn("[AnthropicProvider.createAsyncIterable] Failed to parse chunk:", parseError);
|
|
243
|
-
continue;
|
|
244
|
-
}
|
|
245
|
-
}
|
|
246
|
-
}
|
|
247
|
-
}
|
|
50
|
+
if (error?.message?.includes("API_KEY_INVALID") ||
|
|
51
|
+
error?.message?.includes("Invalid API key")) {
|
|
52
|
+
return new Error("Invalid Anthropic API key. Please check your ANTHROPIC_API_KEY environment variable.");
|
|
248
53
|
}
|
|
249
|
-
|
|
250
|
-
|
|
54
|
+
if (error?.message?.includes("rate limit")) {
|
|
55
|
+
return new Error("Anthropic rate limit exceeded. Please try again later.");
|
|
251
56
|
}
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
:
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
model: this.getModel(),
|
|
263
|
-
max_tokens: maxTokens,
|
|
264
|
-
messages: [
|
|
265
|
-
{
|
|
266
|
-
role: "user",
|
|
267
|
-
content: prompt,
|
|
268
|
-
},
|
|
269
|
-
],
|
|
270
|
-
temperature,
|
|
271
|
-
system: systemPrompt,
|
|
272
|
-
stream: true,
|
|
57
|
+
return new Error(`Anthropic error: ${error?.message || "Unknown error"}`);
|
|
58
|
+
}
|
|
59
|
+
// executeGenerate removed - BaseProvider handles all generation with tools
|
|
60
|
+
async executeStream(options, analysisSchema) {
|
|
61
|
+
// Convert StreamOptions to TextGenerationOptions for validation
|
|
62
|
+
const validationOptions = {
|
|
63
|
+
prompt: options.input.text,
|
|
64
|
+
systemPrompt: options.systemPrompt,
|
|
65
|
+
temperature: options.temperature,
|
|
66
|
+
maxTokens: options.maxTokens,
|
|
273
67
|
};
|
|
68
|
+
this.validateOptions(validationOptions);
|
|
69
|
+
const timeout = this.getTimeout(options);
|
|
70
|
+
const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
|
|
274
71
|
try {
|
|
275
|
-
const
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
try {
|
|
283
|
-
while (true) {
|
|
284
|
-
const { done, value } = await reader.read();
|
|
285
|
-
if (done) {
|
|
286
|
-
break;
|
|
287
|
-
}
|
|
288
|
-
buffer += decoder.decode(value, { stream: true });
|
|
289
|
-
const lines = buffer.split("\n");
|
|
290
|
-
buffer = lines.pop() || "";
|
|
291
|
-
for (const line of lines) {
|
|
292
|
-
if (line.trim() === "") {
|
|
293
|
-
continue;
|
|
294
|
-
}
|
|
295
|
-
if (line.startsWith("data: ")) {
|
|
296
|
-
const data = line.slice(6);
|
|
297
|
-
if (data.trim() === "[DONE]") {
|
|
298
|
-
continue;
|
|
299
|
-
}
|
|
300
|
-
try {
|
|
301
|
-
const chunk = JSON.parse(data);
|
|
302
|
-
// Extract text content from different chunk types
|
|
303
|
-
if (chunk.type === "content_block_delta" && chunk.delta?.text) {
|
|
304
|
-
yield {
|
|
305
|
-
content: chunk.delta.text,
|
|
306
|
-
provider: this.name,
|
|
307
|
-
model: this.getModel(),
|
|
308
|
-
};
|
|
309
|
-
}
|
|
310
|
-
}
|
|
311
|
-
catch (parseError) {
|
|
312
|
-
logger.warn("[AnthropicProvider.generateTextStream] Failed to parse chunk:", parseError);
|
|
313
|
-
continue;
|
|
314
|
-
}
|
|
315
|
-
}
|
|
316
|
-
}
|
|
317
|
-
}
|
|
318
|
-
}
|
|
319
|
-
finally {
|
|
320
|
-
reader.releaseLock();
|
|
321
|
-
}
|
|
322
|
-
logger.debug("[AnthropicProvider.generateTextStream] Streaming completed");
|
|
323
|
-
}
|
|
324
|
-
catch (error) {
|
|
325
|
-
logger.error("[AnthropicProvider.generateTextStream] Error:", error);
|
|
326
|
-
throw error;
|
|
327
|
-
}
|
|
328
|
-
}
|
|
329
|
-
async testConnection() {
|
|
330
|
-
logger.debug("[AnthropicProvider.testConnection] Testing connection to Anthropic API");
|
|
331
|
-
const startTime = Date.now();
|
|
332
|
-
try {
|
|
333
|
-
await this.generateText({
|
|
334
|
-
prompt: "Hello",
|
|
335
|
-
maxTokens: 5,
|
|
72
|
+
const result = await streamText({
|
|
73
|
+
model: this.model,
|
|
74
|
+
prompt: options.input.text,
|
|
75
|
+
system: options.systemPrompt || undefined,
|
|
76
|
+
temperature: options.temperature,
|
|
77
|
+
maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
|
|
78
|
+
abortSignal: timeoutController?.controller.signal,
|
|
336
79
|
});
|
|
337
|
-
|
|
338
|
-
|
|
80
|
+
timeoutController?.cleanup();
|
|
81
|
+
// Transform string stream to content object stream
|
|
82
|
+
const transformedStream = async function* () {
|
|
83
|
+
for await (const chunk of result.textStream) {
|
|
84
|
+
yield { content: chunk };
|
|
85
|
+
}
|
|
86
|
+
};
|
|
339
87
|
return {
|
|
340
|
-
|
|
341
|
-
|
|
88
|
+
stream: transformedStream(),
|
|
89
|
+
provider: this.providerName,
|
|
90
|
+
model: this.modelName,
|
|
342
91
|
};
|
|
343
92
|
}
|
|
344
93
|
catch (error) {
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
return {
|
|
348
|
-
success: false,
|
|
349
|
-
error: error instanceof Error ? error.message : "Unknown error",
|
|
350
|
-
responseTime,
|
|
351
|
-
};
|
|
94
|
+
timeoutController?.cleanup();
|
|
95
|
+
throw this.handleProviderError(error);
|
|
352
96
|
}
|
|
353
97
|
}
|
|
354
|
-
|
|
98
|
+
async isAvailable() {
|
|
355
99
|
try {
|
|
356
|
-
|
|
100
|
+
getAnthropicApiKey();
|
|
357
101
|
return true;
|
|
358
102
|
}
|
|
359
103
|
catch {
|
|
360
104
|
return false;
|
|
361
105
|
}
|
|
362
106
|
}
|
|
363
|
-
|
|
364
|
-
return
|
|
365
|
-
}
|
|
366
|
-
getOptionalConfig() {
|
|
367
|
-
return ["ANTHROPIC_MODEL", "ANTHROPIC_BASE_URL"];
|
|
368
|
-
}
|
|
369
|
-
getModels() {
|
|
370
|
-
return [
|
|
371
|
-
"claude-3-5-sonnet-20241022",
|
|
372
|
-
"claude-3-5-haiku-20241022",
|
|
373
|
-
"claude-3-opus-20240229",
|
|
374
|
-
"claude-3-sonnet-20240229",
|
|
375
|
-
"claude-3-haiku-20240307",
|
|
376
|
-
];
|
|
377
|
-
}
|
|
378
|
-
supportsStreaming() {
|
|
379
|
-
return true;
|
|
380
|
-
}
|
|
381
|
-
supportsSchema() {
|
|
382
|
-
return false; // Anthropic doesn't have native JSON schema support like OpenAI
|
|
383
|
-
}
|
|
384
|
-
getCapabilities() {
|
|
385
|
-
return [
|
|
386
|
-
"text-generation",
|
|
387
|
-
"streaming",
|
|
388
|
-
"conversation",
|
|
389
|
-
"system-prompts",
|
|
390
|
-
"long-context", // Claude models support up to 200k tokens
|
|
391
|
-
];
|
|
392
|
-
}
|
|
393
|
-
/**
|
|
394
|
-
* Alias for generateText() - CLI-SDK consistency
|
|
395
|
-
*/
|
|
396
|
-
async generate(optionsOrPrompt, analysisSchema) {
|
|
397
|
-
return this.generateText(optionsOrPrompt, analysisSchema);
|
|
398
|
-
}
|
|
399
|
-
/**
|
|
400
|
-
* Short alias for generateText() - CLI-SDK consistency
|
|
401
|
-
*/
|
|
402
|
-
async gen(optionsOrPrompt, analysisSchema) {
|
|
403
|
-
return this.generateText(optionsOrPrompt, analysisSchema);
|
|
107
|
+
getModel() {
|
|
108
|
+
return this.model;
|
|
404
109
|
}
|
|
405
110
|
}
|
|
111
|
+
export default AnthropicProvider;
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
import { BaseProvider } from "../core/base-provider.js";
|
|
2
|
+
import type { AIProviderName } from "../core/types.js";
|
|
3
|
+
import type { StreamOptions, StreamResult } from "../types/stream-types.js";
|
|
4
|
+
export declare class AzureOpenAIProvider extends BaseProvider {
|
|
5
|
+
private apiKey;
|
|
6
|
+
private resourceName;
|
|
7
|
+
private deployment;
|
|
8
|
+
private apiVersion;
|
|
9
|
+
private azureProvider;
|
|
10
|
+
constructor(modelName?: string);
|
|
11
|
+
protected getProviderName(): AIProviderName;
|
|
12
|
+
protected getDefaultModel(): string;
|
|
13
|
+
/**
|
|
14
|
+
* Returns the Vercel AI SDK model instance for Azure OpenAI
|
|
15
|
+
*/
|
|
16
|
+
protected getAISDKModel(): any;
|
|
17
|
+
protected handleProviderError(error: any): Error;
|
|
18
|
+
protected executeStream(options: StreamOptions, analysisSchema?: any): Promise<StreamResult>;
|
|
19
|
+
}
|
|
20
|
+
export default AzureOpenAIProvider;
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
import { createAzure } from "@ai-sdk/azure";
|
|
2
|
+
import { streamText } from "ai";
|
|
3
|
+
import { BaseProvider } from "../core/base-provider.js";
|
|
4
|
+
/**
 * Azure OpenAI provider backed by the Vercel AI SDK (@ai-sdk/azure).
 *
 * Configuration is taken from the environment:
 *   - AZURE_OPENAI_API_KEY   (required)
 *   - AZURE_OPENAI_ENDPOINT  (required; full https URL of the resource)
 *   - AZURE_OPENAI_DEPLOYMENT / AZURE_OPENAI_DEPLOYMENT_ID (optional)
 *   - AZURE_API_VERSION      (optional; defaults to "2024-10-01-preview")
 */
export class AzureOpenAIProvider extends BaseProvider {
    apiKey;
    resourceName;
    deployment;
    apiVersion;
    azureProvider;
    /**
     * @param {string} [modelName] Deployment name; falls back to the
     *   AZURE_OPENAI_DEPLOYMENT(_ID) env vars, then "gpt-4o".
     * @throws {Error} if AZURE_OPENAI_API_KEY or AZURE_OPENAI_ENDPOINT is unset.
     */
    constructor(modelName) {
        super(modelName, "azure");
        this.apiKey = process.env.AZURE_OPENAI_API_KEY || "";
        const endpoint = process.env.AZURE_OPENAI_ENDPOINT || "";
        // Derive the bare resource name from the endpoint URL,
        // e.g. "https://my-res.openai.azure.com/" -> "my-res".
        this.resourceName = endpoint
            .replace("https://", "")
            .replace(/\/+$/, "") // Remove trailing slashes
            .replace(".openai.azure.com", "");
        this.deployment =
            modelName ||
                process.env.AZURE_OPENAI_DEPLOYMENT ||
                process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
                "gpt-4o";
        this.apiVersion = process.env.AZURE_API_VERSION || "2024-10-01-preview";
        if (!this.apiKey) {
            throw new Error("AZURE_OPENAI_API_KEY environment variable is required");
        }
        if (!this.resourceName) {
            throw new Error("AZURE_OPENAI_ENDPOINT environment variable is required");
        }
        // Create the Azure provider instance
        this.azureProvider = createAzure({
            resourceName: this.resourceName,
            apiKey: this.apiKey,
            apiVersion: this.apiVersion,
        });
        console.log("Azure Vercel Provider initialized", {
            deployment: this.deployment,
            resourceName: this.resourceName,
            provider: "azure-vercel",
        });
    }
    /** Canonical provider identifier used by the registry. */
    getProviderName() {
        return "azure";
    }
    /** The deployment name doubles as the default model. */
    getDefaultModel() {
        return this.deployment;
    }
    /**
     * Returns the Vercel AI SDK model instance for Azure OpenAI
     */
    getAISDKModel() {
        return this.azureProvider(this.deployment);
    }
    /**
     * Maps transport/SDK failures to friendlier Error objects.
     * Only 401 is special-cased; everything else is wrapped verbatim.
     */
    handleProviderError(error) {
        if (error?.message?.includes("401")) {
            return new Error("Invalid Azure OpenAI API key or endpoint.");
        }
        return new Error(`Azure OpenAI error: ${error?.message || "Unknown error"}`);
    }
    // executeGenerate removed - BaseProvider handles all generation with tools
    /**
     * Streams a completion for the given options.
     *
     * @param {object} options         Stream options (input.text, maxTokens,
     *                                 temperature, systemPrompt).
     * @param {object} [analysisSchema] Currently unused here; accepted for
     *                                 interface parity with BaseProvider.
     * @returns {Promise<object>} StreamResult with an async-iterable `stream`
     *                            of `{ content }` chunks plus metadata.
     * @throws {Error} normalized via handleProviderError.
     */
    async executeStream(options, analysisSchema) {
        try {
            const stream = await streamText({
                model: this.azureProvider(this.deployment),
                prompt: options.input?.text || "",
                // BUG FIX: use ?? instead of || so explicit 0 values are
                // honored — `temperature: 0` (deterministic output) was
                // previously clobbered to 0.7 by the falsy check.
                maxTokens: options.maxTokens ?? 1000,
                temperature: options.temperature ?? 0.7,
                system: options.systemPrompt,
            });
            return {
                stream: (async function* () {
                    for await (const chunk of stream.textStream) {
                        yield { content: chunk };
                    }
                })(),
                provider: "azure",
                model: this.deployment,
                metadata: {
                    streamId: `azure-${Date.now()}`,
                    startTime: Date.now(),
                },
            };
        }
        catch (error) {
            throw this.handleProviderError(error);
        }
    }
}
export default AzureOpenAIProvider;
|
|
@@ -3,9 +3,11 @@
|
|
|
3
3
|
* Integrates MCP tools directly with AI SDK's function calling capabilities
|
|
4
4
|
* This is the missing piece that enables true AI function calling!
|
|
5
5
|
*/
|
|
6
|
-
import type { AIProvider, TextGenerationOptions,
|
|
7
|
-
import { type
|
|
6
|
+
import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
|
|
7
|
+
import { type Schema } from "ai";
|
|
8
|
+
import type { GenerateResult } from "../types/generate-types.js";
|
|
8
9
|
import type { ZodType, ZodTypeDef } from "zod";
|
|
10
|
+
import type { StreamOptions, StreamResult } from "../types/stream-types.js";
|
|
9
11
|
/**
|
|
10
12
|
* Enhanced provider that enables real function calling with MCP tools
|
|
11
13
|
*/
|
|
@@ -19,14 +21,19 @@ export declare class FunctionCallingProvider implements AIProvider {
|
|
|
19
21
|
sessionId?: string;
|
|
20
22
|
userId?: string;
|
|
21
23
|
});
|
|
24
|
+
/**
|
|
25
|
+
* PRIMARY METHOD: Stream content using AI (recommended for new code)
|
|
26
|
+
* Future-ready for multi-modal capabilities with current text focus
|
|
27
|
+
*/
|
|
28
|
+
stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: any): Promise<StreamResult>;
|
|
22
29
|
/**
|
|
23
30
|
* Generate text with real function calling support
|
|
24
31
|
*/
|
|
25
|
-
|
|
32
|
+
generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
|
|
26
33
|
/**
|
|
27
34
|
* Generate text using AI SDK's native function calling
|
|
28
35
|
*/
|
|
29
|
-
private
|
|
36
|
+
private generateWithTools;
|
|
30
37
|
/**
|
|
31
38
|
* Get the model from the base provider
|
|
32
39
|
* This is a temporary solution - ideally we'd have a getModel() method on AIProvider
|
|
@@ -45,17 +52,12 @@ export declare class FunctionCallingProvider implements AIProvider {
|
|
|
45
52
|
*/
|
|
46
53
|
private createFunctionAwareSystemPrompt;
|
|
47
54
|
/**
|
|
48
|
-
*
|
|
49
|
-
*/
|
|
50
|
-
streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
|
|
51
|
-
/**
|
|
52
|
-
* Alias for generateText() - CLI-SDK consistency
|
|
55
|
+
* Alias for generate() - CLI-SDK consistency
|
|
53
56
|
*/
|
|
54
|
-
generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
|
|
55
57
|
/**
|
|
56
|
-
* Short alias for
|
|
58
|
+
* Short alias for generate() - CLI-SDK consistency
|
|
57
59
|
*/
|
|
58
|
-
gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<
|
|
60
|
+
gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
|
|
59
61
|
}
|
|
60
62
|
/**
|
|
61
63
|
* Create a function-calling enhanced version of any AI provider
|