@juspay/neurolink 5.1.0 → 5.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. package/CHANGELOG.md +21 -9
  2. package/README.md +123 -126
  3. package/dist/agent/direct-tools.d.ts +6 -6
  4. package/dist/cli/commands/config.d.ts +3 -3
  5. package/dist/cli/commands/mcp.js +8 -7
  6. package/dist/cli/factories/command-factory.d.ts +4 -0
  7. package/dist/cli/factories/command-factory.js +63 -8
  8. package/dist/cli/index.js +87 -140
  9. package/dist/core/base-provider.d.ts +423 -0
  10. package/dist/core/base-provider.js +376 -0
  11. package/dist/core/constants.d.ts +2 -1
  12. package/dist/core/constants.js +2 -1
  13. package/dist/core/dynamic-models.d.ts +6 -6
  14. package/dist/core/evaluation.d.ts +19 -80
  15. package/dist/core/evaluation.js +185 -484
  16. package/dist/core/factory.d.ts +3 -3
  17. package/dist/core/factory.js +31 -91
  18. package/dist/core/service-registry.d.ts +47 -0
  19. package/dist/core/service-registry.js +112 -0
  20. package/dist/core/types.d.ts +8 -1
  21. package/dist/factories/compatibility-factory.js +1 -1
  22. package/dist/factories/provider-factory.d.ts +72 -0
  23. package/dist/factories/provider-factory.js +144 -0
  24. package/dist/factories/provider-registry.d.ts +38 -0
  25. package/dist/factories/provider-registry.js +107 -0
  26. package/dist/index.d.ts +4 -3
  27. package/dist/index.js +2 -4
  28. package/dist/lib/agent/direct-tools.d.ts +6 -6
  29. package/dist/lib/core/base-provider.d.ts +423 -0
  30. package/dist/lib/core/base-provider.js +376 -0
  31. package/dist/lib/core/constants.d.ts +2 -1
  32. package/dist/lib/core/constants.js +2 -1
  33. package/dist/lib/core/dynamic-models.d.ts +6 -6
  34. package/dist/lib/core/evaluation.d.ts +19 -80
  35. package/dist/lib/core/evaluation.js +185 -484
  36. package/dist/lib/core/factory.d.ts +3 -3
  37. package/dist/lib/core/factory.js +30 -91
  38. package/dist/lib/core/service-registry.d.ts +47 -0
  39. package/dist/lib/core/service-registry.js +112 -0
  40. package/dist/lib/core/types.d.ts +8 -1
  41. package/dist/lib/factories/compatibility-factory.js +1 -1
  42. package/dist/lib/factories/provider-factory.d.ts +72 -0
  43. package/dist/lib/factories/provider-factory.js +144 -0
  44. package/dist/lib/factories/provider-registry.d.ts +38 -0
  45. package/dist/lib/factories/provider-registry.js +107 -0
  46. package/dist/lib/index.d.ts +4 -3
  47. package/dist/lib/index.js +2 -4
  48. package/dist/lib/mcp/client.d.ts +1 -0
  49. package/dist/lib/mcp/client.js +1 -0
  50. package/dist/lib/mcp/config.js +28 -3
  51. package/dist/lib/mcp/context-manager.d.ts +1 -0
  52. package/dist/lib/mcp/context-manager.js +8 -4
  53. package/dist/lib/mcp/function-calling.d.ts +13 -0
  54. package/dist/lib/mcp/function-calling.js +134 -35
  55. package/dist/lib/mcp/initialize-tools.d.ts +1 -1
  56. package/dist/lib/mcp/initialize-tools.js +45 -1
  57. package/dist/lib/mcp/initialize.js +16 -6
  58. package/dist/lib/mcp/neurolink-mcp-client.d.ts +1 -0
  59. package/dist/lib/mcp/neurolink-mcp-client.js +21 -5
  60. package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  61. package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
  62. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +3 -1
  63. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  64. package/dist/lib/mcp/unified-registry.d.ts +4 -0
  65. package/dist/lib/mcp/unified-registry.js +42 -9
  66. package/dist/lib/neurolink.d.ts +156 -117
  67. package/dist/lib/neurolink.js +619 -404
  68. package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
  69. package/dist/lib/providers/amazon-bedrock.js +143 -0
  70. package/dist/lib/providers/analytics-helper.js +7 -4
  71. package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
  72. package/dist/lib/providers/anthropic-baseprovider.js +114 -0
  73. package/dist/lib/providers/anthropic.d.ts +19 -43
  74. package/dist/lib/providers/anthropic.js +82 -306
  75. package/dist/lib/providers/azure-openai.d.ts +20 -0
  76. package/dist/lib/providers/azure-openai.js +89 -0
  77. package/dist/lib/providers/function-calling-provider.d.ts +64 -2
  78. package/dist/lib/providers/function-calling-provider.js +208 -9
  79. package/dist/lib/providers/google-ai-studio.d.ts +23 -0
  80. package/dist/lib/providers/google-ai-studio.js +107 -0
  81. package/dist/lib/providers/google-vertex.d.ts +47 -0
  82. package/dist/lib/providers/google-vertex.js +205 -0
  83. package/dist/lib/providers/huggingFace.d.ts +32 -25
  84. package/dist/lib/providers/huggingFace.js +97 -431
  85. package/dist/lib/providers/index.d.ts +9 -9
  86. package/dist/lib/providers/index.js +9 -9
  87. package/dist/lib/providers/mcp-provider.js +24 -5
  88. package/dist/lib/providers/mistral.d.ts +42 -0
  89. package/dist/lib/providers/mistral.js +160 -0
  90. package/dist/lib/providers/ollama.d.ts +52 -36
  91. package/dist/lib/providers/ollama.js +297 -520
  92. package/dist/lib/providers/openAI.d.ts +19 -18
  93. package/dist/lib/providers/openAI.js +76 -275
  94. package/dist/lib/sdk/tool-extension.d.ts +181 -0
  95. package/dist/lib/sdk/tool-extension.js +283 -0
  96. package/dist/lib/sdk/tool-registration.d.ts +95 -0
  97. package/dist/lib/sdk/tool-registration.js +167 -0
  98. package/dist/lib/services/streaming/streaming-manager.js +11 -10
  99. package/dist/lib/services/websocket/websocket-server.js +12 -11
  100. package/dist/lib/telemetry/telemetry-service.js +8 -7
  101. package/dist/lib/types/generate-types.d.ts +1 -0
  102. package/dist/lib/types/mcp-types.d.ts +116 -0
  103. package/dist/lib/types/mcp-types.js +5 -0
  104. package/dist/lib/types/stream-types.d.ts +30 -18
  105. package/dist/lib/types/universal-provider-options.d.ts +87 -0
  106. package/dist/lib/types/universal-provider-options.js +53 -0
  107. package/dist/mcp/client.d.ts +1 -0
  108. package/dist/mcp/client.js +1 -0
  109. package/dist/mcp/config.js +28 -3
  110. package/dist/mcp/context-manager.d.ts +1 -0
  111. package/dist/mcp/context-manager.js +8 -4
  112. package/dist/mcp/function-calling.d.ts +13 -0
  113. package/dist/mcp/function-calling.js +134 -35
  114. package/dist/mcp/initialize-tools.d.ts +1 -1
  115. package/dist/mcp/initialize-tools.js +45 -1
  116. package/dist/mcp/initialize.js +16 -6
  117. package/dist/mcp/neurolink-mcp-client.d.ts +1 -0
  118. package/dist/mcp/neurolink-mcp-client.js +21 -5
  119. package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  120. package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
  121. package/dist/mcp/servers/ai-providers/ai-core-server.js +3 -1
  122. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  123. package/dist/mcp/unified-registry.d.ts +4 -0
  124. package/dist/mcp/unified-registry.js +42 -9
  125. package/dist/neurolink.d.ts +156 -117
  126. package/dist/neurolink.js +619 -404
  127. package/dist/providers/amazon-bedrock.d.ts +32 -0
  128. package/dist/providers/amazon-bedrock.js +143 -0
  129. package/dist/providers/analytics-helper.js +7 -4
  130. package/dist/providers/anthropic-baseprovider.d.ts +23 -0
  131. package/dist/providers/anthropic-baseprovider.js +114 -0
  132. package/dist/providers/anthropic.d.ts +19 -43
  133. package/dist/providers/anthropic.js +81 -305
  134. package/dist/providers/azure-openai.d.ts +20 -0
  135. package/dist/providers/azure-openai.js +89 -0
  136. package/dist/providers/function-calling-provider.d.ts +64 -2
  137. package/dist/providers/function-calling-provider.js +208 -9
  138. package/dist/providers/google-ai-studio.d.ts +23 -0
  139. package/dist/providers/google-ai-studio.js +108 -0
  140. package/dist/providers/google-vertex.d.ts +47 -0
  141. package/dist/providers/google-vertex.js +205 -0
  142. package/dist/providers/huggingFace.d.ts +32 -25
  143. package/dist/providers/huggingFace.js +96 -430
  144. package/dist/providers/index.d.ts +9 -9
  145. package/dist/providers/index.js +9 -9
  146. package/dist/providers/mcp-provider.js +24 -5
  147. package/dist/providers/mistral.d.ts +42 -0
  148. package/dist/providers/mistral.js +160 -0
  149. package/dist/providers/ollama.d.ts +52 -36
  150. package/dist/providers/ollama.js +297 -519
  151. package/dist/providers/openAI.d.ts +19 -18
  152. package/dist/providers/openAI.js +76 -276
  153. package/dist/sdk/tool-extension.d.ts +181 -0
  154. package/dist/sdk/tool-extension.js +283 -0
  155. package/dist/sdk/tool-registration.d.ts +95 -0
  156. package/dist/sdk/tool-registration.js +168 -0
  157. package/dist/services/streaming/streaming-manager.js +11 -10
  158. package/dist/services/websocket/websocket-server.js +12 -11
  159. package/dist/telemetry/telemetry-service.js +8 -7
  160. package/dist/types/generate-types.d.ts +1 -0
  161. package/dist/types/mcp-types.d.ts +116 -0
  162. package/dist/types/mcp-types.js +5 -0
  163. package/dist/types/stream-types.d.ts +30 -18
  164. package/dist/types/universal-provider-options.d.ts +87 -0
  165. package/dist/types/universal-provider-options.js +53 -0
  166. package/package.json +12 -5
  167. package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -93
  168. package/dist/lib/providers/agent-enhanced-provider.js +0 -605
  169. package/dist/lib/providers/amazonBedrock.d.ts +0 -28
  170. package/dist/lib/providers/amazonBedrock.js +0 -364
  171. package/dist/lib/providers/azureOpenAI.d.ts +0 -42
  172. package/dist/lib/providers/azureOpenAI.js +0 -347
  173. package/dist/lib/providers/googleAIStudio.d.ts +0 -42
  174. package/dist/lib/providers/googleAIStudio.js +0 -364
  175. package/dist/lib/providers/googleVertexAI.d.ts +0 -34
  176. package/dist/lib/providers/googleVertexAI.js +0 -547
  177. package/dist/lib/providers/mistralAI.d.ts +0 -37
  178. package/dist/lib/providers/mistralAI.js +0 -325
  179. package/dist/providers/agent-enhanced-provider.d.ts +0 -93
  180. package/dist/providers/agent-enhanced-provider.js +0 -606
  181. package/dist/providers/amazonBedrock.d.ts +0 -28
  182. package/dist/providers/amazonBedrock.js +0 -364
  183. package/dist/providers/azureOpenAI.d.ts +0 -42
  184. package/dist/providers/azureOpenAI.js +0 -348
  185. package/dist/providers/googleAIStudio.d.ts +0 -42
  186. package/dist/providers/googleAIStudio.js +0 -364
  187. package/dist/providers/googleVertexAI.d.ts +0 -34
  188. package/dist/providers/googleVertexAI.js +0 -547
  189. package/dist/providers/mistralAI.d.ts +0 -37
  190. package/dist/providers/mistralAI.js +0 -325
@@ -1,335 +1,111 @@
- /**
- * Anthropic AI Provider (Direct API)
- *
- * Direct integration with Anthropic's Claude models via their native API.
- * Supports Claude 3.5 Sonnet, Claude 3.5 Haiku, and Claude 3 Opus.
- */
- import { AIProviderName } from "../core/types.js";
+ import { anthropic } from "@ai-sdk/anthropic";
+ import { streamText, Output } from "ai";
+ import { BaseProvider } from "../core/base-provider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
- import { evaluateResponse } from "../core/evaluation.js";
- import { createAnalytics } from "../core/analytics.js";
- import { createProxyFetch } from "../proxy/proxy-fetch.js";
- export class AnthropicProvider {
- name = AIProviderName.ANTHROPIC;
- apiKey;
- baseURL;
- defaultModel;
- constructor() {
- this.apiKey = this.getApiKey();
- this.baseURL =
- process.env.ANTHROPIC_BASE_URL || "https://api.anthropic.com";
- this.defaultModel =
- process.env.ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022";
- logger.debug(`[AnthropicProvider] Initialized with model: ${this.defaultModel}`);
+ // Configuration helpers
+ const getAnthropicApiKey = () => {
+ const apiKey = process.env.ANTHROPIC_API_KEY;
+ if (!apiKey) {
+ throw new Error(`❌ Anthropic Provider Configuration Error\n\nMissing required environment variable: ANTHROPIC_API_KEY\n\n🔧 Step 1: Get Anthropic API Key\n1. Visit: https://console.anthropic.com/\n2. Sign in or create an account\n3. Go to API Keys section\n4. Create a new API key\n\n🔧 Step 2: Set Environment Variable\nAdd to your .env file:\nANTHROPIC_API_KEY=your_api_key_here\n\n🔧 Step 3: Restart Application\nRestart your application to load the new environment variables.`);
+ }
+ return apiKey;
+ };
+ const getDefaultAnthropicModel = () => {
+ return process.env.ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022";
+ };
+ /**
+ * Anthropic Provider v2 - BaseProvider Implementation
+ * Fixed syntax and enhanced with proper error handling
+ */
+ export class AnthropicProvider extends BaseProvider {
+ model;
+ constructor(modelName, sdk) {
+ super(modelName, "anthropic", sdk);
+ // Initialize Anthropic model with API key validation
+ const apiKey = getAnthropicApiKey();
+ this.model = anthropic(this.modelName || getDefaultAnthropicModel());
+ logger.debug("Anthropic Provider v2 initialized", {
+ modelName: this.modelName,
+ provider: this.providerName,
+ });
  }
- getApiKey() {
- const apiKey = process.env.ANTHROPIC_API_KEY;
- if (!apiKey) {
- throw new Error("ANTHROPIC_API_KEY environment variable is required");
- }
- return apiKey;
+ getProviderName() {
+ return "anthropic";
  }
- getModel() {
- return this.defaultModel;
- }
- async makeRequest(endpoint, body, stream = false, signal) {
- const url = `${this.baseURL}/v1/${endpoint}`;
- const headers = {
- "Content-Type": "application/json",
- "x-api-key": this.apiKey,
- "anthropic-version": "2023-06-01",
- "anthropic-dangerous-direct-browser-access": "true", // Required for browser usage
- };
- logger.debug(`[AnthropicProvider.makeRequest] ${stream ? "Streaming" : "Non-streaming"} request to ${url}`);
- logger.debug(`[AnthropicProvider.makeRequest] Model: ${body.model}, Max tokens: ${body.max_tokens}`);
- const proxyFetch = createProxyFetch();
- const response = await proxyFetch(url, {
- method: "POST",
- headers,
- body: JSON.stringify(body),
- signal, // Add abort signal for timeout support
- });
- if (!response.ok) {
- const errorText = await response.text();
- logger.error(`[AnthropicProvider.makeRequest] API error ${response.status}: ${errorText}`);
- throw new Error(`Anthropic API error ${response.status}: ${errorText}`);
- }
- return response;
+ getDefaultModel() {
+ return getDefaultAnthropicModel();
  }
  /**
- * PRIMARY METHOD: Stream content using AI (recommended for new code)
- * Future-ready for multi-modal capabilities with current text focus
+ * Returns the Vercel AI SDK model instance for Anthropic
  */
- async stream(optionsOrPrompt, analysisSchema) {
- const functionTag = "AnthropicProvider.stream";
- const provider = "anthropic";
- const startTime = Date.now();
- logger.debug(`[${functionTag}] Starting content streaming`);
- // Parse parameters - support both string and options object
- const options = typeof optionsOrPrompt === "string"
- ? { input: { text: optionsOrPrompt } }
- : optionsOrPrompt;
- // Validate input
- if (!options?.input?.text ||
- typeof options.input.text !== "string" ||
- options.input.text.trim() === "") {
- throw new Error("Stream options must include input.text as a non-empty string");
+ getAISDKModel() {
+ return this.model;
+ }
+ handleProviderError(error) {
+ if (error instanceof TimeoutError) {
+ return new Error(`Anthropic request timed out: ${error.message}`);
  }
- // Extract parameters
- const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, "stream"), } = options;
- logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
- // Create timeout controller if timeout is specified
- const timeoutController = createTimeoutController(timeout, provider, "stream");
- try {
- const body = {
- model: this.getModel(),
- max_tokens: maxTokens,
- messages: [
- ...(systemPrompt
- ? [{ role: "assistant", content: systemPrompt }]
- : []),
- { role: "user", content: prompt },
- ],
- temperature,
- stream: true,
- };
- const response = await this.makeRequest("messages", body, true, timeoutController?.controller.signal);
- const streamIterable = this.createAsyncIterable(response.body, timeoutController?.controller.signal);
- // Clean up timeout controller
- timeoutController?.cleanup();
- logger.debug(`[${functionTag}] Stream initialized successfully`);
- // Convert to StreamResult format
- return {
- stream: (async function* () {
- for await (const chunk of streamIterable) {
- yield { content: chunk };
- }
- })(),
- provider: "anthropic",
- model: this.getModel(),
- metadata: {
- streamId: `anthropic-${Date.now()}`,
- startTime,
- },
- };
+ if (error?.message?.includes("API_KEY_INVALID") ||
+ error?.message?.includes("Invalid API key")) {
+ return new Error("Invalid Anthropic API key. Please check your ANTHROPIC_API_KEY environment variable.");
  }
- catch (error) {
- // Always cleanup timeout on error
- timeoutController?.cleanup();
- if (error.name === "AbortError" || error.message.includes("timeout")) {
- const timeoutError = new TimeoutError(`${provider} stream operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, "stream");
- logger.error(`[${functionTag}] Timeout error`, {
- provider,
- timeout: timeoutController?.timeoutMs,
- message: timeoutError.message,
- });
- throw timeoutError;
- }
- else {
- logger.error(`[${functionTag}] Error:`, error);
- }
- throw error;
+ if (error?.message?.includes("rate limit")) {
+ return new Error("Anthropic rate limit exceeded. Please try again later.");
  }
- }
- async generate(optionsOrPrompt, schema) {
- const functionTag = "AnthropicProvider.generate";
- const provider = "anthropic";
- const startTime = Date.now();
- logger.debug(`[${functionTag}] Starting text generation`);
- // Parse parameters with backward compatibility
- const options = typeof optionsOrPrompt === "string"
- ? { prompt: optionsOrPrompt }
- : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, "generate"), enableAnalytics = false, enableEvaluation = false, context, } = options;
- logger.debug(`[${functionTag}] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}, Timeout: ${timeout}`);
- const requestBody = {
- model: this.getModel(),
- max_tokens: maxTokens,
- messages: [
- {
- role: "user",
- content: prompt,
- },
- ],
- temperature,
- system: systemPrompt,
+ return new Error(`Anthropic error: ${error?.message || "Unknown error"}`);
+ }
+ // executeGenerate removed - BaseProvider handles all generation with tools
+ async executeStream(options, analysisSchema) {
+ // Convert StreamOptions to TextGenerationOptions for validation
+ const validationOptions = {
+ prompt: options.input.text,
+ systemPrompt: options.systemPrompt,
+ temperature: options.temperature,
+ maxTokens: options.maxTokens,
  };
- // Create timeout controller if timeout is specified
- const timeoutController = createTimeoutController(timeout, provider, "generate");
+ this.validateOptions(validationOptions);
+ const timeout = this.getTimeout(options);
+ const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
- const response = await this.makeRequest("messages", requestBody, false, timeoutController?.controller.signal);
- const data = await response.json();
- // Clean up timeout if successful
- timeoutController?.cleanup();
- logger.debug(`[${functionTag}] Success. Generated ${data.usage.output_tokens} tokens`);
- const content = data.content.map((block) => block.text).join("");
- const result = {
- content,
- provider: this.name,
- model: data.model,
- usage: {
- promptTokens: data.usage.input_tokens,
- completionTokens: data.usage.output_tokens,
- totalTokens: data.usage.input_tokens + data.usage.output_tokens,
- },
- finishReason: data.stop_reason,
- };
- // Add analytics if enabled
- if (options.enableAnalytics) {
- result.analytics = createAnalytics(provider, this.defaultModel, result, Date.now() - startTime, options.context);
- }
- // Add evaluation if enabled
- if (options.enableEvaluation) {
- result.evaluation = await evaluateResponse(prompt, result.content, options.context);
- }
- return result;
- }
- catch (error) {
- // Always cleanup timeout
+ const result = await streamText({
+ model: this.model,
+ prompt: options.input.text,
+ system: options.systemPrompt || undefined,
+ temperature: options.temperature,
+ maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ abortSignal: timeoutController?.controller.signal,
+ });
  timeoutController?.cleanup();
- // Log timeout errors specifically
- if (error instanceof TimeoutError) {
- logger.error(`[${functionTag}] Timeout error`, {
- provider,
- timeout: error.timeout,
- message: error.message,
- });
- }
- else if (error?.name === "AbortError") {
- // Convert AbortError to TimeoutError
- const timeoutError = new TimeoutError(`${provider} generate operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, "generate");
- logger.error(`[${functionTag}] Timeout error`, {
- provider,
- timeout: timeoutController?.timeoutMs,
- message: timeoutError.message,
- });
- throw timeoutError;
- }
- else {
- logger.error(`[${functionTag}] Error:`, error);
- }
- throw error;
- }
- }
- /**
- * LEGACY METHOD: Use stream() instead for new code
- * @deprecated Use stream() method instead
- */
- async *createAsyncIterable(body, signal) {
- const reader = body.getReader();
- const decoder = new TextDecoder();
- let buffer = "";
- try {
- while (true) {
- // Check if aborted
- if (signal?.aborted) {
- throw new Error("AbortError");
- }
- const { done, value } = await reader.read();
- if (done) {
- break;
- }
- buffer += decoder.decode(value, { stream: true });
- const lines = buffer.split("\n");
- buffer = lines.pop() || "";
- for (const line of lines) {
- if (line.trim() === "") {
- continue;
- }
- if (line.startsWith("data: ")) {
- const data = line.slice(6);
- if (data.trim() === "[DONE]") {
- continue;
- }
- try {
- const chunk = JSON.parse(data);
- // Extract text content from different chunk types
- if (chunk.type === "content_block_delta" && chunk.delta?.text) {
- yield chunk.delta.text;
- }
- }
- catch (parseError) {
- logger.warn("[AnthropicProvider.createAsyncIterable] Failed to parse chunk:", parseError);
- continue;
- }
- }
+ // Transform string stream to content object stream
+ const transformedStream = async function* () {
+ for await (const chunk of result.textStream) {
+ yield { content: chunk };
  }
- }
- }
- finally {
- reader.releaseLock();
- }
- }
- async testConnection() {
- logger.debug("[AnthropicProvider.testConnection] Testing connection to Anthropic API");
- const startTime = Date.now();
- try {
- await this.generate({
- prompt: "Hello",
- maxTokens: 5,
- });
- const responseTime = Date.now() - startTime;
- logger.debug(`[AnthropicProvider.testConnection] Connection test successful (${responseTime}ms)`);
+ };
  return {
- success: true,
- responseTime,
+ stream: transformedStream(),
+ provider: this.providerName,
+ model: this.modelName,
  };
  }
  catch (error) {
- const responseTime = Date.now() - startTime;
- logger.error(`[AnthropicProvider.testConnection] Connection test failed (${responseTime}ms):`, error);
- return {
- success: false,
- error: error instanceof Error ? error.message : "Unknown error",
- responseTime,
- };
+ timeoutController?.cleanup();
+ throw this.handleProviderError(error);
  }
  }
- isConfigured() {
+ async isAvailable() {
  try {
- this.getApiKey();
+ getAnthropicApiKey();
  return true;
  }
  catch {
  return false;
  }
  }
- getRequiredConfig() {
- return ["ANTHROPIC_API_KEY"];
- }
- getOptionalConfig() {
- return ["ANTHROPIC_MODEL", "ANTHROPIC_BASE_URL"];
- }
- getModels() {
- return [
- "claude-3-5-sonnet-20241022",
- "claude-3-5-haiku-20241022",
- "claude-3-opus-20240229",
- "claude-3-sonnet-20240229",
- "claude-3-haiku-20240307",
- ];
- }
- supportsStreaming() {
- return true;
- }
- supportsSchema() {
- return false; // Anthropic doesn't have native JSON schema support like OpenAI
- }
- getCapabilities() {
- return [
- "text-generation",
- "streaming",
- "conversation",
- "system-prompts",
- "long-context", // Claude models support up to 200k tokens
- ];
- }
- /**
- * Short alias for generate() - CLI-SDK consistency
- */
- async gen(optionsOrPrompt, analysisSchema) {
- return this.generate(optionsOrPrompt, analysisSchema);
+ getModel() {
+ return this.model;
  }
  }
+ export default AnthropicProvider;
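The hunk above replaces the hand-rolled fetch/SSE client with a thin wrapper around the Vercel AI SDK: the provider now only supplies the Anthropic model instance, error mapping, and executeStream(), while BaseProvider owns generation and tool handling. A minimal consumption sketch, assuming BaseProvider still exposes a public stream() that delegates to executeStream() and that the deep dist import path below is valid for your setup:

```ts
// Assumed import path; adjust to however your project resolves the package.
import { AnthropicProvider } from "@juspay/neurolink/dist/providers/anthropic.js";

async function main() {
  // ANTHROPIC_API_KEY must be set; otherwise the constructor throws the setup-guide error shown above.
  const provider = new AnthropicProvider("claude-3-5-sonnet-20241022");

  // stream() on the base class is assumed to route to the executeStream() in this diff.
  const result = await provider.stream({
    input: { text: "Summarize the BaseProvider refactor in one sentence." },
    temperature: 0.7,
    maxTokens: 256,
  });

  // executeStream() yields chunks shaped as { content: string }.
  for await (const chunk of result.stream) {
    process.stdout.write(chunk.content);
  }
}

main().catch(console.error);
```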
@@ -0,0 +1,20 @@
+ import { BaseProvider } from "../core/base-provider.js";
+ import type { AIProviderName } from "../core/types.js";
+ import type { StreamOptions, StreamResult } from "../types/stream-types.js";
+ export declare class AzureOpenAIProvider extends BaseProvider {
+ private apiKey;
+ private resourceName;
+ private deployment;
+ private apiVersion;
+ private azureProvider;
+ constructor(modelName?: string);
+ protected getProviderName(): AIProviderName;
+ protected getDefaultModel(): string;
+ /**
+ * Returns the Vercel AI SDK model instance for Azure OpenAI
+ */
+ protected getAISDKModel(): any;
+ protected handleProviderError(error: any): Error;
+ protected executeStream(options: StreamOptions, analysisSchema?: any): Promise<StreamResult>;
+ }
+ export default AzureOpenAIProvider;
@@ -0,0 +1,89 @@
+ import { createAzure } from "@ai-sdk/azure";
+ import { streamText } from "ai";
+ import { BaseProvider } from "../core/base-provider.js";
+ export class AzureOpenAIProvider extends BaseProvider {
+ apiKey;
+ resourceName;
+ deployment;
+ apiVersion;
+ azureProvider;
+ constructor(modelName) {
+ super(modelName, "azure");
+ this.apiKey = process.env.AZURE_OPENAI_API_KEY || "";
+ const endpoint = process.env.AZURE_OPENAI_ENDPOINT || "";
+ this.resourceName = endpoint
+ .replace("https://", "")
+ .replace(/\/+$/, "") // Remove trailing slashes
+ .replace(".openai.azure.com", "");
+ this.deployment =
+ modelName ||
+ process.env.AZURE_OPENAI_DEPLOYMENT ||
+ process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
+ "gpt-4o";
+ this.apiVersion = process.env.AZURE_API_VERSION || "2024-10-01-preview";
+ if (!this.apiKey) {
+ throw new Error("AZURE_OPENAI_API_KEY environment variable is required");
+ }
+ if (!this.resourceName) {
+ throw new Error("AZURE_OPENAI_ENDPOINT environment variable is required");
+ }
+ // Create the Azure provider instance
+ this.azureProvider = createAzure({
+ resourceName: this.resourceName,
+ apiKey: this.apiKey,
+ apiVersion: this.apiVersion,
+ });
+ console.log("Azure Vercel Provider initialized", {
+ deployment: this.deployment,
+ resourceName: this.resourceName,
+ provider: "azure-vercel",
+ });
+ }
+ getProviderName() {
+ return "azure";
+ }
+ getDefaultModel() {
+ return this.deployment;
+ }
+ /**
+ * Returns the Vercel AI SDK model instance for Azure OpenAI
+ */
+ getAISDKModel() {
+ return this.azureProvider(this.deployment);
+ }
+ handleProviderError(error) {
+ if (error?.message?.includes("401")) {
+ return new Error("Invalid Azure OpenAI API key or endpoint.");
+ }
+ return new Error(`Azure OpenAI error: ${error?.message || "Unknown error"}`);
+ }
+ // executeGenerate removed - BaseProvider handles all generation with tools
+ async executeStream(options, analysisSchema) {
+ try {
+ const stream = await streamText({
+ model: this.azureProvider(this.deployment),
+ prompt: options.input?.text || "",
+ maxTokens: options.maxTokens || 1000,
+ temperature: options.temperature || 0.7,
+ system: options.systemPrompt,
+ });
+ return {
+ stream: (async function* () {
+ for await (const chunk of stream.textStream) {
+ yield { content: chunk };
+ }
+ })(),
+ provider: "azure",
+ model: this.deployment,
+ metadata: {
+ streamId: `azure-${Date.now()}`,
+ startTime: Date.now(),
+ },
+ };
+ }
+ catch (error) {
+ throw this.handleProviderError(error);
+ }
+ }
+ }
+ export default AzureOpenAIProvider;
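The new AzureOpenAIProvider reads its entire configuration from environment variables in the constructor and derives the Azure resource name from the endpoint URL. A minimal sketch of those variables and their fallbacks; the placeholder values and the deep import path are assumptions, not package documentation:

```ts
// Assumed import path; adjust to your project's module resolution.
import { AzureOpenAIProvider } from "@juspay/neurolink/dist/providers/azure-openai.js";

// Required: the constructor throws if either of these is missing.
// In practice these would come from a .env file rather than inline assignment.
process.env.AZURE_OPENAI_API_KEY = "<placeholder-key>";
process.env.AZURE_OPENAI_ENDPOINT = "https://my-resource.openai.azure.com"; // resource name is derived from this host

// Optional: deployment falls back to AZURE_OPENAI_DEPLOYMENT_ID, then "gpt-4o".
process.env.AZURE_OPENAI_DEPLOYMENT = "gpt-4o";
// Optional: API version defaults to "2024-10-01-preview".
process.env.AZURE_API_VERSION = "2024-10-01-preview";

// A constructor argument takes precedence over the AZURE_OPENAI_DEPLOYMENT* variables.
const provider = new AzureOpenAIProvider();
```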
@@ -16,10 +16,15 @@ export declare class FunctionCallingProvider implements AIProvider {
  private enableFunctionCalling;
  private sessionId;
  private userId;
+ private cachedToolsObject;
+ private cachedToolMap;
+ private cacheTimestamp;
+ private readonly cacheExpiryMs;
  constructor(baseProvider: AIProvider, options?: {
  enableFunctionCalling?: boolean;
  sessionId?: string;
  userId?: string;
+ cacheExpiryMs?: number;
  });
  /**
  * PRIMARY METHOD: Stream content using AI (recommended for new code)
@@ -31,7 +36,11 @@ export declare class FunctionCallingProvider implements AIProvider {
  */
  generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
  /**
- * Generate text using AI SDK's native function calling
+ * Generate text with tools using the AI SDK's generate function (with tools object)
+ */
+ private generateWithToolsObject;
+ /**
+ * Generate text using AI SDK's native function calling (legacy array-based)
  */
  private generateWithTools;
  /**
@@ -44,7 +53,11 @@ export declare class FunctionCallingProvider implements AIProvider {
  */
  private sanitizeToolName;
  /**
- * Convert our tools to AI SDK format with proper execution
+ * Wrap tools with proper execution context (for object-based tools)
+ */
+ private wrapToolsWithExecution;
+ /**
+ * Convert our tools to AI SDK format with proper execution (legacy array-based)
  */
  private convertToAISDKTools;
  /**
@@ -54,6 +67,53 @@ export declare class FunctionCallingProvider implements AIProvider {
  /**
  * Alias for generate() - CLI-SDK consistency
  */
+ /**
+ * Clear cached tools - Cache Invalidation Strategy
+ *
+ * WHEN TO CALL clearToolsCache():
+ *
+ * 1. **MCP Server Changes**: When MCP servers are added, removed, or restarted
+ * - After calling unifiedRegistry.addServer() or removeServer()
+ * - When MCP server configurations change
+ * - After MCP server restart or reconnection
+ *
+ * 2. **Tool Registration Changes**: When custom tools are modified
+ * - After registering new SDK tools via registerTool()
+ * - When tool implementations change
+ * - After unregistering tools
+ *
+ * 3. **Provider Reinitialization**: When the provider context changes
+ * - Before switching between different AI providers
+ * - When session context changes significantly
+ * - After provider authentication refresh
+ *
+ * 4. **Error Recovery**: When tool execution encounters systematic failures
+ * - After MCP connection errors are resolved
+ * - When tool discovery needs to be re-run
+ * - During error recovery workflows
+ *
+ * 5. **Development/Testing**: During development and testing cycles
+ * - Between test cases that modify tool availability
+ * - When testing different tool configurations
+ * - During hot reloading scenarios
+ *
+ * CACHE LIFECYCLE:
+ * - Cache is populated on first generate() call via getAvailableFunctionTools()
+ * - Cache persists across multiple generate() calls for performance
+ * - Cache is invalidated by calling this method
+ * - Next generate() call will rebuild cache from current tool state
+ *
+ * PERFORMANCE IMPACT:
+ * - Clearing cache forces tool discovery on next usage (~100-500ms overhead)
+ * - Recommended to clear cache proactively rather than reactively
+ * - Consider batching tool changes before clearing cache
+ *
+ * THREAD SAFETY:
+ * - This method is not thread-safe
+ * - Avoid calling during active generate() operations
+ * - Safe to call between separate AI generation requests
+ */
+ clearToolsCache(): void;
  /**
  * Short alias for generate() - CLI-SDK consistency
  */
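The doc comment added above spells out when the tool cache should be invalidated. A hypothetical end-to-end sketch of that lifecycle follows; only clearToolsCache(), the constructor options, and generate() come from this diff, while the import paths, the assumption that the BaseProvider-based AnthropicProvider still satisfies the AIProvider interface, and the commented-out registry call are illustrative:

```ts
// Assumed deep import paths; adjust to your project's module resolution.
import { AnthropicProvider } from "@juspay/neurolink/dist/providers/anthropic.js";
import { FunctionCallingProvider } from "@juspay/neurolink/dist/providers/function-calling-provider.js";

const provider = new FunctionCallingProvider(new AnthropicProvider(), {
  enableFunctionCalling: true,
  sessionId: "session-123",
});

// 1. The first generate() call discovers tools and populates the cache.
await provider.generate({ prompt: "List the tools you can call." });

// 2. Tool availability changes (MCP server added, SDK tool registered, ...).
// await unifiedRegistry.addServer(newServerConfig); // assumed API, named in the comment above

// 3. Invalidate between requests; the method is not thread-safe, so avoid
//    calling it while a generate() is in flight.
provider.clearToolsCache();

// 4. The next call rebuilds the cache (~100-500ms of rediscovery overhead).
await provider.generate({ prompt: "Use the newly added tool." });
```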
@@ -66,6 +126,7 @@ export declare function createFunctionCallingProvider(baseProvider: AIProvider,
  enableFunctionCalling?: boolean;
  sessionId?: string;
  userId?: string;
+ cacheExpiryMs?: number;
  }): AIProvider;
  /**
  * Enhanced MCP Provider Factory that creates function-calling enabled providers
@@ -77,4 +138,5 @@ export declare function createMCPAwareProviderV3(baseProvider: AIProvider, optio
  enableFunctionCalling?: boolean;
  sessionId?: string;
  userId?: string;
+ cacheExpiryMs?: number;
  }): AIProvider;
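Both factory helpers gain the same cacheExpiryMs option that was added to the FunctionCallingProvider constructor. A sketch of passing it through createFunctionCallingProvider; the import paths, the 10-minute value, and the result shape are assumptions rather than documented behavior:

```ts
// Assumed deep import paths; adjust to your project's module resolution.
import { AnthropicProvider } from "@juspay/neurolink/dist/providers/anthropic.js";
import { createFunctionCallingProvider } from "@juspay/neurolink/dist/providers/function-calling-provider.js";

const provider = createFunctionCallingProvider(new AnthropicProvider(), {
  enableFunctionCalling: true,
  sessionId: "session-123",
  userId: "user-456",
  cacheExpiryMs: 10 * 60 * 1000, // let the cached tools object expire after 10 minutes
});

const result = await provider.generate({ prompt: "Summarize the open tickets." });
console.log(result.content); // content field assumed from the existing GenerateResult shape
```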