@juspay/neurolink 7.10.2 → 7.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/config/types.d.ts +14 -0
  3. package/dist/config/types.js +6 -0
  4. package/dist/core/baseProvider.d.ts +45 -340
  5. package/dist/core/baseProvider.js +205 -30
  6. package/dist/core/types.d.ts +4 -0
  7. package/dist/factories/providerFactory.js +1 -1
  8. package/dist/factories/providerRegistry.js +8 -8
  9. package/dist/lib/config/types.d.ts +14 -0
  10. package/dist/lib/config/types.js +6 -0
  11. package/dist/lib/core/baseProvider.d.ts +45 -340
  12. package/dist/lib/core/baseProvider.js +205 -30
  13. package/dist/lib/core/types.d.ts +4 -0
  14. package/dist/lib/factories/providerFactory.js +1 -1
  15. package/dist/lib/factories/providerRegistry.js +8 -8
  16. package/dist/lib/mcp/servers/agent/directToolsServer.js +80 -68
  17. package/dist/lib/mcp/toolRegistry.js +8 -2
  18. package/dist/lib/neurolink.js +20 -0
  19. package/dist/lib/providers/amazonBedrock.d.ts +0 -1
  20. package/dist/lib/providers/amazonBedrock.js +0 -13
  21. package/dist/lib/providers/anthropic.js +8 -25
  22. package/dist/lib/providers/googleAiStudio.d.ts +0 -1
  23. package/dist/lib/providers/googleAiStudio.js +10 -15
  24. package/dist/lib/providers/googleVertex.d.ts +0 -1
  25. package/dist/lib/providers/googleVertex.js +17 -24
  26. package/dist/lib/providers/huggingFace.d.ts +0 -1
  27. package/dist/lib/providers/huggingFace.js +0 -8
  28. package/dist/lib/providers/litellm.d.ts +0 -1
  29. package/dist/lib/providers/litellm.js +0 -8
  30. package/dist/lib/providers/mistral.d.ts +9 -24
  31. package/dist/lib/providers/mistral.js +44 -82
  32. package/dist/lib/providers/ollama.d.ts +0 -1
  33. package/dist/lib/providers/ollama.js +0 -12
  34. package/dist/lib/providers/openAI.d.ts +2 -3
  35. package/dist/lib/providers/openAI.js +12 -20
  36. package/dist/lib/providers/openaiCompatible.d.ts +0 -1
  37. package/dist/lib/providers/openaiCompatible.js +0 -8
  38. package/dist/lib/utils/toolUtils.d.ts +32 -0
  39. package/dist/lib/utils/toolUtils.js +60 -0
  40. package/dist/mcp/servers/agent/directToolsServer.js +80 -68
  41. package/dist/mcp/toolRegistry.js +8 -2
  42. package/dist/neurolink.js +20 -0
  43. package/dist/providers/amazonBedrock.d.ts +0 -1
  44. package/dist/providers/amazonBedrock.js +0 -13
  45. package/dist/providers/anthropic.js +8 -25
  46. package/dist/providers/googleAiStudio.d.ts +0 -1
  47. package/dist/providers/googleAiStudio.js +10 -15
  48. package/dist/providers/googleVertex.d.ts +0 -1
  49. package/dist/providers/googleVertex.js +17 -24
  50. package/dist/providers/huggingFace.d.ts +0 -1
  51. package/dist/providers/huggingFace.js +0 -8
  52. package/dist/providers/litellm.d.ts +0 -1
  53. package/dist/providers/litellm.js +0 -8
  54. package/dist/providers/mistral.d.ts +9 -24
  55. package/dist/providers/mistral.js +44 -82
  56. package/dist/providers/ollama.d.ts +0 -1
  57. package/dist/providers/ollama.js +0 -12
  58. package/dist/providers/openAI.d.ts +2 -3
  59. package/dist/providers/openAI.js +12 -20
  60. package/dist/providers/openaiCompatible.d.ts +0 -1
  61. package/dist/providers/openaiCompatible.js +0 -8
  62. package/dist/utils/toolUtils.d.ts +32 -0
  63. package/dist/utils/toolUtils.js +60 -0
  64. package/package.json +1 -1
package/dist/mcp/toolRegistry.js CHANGED
@@ -5,6 +5,7 @@
 import { MCPRegistry } from "./registry.js";
 import { registryLogger } from "../utils/logger.js";
 import { randomUUID } from "crypto";
+import { shouldDisableBuiltinTools } from "../utils/toolUtils.js";
 import { directAgentTools } from "../agent/directTools.js";
 export class MCPToolRegistry extends MCPRegistry {
     tools = new Map();
@@ -12,8 +13,13 @@ export class MCPToolRegistry extends MCPRegistry {
     toolExecutionStats = new Map();
     constructor() {
         super();
-        // Auto-register direct tools on initialization
-        this.registerDirectTools();
+        // šŸ”§ CONDITIONAL: Only auto-register direct tools if not disabled via configuration
+        if (!shouldDisableBuiltinTools()) {
+            this.registerDirectTools();
+        }
+        else {
+            registryLogger.debug("Built-in direct tools disabled via configuration");
+        }
     }
     /**
      * Register all direct tools from directAgentTools
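The `shouldDisableBuiltinTools()` guard above is exported from the new `utils/toolUtils.js` (files 38–39 and 62–63 in the list), whose 60-line body is not part of this excerpt. A minimal sketch of how such a guard is typically wired, assuming an environment-variable flag — the variable name here is an assumption, not the package's documented setting:

```ts
// Hypothetical sketch of shouldDisableBuiltinTools(); the shipped
// toolUtils.js may read a different flag or a config object instead.
export function shouldDisableBuiltinTools(): boolean {
  const flag = process.env.NEUROLINK_DISABLE_BUILTIN_TOOLS?.toLowerCase();
  return flag === "true" || flag === "1";
}
```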
package/dist/neurolink.js CHANGED
@@ -337,6 +337,11 @@ export class NeuroLink {
         // Create provider and generate
         const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
         this);
+        // Enable tool execution for the provider using BaseProvider method
+        provider.setupToolExecutor({
+            customTools: this.customTools,
+            executeTool: this.executeTool.bind(this),
+        }, functionTag);
         const result = await provider.generate({
             ...options,
             systemPrompt: enhancedSystemPrompt,
@@ -410,6 +415,11 @@ export class NeuroLink {
         logger.debug(`[${functionTag}] Attempting provider: ${providerName}`);
         const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
         this);
+        // Enable tool execution for direct provider generation using BaseProvider method
+        provider.setupToolExecutor({
+            customTools: this.customTools,
+            executeTool: this.executeTool.bind(this),
+        }, functionTag);
         const result = await provider.generate(options);
         const responseTime = Date.now() - startTime;
         if (!result) {
@@ -599,6 +609,11 @@ export class NeuroLink {
         });
         // Create provider using the same factory pattern as generate
         const provider = await AIProviderFactory.createBestProvider(providerName, options.model, true, this);
+        // Enable tool execution for streaming using BaseProvider method
+        provider.setupToolExecutor({
+            customTools: this.customTools,
+            executeTool: this.executeTool.bind(this),
+        }, functionTag);
         // Create clean options for provider (remove factoryConfig)
         const cleanOptions = createCleanStreamOptions(enhancedOptions);
         // Call the provider's stream method with clean options
@@ -650,6 +665,11 @@ export class NeuroLink {
         // Use factory to create provider without MCP
         const provider = await AIProviderFactory.createBestProvider(providerName, options.model, false, // Disable MCP for fallback
         this);
+        // Enable tool execution for fallback streaming using BaseProvider method
+        provider.setupToolExecutor({
+            customTools: this.customTools,
+            executeTool: this.executeTool.bind(this),
+        }, functionTag);
         // Create clean options for fallback provider (remove factoryConfig)
         const cleanOptions = createCleanStreamOptions(enhancedOptions);
         const streamResult = await provider.stream(cleanOptions);
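All four call sites above inject the same executor object, so every provider created by the factory now receives NeuroLink's custom-tool registry and its `executeTool` dispatcher before `generate()` or `stream()` runs. A sketch of the provider-side contract this implies — the real `setupToolExecutor` lives in the reworked `baseProvider.js` (+205 lines), which this excerpt does not show, and the types below are assumptions:

```ts
// Assumed shape of the executor handed to setupToolExecutor(); only the two
// properties visible at the call sites (customTools, executeTool) are grounded.
type ToolExecutor = {
  customTools: Map<string, unknown>;
  executeTool: (toolName: string, args: unknown) => Promise<unknown>;
};

abstract class BaseProviderSketch {
  protected toolExecutor?: ToolExecutor;

  // Stores the orchestrator's registry and dispatcher so tool calls made
  // during generate()/stream() can be routed back through NeuroLink.
  setupToolExecutor(executor: ToolExecutor, functionTag?: string): void {
    this.toolExecutor = executor;
  }
}
```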
package/dist/providers/amazonBedrock.d.ts CHANGED
@@ -27,6 +27,5 @@ export declare class AmazonBedrockProvider extends BaseProvider {
     protected getAISDKModel(): LanguageModelV1;
     protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     protected handleProviderError(error: unknown): Error;
-    private validateStreamOptions;
 }
 export default AmazonBedrockProvider;
package/dist/providers/amazonBedrock.js CHANGED
@@ -116,18 +116,5 @@ export class AmazonBedrockProvider extends BaseProvider {
         }
         return new Error(`āŒ Amazon Bedrock Provider Error\n\n${errorMessage || "Unknown error occurred"}\n\nšŸ”§ Troubleshooting:\n1. Check AWS credentials and permissions\n2. Verify model availability\n3. Check network connectivity`);
     }
-    validateStreamOptions(options) {
-        if (!options.input?.text?.trim()) {
-            throw new Error("Prompt is required for streaming");
-        }
-        if (options.maxTokens &&
-            (options.maxTokens < 1 || options.maxTokens > 4096)) {
-            throw new Error("maxTokens must be between 1 and 4096 for Amazon Bedrock");
-        }
-        if (options.temperature &&
-            (options.temperature < 0 || options.temperature > 1)) {
-            throw new Error("temperature must be between 0 and 1");
-        }
-    }
 }
 export default AmazonBedrockProvider;
package/dist/providers/anthropic.js CHANGED
@@ -3,7 +3,7 @@ import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
 // Configuration helpers - now using consolidated utility
 const getAnthropicApiKey = () => {
@@ -87,52 +87,35 @@ export class AnthropicProvider extends BaseProvider {
     }
     // executeGenerate removed - BaseProvider handles all generation with tools
     async executeStream(options, analysisSchema) {
-        // Convert StreamOptions to TextGenerationOptions for validation
-        const validationOptions = {
-            prompt: options.input.text,
-            systemPrompt: options.systemPrompt,
-            temperature: options.temperature,
-            maxTokens: options.maxTokens,
-        };
-        this.validateOptions(validationOptions);
+        this.validateStreamOptions(options);
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
             // āœ… Get tools for streaming (same as generate method)
             const shouldUseTools = !options.disableTools && this.supportsTools();
             const tools = shouldUseTools ? await this.getAllTools() : {};
-            // šŸ”§ CRITICAL FIX: Vercel AI SDK streamText() hangs with tools and maxSteps > 1
-            // For stream-focused SDK, we need reliable streaming, so avoid the hanging case
-            if (shouldUseTools && Object.keys(tools).length > 0) {
-                throw new Error("Vercel AI SDK streamText() limitation with tools - falling back to synthetic streaming");
-            }
             const result = await streamText({
                 model: this.model,
                 prompt: options.input.text,
                 system: options.systemPrompt || undefined,
                 temperature: options.temperature,
                 maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
-                tools: {}, // šŸ”§ Force empty tools for real streaming to avoid hanging
-                maxSteps: 1, // šŸ”§ Force single step for real streaming
-                toolChoice: "none", // šŸ”§ Force no tools for real streaming
+                tools,
+                maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+                toolChoice: shouldUseTools ? "auto" : "none",
                 abortSignal: timeoutController?.controller.signal,
             });
             timeoutController?.cleanup();
-            // Transform string stream to content object stream
-            const transformedStream = async function* () {
-                for await (const chunk of result.textStream) {
-                    yield { content: chunk };
-                }
-            };
+            const transformedStream = this.createTextStream(result);
             // āœ… Note: Vercel AI SDK's streamText() method limitations with tools
             // The streamText() function doesn't provide the same tool result access as generateText()
-            // For full tool support, the BaseProvider will fall back to synthetic streaming when needed
+            // Full tool support is now available with real streaming
             const toolCalls = [];
             const toolResults = [];
             const usage = await result.usage;
             const finishReason = await result.finishReason;
             return {
-                stream: transformedStream(),
+                stream: transformedStream,
                 provider: this.providerName,
                 model: this.modelName,
                 toolCalls, // āœ… Include tool calls in stream result
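Every provider in this release swaps its inline async generator for `this.createTextStream(result)`. The deleted inline code shows exactly what the helper must do, so it can be reconstructed faithfully, though the shared `BaseProvider` method itself sits outside this excerpt:

```ts
// Reconstructed from the inline generators this diff deletes: wrap the
// Vercel AI SDK's plain string chunks in the { content } objects that
// NeuroLink's StreamResult consumers expect.
async function* createTextStream(result: {
  textStream: AsyncIterable<string>;
}): AsyncGenerator<{ content: string }> {
  for await (const chunk of result.textStream) {
    yield { content: chunk };
  }
}
```

Centralizing the transform also removes the call-site quirk of invoking a local closure: results now carry the stream directly (`stream: transformedStream` rather than `stream: transformedStream()`).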
package/dist/providers/googleAiStudio.d.ts CHANGED
@@ -18,6 +18,5 @@ export declare class GoogleAIStudioProvider extends BaseProvider {
     protected handleProviderError(error: unknown): Error;
     protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     private getApiKey;
-    private validateStreamOptions;
 }
 export default GoogleAIStudioProvider;
package/dist/providers/googleAiStudio.js CHANGED
@@ -4,7 +4,7 @@ import { GoogleAIModels } from "../core/types.js";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 // Environment variable setup
 if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
@@ -73,30 +73,30 @@ export class GoogleAIStudioProvider extends BaseProvider {
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
+            // Get tools consistently with generate method
+            const shouldUseTools = !options.disableTools && this.supportsTools();
+            const tools = shouldUseTools ? await this.getAllTools() : {};
             const result = await streamText({
                 model,
                 prompt: options.input.text,
                 system: options.systemPrompt,
                 temperature: options.temperature,
                 maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
-                tools: options.tools,
-                toolChoice: "auto",
+                tools,
+                maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+                toolChoice: shouldUseTools ? "auto" : "none",
                 abortSignal: timeoutController?.controller.signal,
             });
             timeoutController?.cleanup();
-            // Transform string stream to content object stream
-            const transformedStream = async function* () {
-                for await (const chunk of result.textStream) {
-                    yield { content: chunk };
-                }
-            };
+            // Transform string stream to content object stream using BaseProvider method
+            const transformedStream = this.createTextStream(result);
             // Create analytics promise that resolves after stream completion
             const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
                 requestId: `google-ai-stream-${Date.now()}`,
                 streamingMode: true,
             });
             return {
-                stream: transformedStream(),
+                stream: transformedStream,
                 provider: this.providerName,
                 model: this.modelName,
                 analytics: analyticsPromise,
@@ -121,10 +121,5 @@ export class GoogleAIStudioProvider extends BaseProvider {
         }
         return apiKey;
     }
-    validateStreamOptions(options) {
-        if (!options.input?.text || options.input.text.trim().length === 0) {
-            throw new Error("Input text is required and cannot be empty");
-        }
-    }
 }
 export default GoogleAIStudioProvider;
package/dist/providers/googleVertex.d.ts CHANGED
@@ -40,7 +40,6 @@ export declare class GoogleVertexProvider extends BaseProvider {
     private getModel;
     protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     protected handleProviderError(error: unknown): Error;
-    private validateStreamOptions;
     /**
      * Memory-safe cache management for model configurations
      * Implements LRU eviction to prevent memory leaks in long-running processes
package/dist/providers/googleVertex.js CHANGED
@@ -2,8 +2,8 @@ import { createVertex, } from "@ai-sdk/google-vertex";
 import { streamText, Output, } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
-import { TimeoutError } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { ModelConfigurationManager } from "../core/modelConfiguration.js";
 import { validateApiKey, createVertexProjectConfig, createGoogleAuthConfig, } from "../utils/providerConfig.js";
 // Cache for anthropic module to avoid repeated imports
@@ -184,6 +184,9 @@ export class GoogleVertexProvider extends BaseProvider {
     async executeStream(options, analysisSchema) {
         const functionTag = "GoogleVertexProvider.executeStream";
         let chunkCount = 0;
+        // Add timeout controller for consistency with other providers
+        const timeout = this.getTimeout(options);
+        const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
             this.validateStreamOptions(options);
             logger.debug(`${functionTag}: Starting stream request`, {
@@ -200,6 +203,9 @@
             const maxTokens = shouldSetMaxTokens
                 ? options.maxTokens || DEFAULT_MAX_TOKENS
                 : undefined;
+            // Get tools consistently with generate method (using BaseProvider pattern)
+            const shouldUseTools = !options.disableTools && this.supportsTools();
+            const tools = shouldUseTools ? await this.getAllTools() : {};
             // Build complete stream options with proper typing
             let streamOptions = {
                 model: model,
@@ -207,6 +213,10 @@
                 system: options.systemPrompt,
                 temperature: options.temperature,
                 ...(maxTokens && { maxTokens }),
+                tools,
+                maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+                toolChoice: shouldUseTools ? "auto" : "none",
+                abortSignal: timeoutController?.controller.signal,
                 onError: (event) => {
                     const error = event.error;
                     const errorMessage = error instanceof Error ? error.message : String(error);
@@ -243,17 +253,17 @@
                 }
             }
             const result = streamText(streamOptions);
+            timeoutController?.cleanup();
+            // Transform string stream to content object stream using BaseProvider method
+            const transformedStream = this.createTextStream(result);
             return {
-                stream: (async function* () {
-                    for await (const chunk of result.textStream) {
-                        yield { content: chunk };
-                    }
-                })(),
+                stream: transformedStream,
                 provider: this.providerName,
                 model: this.modelName,
             };
         }
         catch (error) {
+            timeoutController?.cleanup();
             logger.error(`${functionTag}: Exception`, {
                 provider: this.providerName,
                 modelName: this.modelName,
@@ -287,23 +297,6 @@
         }
         return new Error(`āŒ Google Vertex AI Provider Error\n\n${message}\n\nTroubleshooting:\n1. Check Google Cloud credentials and permissions\n2. Verify project ID and location settings\n3. Ensure Vertex AI API is enabled\n4. Check network connectivity`);
     }
-    validateStreamOptions(options) {
-        if (!options.input?.text?.trim()) {
-            throw new Error("Prompt is required for streaming");
-        }
-        // Use cached model configuration for validation performance
-        const modelName = this.modelName || getDefaultVertexModel();
-        const shouldValidateMaxTokens = this.shouldSetMaxTokensCached(modelName);
-        if (shouldValidateMaxTokens &&
-            options.maxTokens &&
-            (options.maxTokens < 1 || options.maxTokens > 8192)) {
-            throw new Error("maxTokens must be between 1 and 8192 for Google Vertex AI");
-        }
-        if (options.temperature &&
-            (options.temperature < 0 || options.temperature > 2)) {
-            throw new Error("temperature must be between 0 and 2");
-        }
-    }
     /**
      * Memory-safe cache management for model configurations
      * Implements LRU eviction to prevent memory leaks in long-running processes
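Each provider's private `validateStreamOptions` is deleted here, yet the `executeStream` bodies still call `this.validateStreamOptions(options)`, so the check has evidently been consolidated into the shared `BaseProvider` (reworked in this same release). A sketch of the consolidated validator, mirroring the provider-local checks removed above — the exact bounds the shared version enforces are an assumption:

```ts
// Assumed consolidated validator; the per-provider maxTokens caps (4096 for
// Bedrock, 8192 for Vertex) and Bedrock's 0-1 temperature cap would need
// provider-specific handling that this generic sketch does not attempt.
type StreamOptionsSketch = {
  input?: { text?: string };
  maxTokens?: number;
  temperature?: number;
};

function validateStreamOptions(options: StreamOptionsSketch): void {
  if (!options.input?.text?.trim()) {
    throw new Error("Prompt is required for streaming");
  }
  if (options.maxTokens !== undefined && options.maxTokens < 1) {
    throw new Error("maxTokens must be greater than 0");
  }
  if (
    options.temperature !== undefined &&
    (options.temperature < 0 || options.temperature > 2)
  ) {
    throw new Error("temperature must be between 0 and 2");
  }
}
```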
package/dist/providers/huggingFace.d.ts CHANGED
@@ -75,6 +75,5 @@ export declare class HuggingFaceProvider extends BaseProvider {
      * Returns the Vercel AI SDK model instance for HuggingFace
      */
     protected getAISDKModel(): LanguageModelV1;
-    private validateStreamOptions;
 }
 export default HuggingFaceProvider;
package/dist/providers/huggingFace.js CHANGED
@@ -272,14 +272,6 @@ Available tools will be provided in the function calling format. Use them when t
     getAISDKModel() {
         return this.model;
     }
-    // ===================
-    // PRIVATE VALIDATION METHODS
-    // ===================
-    validateStreamOptions(options) {
-        if (!options.input?.text || options.input.text.trim().length === 0) {
-            throw new Error("Input text is required and cannot be empty");
-        }
-    }
 }
 // Export for factory registration
 export default HuggingFaceProvider;
package/dist/providers/litellm.d.ts CHANGED
@@ -39,5 +39,4 @@ export declare class LiteLLMProvider extends BaseProvider {
      * @private
      */
     private fetchModelsFromAPI;
-    private validateStreamOptions;
 }
package/dist/providers/litellm.js CHANGED
@@ -264,12 +264,4 @@ export class LiteLLMProvider extends BaseProvider {
             throw error;
         }
     }
-    // ===================
-    // PRIVATE VALIDATION METHODS
-    // ===================
-    validateStreamOptions(options) {
-        if (!options.input?.text || options.input.text.trim().length === 0) {
-            throw new Error("Input text is required and cannot be empty");
-        }
-    }
 }
package/dist/providers/mistral.d.ts CHANGED
@@ -1,5 +1,6 @@
-import { type LanguageModelV1 } from "ai";
-import type { AIProviderName, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
+import type { ZodType, ZodTypeDef } from "zod";
+import { type Schema, type LanguageModelV1 } from "ai";
+import type { AIProviderName } from "../core/types.js";
 import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
 import { BaseProvider } from "../core/baseProvider.js";
 /**
@@ -9,30 +10,14 @@ import { BaseProvider } from "../core/baseProvider.js";
 export declare class MistralProvider extends BaseProvider {
     private model;
     constructor(modelName?: string, sdk?: unknown);
+    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+    protected getProviderName(): AIProviderName;
+    protected getDefaultModel(): string;
     /**
-     * Generate text using Mistral API
+     * Returns the Vercel AI SDK model instance for Mistral
      */
-    generate(options: TextGenerationOptions): Promise<EnhancedGenerateResult>;
-    /**
-     * Stream text generation using Mistral API
-     */
-    executeStream(options: StreamOptions): Promise<StreamResult>;
-    /**
-     * Get default model name for this provider
-     */
-    getDefaultModel(): string;
-    /**
-     * Get provider name
-     */
-    getProviderName(): AIProviderName;
-    /**
-     * Get AI SDK model instance
-     */
-    getAISDKModel(): LanguageModelV1;
-    /**
-     * Handle provider-specific errors
-     */
-    handleProviderError(error: unknown): Error;
+    protected getAISDKModel(): LanguageModelV1;
+    protected handleProviderError(error: unknown): Error;
     /**
      * Validate provider configuration
      */
package/dist/providers/mistral.js CHANGED
@@ -2,7 +2,8 @@ import { createMistral } from "@ai-sdk/mistral";
 import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
-import { createAnalytics } from "../core/analytics.js";
+import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 // Configuration helpers - now using consolidated utility
@@ -29,85 +30,43 @@ export class MistralProvider extends BaseProvider {
         const mistral = createMistral({
             apiKey: apiKey,
         });
-        this.model = mistral(this.modelName || getDefaultMistralModel());
+        this.model = mistral(this.modelName);
         logger.debug("Mistral Provider v2 initialized", {
             modelName: this.modelName,
             providerName: this.providerName,
         });
     }
-    /**
-     * Generate text using Mistral API
-     */
-    async generate(options) {
-        const startTime = Date.now();
-        try {
-            const result = await this.model.doGenerate({
-                inputFormat: "prompt",
-                mode: { type: "regular" },
-                prompt: [
-                    {
-                        role: "user",
-                        content: [{ type: "text", text: options.prompt || "" }],
-                    },
-                ],
-                temperature: options.temperature,
-                maxTokens: options.maxTokens,
-            });
-            const responseTime = Date.now() - startTime;
-            // Extract token usage and text content
-            const tokenUsage = result.usage;
-            const textContent = result.text || "";
-            // Create analytics data using helper
-            const analytics = createAnalytics("mistral", this.modelName, { usage: tokenUsage, content: textContent }, responseTime, { requestId: `mistral-${Date.now()}` });
-            return {
-                content: textContent,
-                usage: {
-                    inputTokens: tokenUsage?.promptTokens || 0,
-                    outputTokens: tokenUsage?.completionTokens || 0,
-                    totalTokens: (tokenUsage?.promptTokens || 0) +
-                        (tokenUsage?.completionTokens || 0),
-                },
-                provider: this.providerName,
-                model: this.modelName,
-                analytics,
-            };
-        }
-        catch (error) {
-            const responseTime = Date.now() - startTime;
-            logger.error("Mistral generation failed", {
-                error: error instanceof Error ? error.message : String(error),
-                responseTime,
-            });
-            throw new Error(`Mistral generation failed: ${error instanceof Error ? error.message : String(error)}`);
-        }
-    }
-    /**
-     * Stream text generation using Mistral API
-     */
-    async executeStream(options) {
+    // generate() method is inherited from BaseProvider; this provider uses the base implementation for generation with tools
+    async executeStream(options, analysisSchema) {
+        this.validateStreamOptions(options);
         const startTime = Date.now();
+        const timeout = this.getTimeout(options);
+        const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
+            // Get tools consistently with generate method
+            const shouldUseTools = !options.disableTools && this.supportsTools();
+            const tools = shouldUseTools ? await this.getAllTools() : {};
             const result = await streamText({
                 model: this.model,
                 prompt: options.input.text,
+                system: options.systemPrompt,
                 temperature: options.temperature,
-                maxTokens: options.maxTokens,
-                tools: options.tools,
-                toolChoice: "auto",
+                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+                tools,
+                maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+                toolChoice: shouldUseTools ? "auto" : "none",
+                abortSignal: timeoutController?.controller.signal,
             });
-            // Transform stream to match StreamResult interface
-            const transformedStream = async function* () {
-                for await (const chunk of result.textStream) {
-                    yield { content: chunk };
-                }
-            };
+            timeoutController?.cleanup();
+            // Transform string stream to content object stream using BaseProvider method
+            const transformedStream = this.createTextStream(result);
            // Create analytics promise that resolves after stream completion
             const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
                 requestId: `mistral-stream-${Date.now()}`,
                 streamingMode: true,
             });
             return {
-                stream: transformedStream(),
+                stream: transformedStream,
                 provider: this.providerName,
                 model: this.modelName,
                 analytics: analyticsPromise,
@@ -118,38 +77,41 @@ export class MistralProvider extends BaseProvider {
             };
         }
         catch (error) {
-            logger.error("Mistral streaming failed", {
-                error: error instanceof Error ? error.message : String(error),
-            });
-            throw new Error(`Mistral streaming failed: ${error instanceof Error ? error.message : String(error)}`);
+            timeoutController?.cleanup();
+            throw this.handleProviderError(error);
         }
     }
-    /**
-     * Get default model name for this provider
-     */
-    getDefaultModel() {
-        return getDefaultMistralModel();
-    }
-    /**
-     * Get provider name
-     */
+    // ===================
+    // ABSTRACT METHOD IMPLEMENTATIONS
+    // ===================
     getProviderName() {
         return this.providerName;
     }
+    getDefaultModel() {
+        return getDefaultMistralModel();
+    }
     /**
-     * Get AI SDK model instance
+     * Returns the Vercel AI SDK model instance for Mistral
      */
     getAISDKModel() {
         return this.model;
     }
-    /**
-     * Handle provider-specific errors
-     */
     handleProviderError(error) {
-        if (error instanceof Error) {
-            return error;
+        if (error instanceof TimeoutError) {
+            return new Error(`Mistral request timed out: ${error.message}`);
+        }
+        const errorRecord = error;
+        const message = typeof errorRecord?.message === "string"
+            ? errorRecord.message
+            : "Unknown error";
+        if (message.includes("API_KEY_INVALID") ||
+            message.includes("Invalid API key")) {
+            return new Error("Invalid Mistral API key. Please check your MISTRAL_API_KEY environment variable.");
+        }
+        if (message.includes("rate limit")) {
+            return new Error("Mistral rate limit exceeded. Please try again later.");
         }
-        return new Error(`Mistral provider error: ${String(error)}`);
+        return new Error(`Mistral error: ${message}`);
     }
     /**
      * Validate provider configuration
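Taken together, these provider changes mean streaming no longer forces `tools: {}`, `maxSteps: 1`, and `toolChoice: "none"`: tool calls can now run during a real stream. A hypothetical usage sketch — the option shapes follow the `StreamOptions` fields visible in this diff (`input.text`, `maxSteps`), and any field not shown here is an assumption:

```ts
import { NeuroLink } from "@juspay/neurolink";

const neurolink = new NeuroLink();

// Streaming with tools enabled: maxSteps now flows through to streamText(),
// so multi-step tool loops happen inside the live stream rather than via a
// synthetic-streaming fallback.
const result = await neurolink.stream({
  input: { text: "Summarize today's system status." },
  provider: "mistral", // assumed option; any provider updated in this release
  maxSteps: 5,         // assumed to be honored end-to-end per this diff
});

for await (const chunk of result.stream) {
  process.stdout.write(chunk.content);
}
```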
package/dist/providers/ollama.d.ts CHANGED
@@ -75,7 +75,6 @@ export declare class OllamaProvider extends BaseProvider {
      */
     private createOllamaStream;
     protected handleProviderError(error: unknown): Error;
-    private validateStreamOptions;
     /**
      * Check if Ollama service is healthy and accessible
      */
package/dist/providers/ollama.js CHANGED
@@ -560,18 +560,6 @@ export class OllamaProvider extends BaseProvider {
         }
         return new Error(`āŒ Ollama Provider Error\n\n${error.message || "Unknown error occurred"}\n\nšŸ”§ Troubleshooting:\n1. Check if Ollama service is running\n2. Verify model is installed: 'ollama list'\n3. Check network connectivity to ${this.baseUrl}\n4. Review Ollama logs for details`);
     }
-    validateStreamOptions(options) {
-        if (!options.input?.text?.trim()) {
-            throw new Error("Prompt is required for streaming");
-        }
-        if (options.maxTokens && options.maxTokens < 1) {
-            throw new Error("maxTokens must be greater than 0");
-        }
-        if (options.temperature &&
-            (options.temperature < 0 || options.temperature > 2)) {
-            throw new Error("temperature must be between 0 and 2");
-        }
-    }
     /**
      * Check if Ollama service is healthy and accessible
      */