@juspay/neurolink 1.5.3 → 1.6.0

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (42)
  1. package/CHANGELOG.md +54 -0
  2. package/README.md +17 -7
  3. package/dist/cli/commands/config.d.ts +70 -3
  4. package/dist/cli/commands/config.js +75 -3
  5. package/dist/cli/commands/ollama.d.ts +8 -0
  6. package/dist/cli/commands/ollama.js +323 -0
  7. package/dist/cli/index.js +11 -13
  8. package/dist/core/factory.js +17 -2
  9. package/dist/core/types.d.ts +4 -1
  10. package/dist/core/types.js +3 -0
  11. package/dist/lib/core/factory.js +17 -2
  12. package/dist/lib/core/types.d.ts +4 -1
  13. package/dist/lib/core/types.js +3 -0
  14. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +4 -4
  15. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +13 -9
  16. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +250 -152
  17. package/dist/lib/neurolink.d.ts +2 -2
  18. package/dist/lib/neurolink.js +18 -8
  19. package/dist/lib/providers/huggingFace.d.ts +31 -0
  20. package/dist/lib/providers/huggingFace.js +355 -0
  21. package/dist/lib/providers/index.d.ts +6 -0
  22. package/dist/lib/providers/index.js +7 -1
  23. package/dist/lib/providers/mistralAI.d.ts +32 -0
  24. package/dist/lib/providers/mistralAI.js +217 -0
  25. package/dist/lib/providers/ollama.d.ts +51 -0
  26. package/dist/lib/providers/ollama.js +493 -0
  27. package/dist/lib/utils/providerUtils.js +17 -2
  28. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +4 -4
  29. package/dist/mcp/servers/ai-providers/ai-core-server.js +13 -9
  30. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +248 -152
  31. package/dist/neurolink.d.ts +2 -2
  32. package/dist/neurolink.js +18 -8
  33. package/dist/providers/huggingFace.d.ts +31 -0
  34. package/dist/providers/huggingFace.js +355 -0
  35. package/dist/providers/index.d.ts +6 -0
  36. package/dist/providers/index.js +7 -1
  37. package/dist/providers/mistralAI.d.ts +32 -0
  38. package/dist/providers/mistralAI.js +217 -0
  39. package/dist/providers/ollama.d.ts +51 -0
  40. package/dist/providers/ollama.js +493 -0
  41. package/dist/utils/providerUtils.js +17 -2
  42. package/package.json +161 -151
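Release highlights: 1.6.0 adds three new providers (HuggingFace, Ollama, MistralAI), registers them in the PROVIDERS registry, and introduces an ollama CLI command; the hunks below show the HuggingFace and MistralAI modules plus the registry changes. A minimal, hedged usage sketch follows — the root import path, the model name, and the result shape are assumptions inferred from the code in this diff, not documented API:

// Sketch only: assumes the providers barrel is re-exported from the package root.
import { MistralAI } from '@juspay/neurolink';

// MISTRAL_API_KEY must be set; the model falls back to process.env.MISTRAL_MODEL or 'mistral-small'.
const mistral = new MistralAI();

// generateText accepts either a plain prompt string or an options object (see the provider source below).
const result = await mistral.generateText({
    prompt: 'Say hello in one short sentence.',
    temperature: 0.3,
    maxTokens: 100,
});
console.log(result?.text);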
@@ -0,0 +1,355 @@
+ import { HfInference } from '@huggingface/inference';
+ import { streamText, generateText, Output } from 'ai';
+ import { logger } from '../utils/logger.js';
+ // Default system context
+ const DEFAULT_SYSTEM_CONTEXT = {
+     systemPrompt: 'You are a helpful AI assistant.'
+ };
+ // Configuration helpers
+ const getHuggingFaceApiKey = () => {
+     const apiKey = process.env.HUGGINGFACE_API_KEY || process.env.HF_TOKEN;
+     if (!apiKey) {
+         throw new Error('HUGGINGFACE_API_KEY environment variable is not set');
+     }
+     return apiKey;
+ };
+ const getHuggingFaceModelId = () => {
+     return process.env.HUGGINGFACE_MODEL || 'microsoft/DialoGPT-medium';
+ };
+ const hasValidAuth = () => {
+     return !!(process.env.HUGGINGFACE_API_KEY || process.env.HF_TOKEN);
+ };
+ // Lazy initialization cache
+ let _hfClient = null;
+ function getHuggingFaceClient() {
+     if (!_hfClient) {
+         const apiKey = getHuggingFaceApiKey();
+         _hfClient = new HfInference(apiKey);
+     }
+     return _hfClient;
+ }
+ // Retry configuration for model loading
+ const RETRY_CONFIG = {
+     maxRetries: 3,
+     baseDelay: 2000, // 2 seconds
+     maxDelay: 30000, // 30 seconds
+     backoffMultiplier: 2
+ };
+ // Helper function for exponential backoff retry
+ async function retryWithBackoff(operation, retryConfig = RETRY_CONFIG) {
+     let lastError;
+     for (let attempt = 0; attempt <= retryConfig.maxRetries; attempt++) {
+         try {
+             return await operation();
+         }
+         catch (error) {
+             lastError = error;
+             // Check if it's a model loading error (503 status)
+             if (error instanceof Error && error.message.includes('503')) {
+                 if (attempt < retryConfig.maxRetries) {
+                     const delay = Math.min(retryConfig.baseDelay * Math.pow(retryConfig.backoffMultiplier, attempt), retryConfig.maxDelay);
+                     logger.debug('HuggingFace model loading, retrying...', {
+                         attempt: attempt + 1,
+                         maxRetries: retryConfig.maxRetries,
+                         delayMs: delay,
+                         error: error.message
+                     });
+                     await new Promise(resolve => setTimeout(resolve, delay));
+                     continue;
+                 }
+             }
+             // For non-503 errors or final attempt, throw immediately
+             throw error;
+         }
+     }
+     throw lastError;
+ }
+ // Custom LanguageModelV1 implementation for Hugging Face
+ class HuggingFaceLanguageModel {
+     specificationVersion = 'v1';
+     provider = 'huggingface';
+     modelId;
+     maxTokens;
+     supportsStreaming = true;
+     defaultObjectGenerationMode = 'json';
+     client;
+     constructor(modelId, client) {
+         this.modelId = modelId;
+         this.client = client;
+     }
+     estimateTokens(text) {
+         return Math.ceil(text.length / 4); // Rough estimation: 4 characters per token
+     }
+     convertMessagesToPrompt(messages) {
+         return messages
+             .map(msg => {
+             if (typeof msg.content === 'string') {
+                 return `${msg.role}: ${msg.content}`;
+             }
+             else if (Array.isArray(msg.content)) {
+                 // Handle multi-part content (text, images, etc.)
+                 return `${msg.role}: ${msg.content
+                     .filter((part) => part.type === 'text')
+                     .map((part) => part.text)
+                     .join(' ')}`;
+             }
+             return '';
+         })
+             .join('\n');
+     }
+     async doGenerate(options) {
+         const prompt = this.convertMessagesToPrompt(options.prompt);
+         const response = await retryWithBackoff(async () => {
+             return await this.client.textGeneration({
+                 model: this.modelId,
+                 inputs: prompt,
+                 parameters: {
+                     temperature: options.temperature || 0.7,
+                     max_new_tokens: options.maxTokens || 500,
+                     return_full_text: false,
+                     do_sample: (options.temperature || 0.7) > 0
+                 }
+             });
+         });
+         const generatedText = response.generated_text || '';
+         const promptTokens = this.estimateTokens(prompt);
+         const completionTokens = this.estimateTokens(generatedText);
+         return {
+             text: generatedText,
+             usage: {
+                 promptTokens,
+                 completionTokens,
+                 totalTokens: promptTokens + completionTokens
+             },
+             finishReason: 'stop',
+             logprobs: undefined,
+             rawCall: { rawPrompt: prompt, rawSettings: options },
+             rawResponse: { headers: {} }
+         };
+     }
+     async doStream(options) {
+         const prompt = this.convertMessagesToPrompt(options.prompt);
+         // HuggingFace Inference API doesn't support true streaming
+         // We'll simulate streaming by generating the full text and chunking it
+         const response = await this.doGenerate(options);
+         // Create a ReadableStream that chunks the response
+         const stream = new ReadableStream({
+             start(controller) {
+                 const text = response.text || '';
+                 const chunkSize = Math.max(1, Math.floor(text.length / 10)); // 10 chunks
+                 let index = 0;
+                 const pushChunk = () => {
+                     if (index < text.length) {
+                         const chunk = text.slice(index, index + chunkSize);
+                         controller.enqueue({
+                             type: 'text-delta',
+                             textDelta: chunk
+                         });
+                         index += chunkSize;
+                         // Add delay to simulate streaming
+                         setTimeout(pushChunk, 50);
+                     }
+                     else {
+                         // Send finish event
+                         controller.enqueue({
+                             type: 'finish',
+                             finishReason: response.finishReason,
+                             usage: response.usage,
+                             logprobs: response.logprobs
+                         });
+                         controller.close();
+                     }
+                 };
+                 pushChunk();
+             }
+         });
+         return {
+             stream,
+             rawCall: response.rawCall,
+             rawResponse: response.rawResponse
+         };
+     }
+ }
+ // Hugging Face class with enhanced error handling
+ export class HuggingFace {
+     modelName;
+     client;
+     /**
+      * Initializes a new instance of HuggingFace
+      * @param modelName - Optional model name to override the default from config
+      */
+     constructor(modelName) {
+         const functionTag = 'HuggingFace.constructor';
+         this.modelName = modelName || getHuggingFaceModelId();
+         try {
+             this.client = getHuggingFaceClient();
+             logger.debug(`[${functionTag}] Initialization started`, {
+                 modelName: this.modelName,
+                 hasApiKey: hasValidAuth()
+             });
+             logger.debug(`[${functionTag}] Initialization completed`, {
+                 modelName: this.modelName,
+                 success: true
+             });
+         }
+         catch (err) {
+             logger.error(`[${functionTag}] Initialization failed`, {
+                 message: 'Error in initializing Hugging Face',
+                 modelName: this.modelName,
+                 error: err instanceof Error ? err.message : String(err),
+                 stack: err instanceof Error ? err.stack : undefined
+             });
+             throw err;
+         }
+     }
+     /**
+      * Gets the appropriate model instance
+      * @private
+      */
+     getModel() {
+         logger.debug('HuggingFace.getModel - Hugging Face model selected', {
+             modelName: this.modelName
+         });
+         return new HuggingFaceLanguageModel(this.modelName, this.client);
+     }
+     /**
+      * Processes text using streaming approach with enhanced error handling callbacks
+      * @param prompt - The input text prompt to analyze
+      * @param analysisSchema - Optional Zod schema or Schema object for output validation
+      * @returns Promise resolving to StreamTextResult or null if operation fails
+      */
+     async streamText(optionsOrPrompt, analysisSchema) {
+         const functionTag = 'HuggingFace.streamText';
+         const provider = 'huggingface';
+         let chunkCount = 0;
+         try {
+             // Parse parameters - support both string and options object
+             const options = typeof optionsOrPrompt === 'string'
+                 ? { prompt: optionsOrPrompt }
+                 : optionsOrPrompt;
+             const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+             // Use schema from options or fallback parameter
+             const finalSchema = schema || analysisSchema;
+             logger.debug(`[${functionTag}] Stream request started`, {
+                 provider,
+                 modelName: this.modelName,
+                 promptLength: prompt.length,
+                 temperature,
+                 maxTokens,
+                 hasSchema: !!finalSchema
+             });
+             const model = this.getModel();
+             const streamOptions = {
+                 model: model,
+                 prompt: prompt,
+                 system: systemPrompt,
+                 temperature,
+                 maxTokens,
+                 onError: (event) => {
+                     const error = event.error;
+                     const errorMessage = error instanceof Error ? error.message : String(error);
+                     const errorStack = error instanceof Error ? error.stack : undefined;
+                     logger.error(`[${functionTag}] Stream text error`, {
+                         provider,
+                         modelName: this.modelName,
+                         error: errorMessage,
+                         stack: errorStack,
+                         promptLength: prompt.length,
+                         chunkCount
+                     });
+                 },
+                 onFinish: (event) => {
+                     logger.debug(`[${functionTag}] Stream text finished`, {
+                         provider,
+                         modelName: this.modelName,
+                         finishReason: event.finishReason,
+                         usage: event.usage,
+                         totalChunks: chunkCount,
+                         promptLength: prompt.length,
+                         responseLength: event.text?.length || 0
+                     });
+                 },
+                 onChunk: (event) => {
+                     chunkCount++;
+                     logger.debug(`[${functionTag}] Stream text chunk`, {
+                         provider,
+                         modelName: this.modelName,
+                         chunkNumber: chunkCount,
+                         chunkLength: event.chunk.text?.length || 0,
+                         chunkType: event.chunk.type
+                     });
+                 }
+             };
+             if (finalSchema) {
+                 streamOptions.experimental_output = Output.object({ schema: finalSchema });
+             }
+             const result = streamText(streamOptions);
+             return result;
+         }
+         catch (err) {
+             logger.error(`[${functionTag}] Exception`, {
+                 provider,
+                 modelName: this.modelName,
+                 message: 'Error in streaming text',
+                 err: String(err),
+                 promptLength: typeof optionsOrPrompt === 'string' ? optionsOrPrompt.length : optionsOrPrompt.prompt.length
+             });
+             throw err; // Re-throw error to trigger fallback
+         }
+     }
+     /**
+      * Processes text using non-streaming approach with optional schema validation
+      * @param prompt - The input text prompt to analyze
+      * @param analysisSchema - Optional Zod schema or Schema object for output validation
+      * @returns Promise resolving to GenerateTextResult or null if operation fails
+      */
+     async generateText(optionsOrPrompt, analysisSchema) {
+         const functionTag = 'HuggingFace.generateText';
+         const provider = 'huggingface';
+         try {
+             // Parse parameters - support both string and options object
+             const options = typeof optionsOrPrompt === 'string'
+                 ? { prompt: optionsOrPrompt }
+                 : optionsOrPrompt;
+             const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+             // Use schema from options or fallback parameter
+             const finalSchema = schema || analysisSchema;
+             logger.debug(`[${functionTag}] Generate request started`, {
+                 provider,
+                 modelName: this.modelName,
+                 promptLength: prompt.length,
+                 temperature,
+                 maxTokens
+             });
+             const model = this.getModel();
+             const generateOptions = {
+                 model: model,
+                 prompt: prompt,
+                 system: systemPrompt,
+                 temperature,
+                 maxTokens
+             };
+             if (finalSchema) {
+                 generateOptions.experimental_output = Output.object({ schema: finalSchema });
+             }
+             const result = await generateText(generateOptions);
+             logger.debug(`[${functionTag}] Generate text completed`, {
+                 provider,
+                 modelName: this.modelName,
+                 usage: result.usage,
+                 finishReason: result.finishReason,
+                 responseLength: result.text?.length || 0
+             });
+             return result;
+         }
+         catch (err) {
+             logger.error(`[${functionTag}] Exception`, {
+                 provider,
+                 modelName: this.modelName,
+                 message: 'Error in generating text',
+                 err: String(err)
+             });
+             throw err; // Re-throw error to trigger fallback
+         }
+     }
+ }
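Two behaviors of the HuggingFace module above are worth noting: 503 "model loading" responses are retried with exponential backoff (with the default RETRY_CONFIG, up to four attempts with waits of 2 s, 4 s and 8 s, capped at 30 s), and streaming is simulated by generating the full completion and re-emitting it in roughly ten chunks about 50 ms apart. A consumption sketch, assuming the package-root re-export and the Vercel AI SDK result shape with an async-iterable textStream (neither is confirmed by this diff):

// Sketch only: import path is an assumption.
import { HuggingFace } from '@juspay/neurolink';

const hf = new HuggingFace(); // requires HUGGINGFACE_API_KEY or HF_TOKEN
const stream = await hf.streamText('Explain exponential backoff in two sentences.');
if (stream) {
    // Chunks come from the simulated stream, not token-by-token from the Inference API.
    for await (const delta of stream.textStream) {
        process.stdout.write(delta);
    }
}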
@@ -8,6 +8,9 @@ export { OpenAI } from './openAI.js';
  export { AnthropicProvider } from './anthropic.js';
  export { AzureOpenAIProvider } from './azureOpenAI.js';
  export { GoogleAIStudio } from './googleAIStudio.js';
+ export { HuggingFace } from './huggingFace.js';
+ export { Ollama } from './ollama.js';
+ export { MistralAI } from './mistralAI.js';
  export type { AIProvider } from '../core/types.js';
  /**
   * Provider registry for dynamic provider instantiation
@@ -19,6 +22,9 @@ export declare const PROVIDERS: {
      readonly anthropic: "AnthropicProvider";
      readonly azure: "AzureOpenAIProvider";
      readonly 'google-ai': "GoogleAIStudio";
+     readonly huggingface: "HuggingFace";
+     readonly ollama: "Ollama";
+     readonly mistral: "MistralAI";
  };
  /**
   * Type for valid provider names
@@ -8,6 +8,9 @@ export { OpenAI } from './openAI.js';
  export { AnthropicProvider } from './anthropic.js';
  export { AzureOpenAIProvider } from './azureOpenAI.js';
  export { GoogleAIStudio } from './googleAIStudio.js';
+ export { HuggingFace } from './huggingFace.js';
+ export { Ollama } from './ollama.js';
+ export { MistralAI } from './mistralAI.js';
  /**
   * Provider registry for dynamic provider instantiation
   */
@@ -17,7 +20,10 @@ export const PROVIDERS = {
      openai: 'OpenAI',
      anthropic: 'AnthropicProvider',
      azure: 'AzureOpenAIProvider',
-     'google-ai': 'GoogleAIStudio'
+     'google-ai': 'GoogleAIStudio',
+     huggingface: 'HuggingFace',
+     ollama: 'Ollama',
+     mistral: 'MistralAI'
  };
  /**
   * List of all available provider names
@@ -0,0 +1,32 @@
+ import type { ZodType, ZodTypeDef } from 'zod';
+ import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
+ export declare class MistralAI implements AIProvider {
+     private modelName;
+     private client;
+     /**
+      * Initializes a new instance of MistralAI
+      * @param modelName - Optional model name to override the default from config
+      */
+     constructor(modelName?: string | null);
+     /**
+      * Gets the appropriate model instance
+      * @private
+      */
+     private getModel;
+     /**
+      * Processes text using streaming approach with enhanced error handling callbacks
+      * @param prompt - The input text prompt to analyze
+      * @param analysisSchema - Optional Zod schema or Schema object for output validation
+      * @returns Promise resolving to StreamTextResult or null if operation fails
+      */
+     streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+     /**
+      * Processes text using non-streaming approach with optional schema validation
+      * @param prompt - The input text prompt to analyze
+      * @param analysisSchema - Optional Zod schema or Schema object for output validation
+      * @returns Promise resolving to GenerateTextResult or null if operation fails
+      */
+     generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+ }
+ export default MistralAI;
@@ -0,0 +1,217 @@
+ import { createMistral } from '@ai-sdk/mistral';
+ import { streamText, generateText, Output } from 'ai';
+ import { logger } from '../utils/logger.js';
+ // Default system context
+ const DEFAULT_SYSTEM_CONTEXT = {
+     systemPrompt: 'You are a helpful AI assistant.'
+ };
+ // Configuration helpers
+ const getMistralApiKey = () => {
+     const apiKey = process.env.MISTRAL_API_KEY;
+     if (!apiKey) {
+         throw new Error('MISTRAL_API_KEY environment variable is not set');
+     }
+     return apiKey;
+ };
+ const getMistralModelId = () => {
+     return process.env.MISTRAL_MODEL || 'mistral-small';
+ };
+ const hasValidAuth = () => {
+     return !!process.env.MISTRAL_API_KEY;
+ };
+ // Lazy initialization cache
+ let _mistralClient = null;
+ function getMistralClient() {
+     if (!_mistralClient) {
+         const apiKey = getMistralApiKey();
+         _mistralClient = createMistral({
+             apiKey,
+             baseURL: process.env.MISTRAL_ENDPOINT || 'https://api.mistral.ai/v1'
+         });
+     }
+     return _mistralClient;
+ }
+ // Mistral AI class with enhanced error handling
+ export class MistralAI {
+     modelName;
+     client;
+     /**
+      * Initializes a new instance of MistralAI
+      * @param modelName - Optional model name to override the default from config
+      */
+     constructor(modelName) {
+         const functionTag = 'MistralAI.constructor';
+         this.modelName = modelName || getMistralModelId();
+         try {
+             this.client = getMistralClient();
+             logger.debug(`[${functionTag}] Initialization started`, {
+                 modelName: this.modelName,
+                 hasApiKey: hasValidAuth()
+             });
+             logger.debug(`[${functionTag}] Initialization completed`, {
+                 modelName: this.modelName,
+                 success: true
+             });
+         }
+         catch (err) {
+             logger.error(`[${functionTag}] Initialization failed`, {
+                 message: 'Error in initializing Mistral AI',
+                 modelName: this.modelName,
+                 error: err instanceof Error ? err.message : String(err),
+                 stack: err instanceof Error ? err.stack : undefined
+             });
+             throw err;
+         }
+     }
+     /**
+      * Gets the appropriate model instance
+      * @private
+      */
+     getModel() {
+         logger.debug('MistralAI.getModel - Mistral AI model selected', {
+             modelName: this.modelName
+         });
+         return this.client(this.modelName);
+     }
+     /**
+      * Processes text using streaming approach with enhanced error handling callbacks
+      * @param prompt - The input text prompt to analyze
+      * @param analysisSchema - Optional Zod schema or Schema object for output validation
+      * @returns Promise resolving to StreamTextResult or null if operation fails
+      */
+     async streamText(optionsOrPrompt, analysisSchema) {
+         const functionTag = 'MistralAI.streamText';
+         const provider = 'mistral';
+         let chunkCount = 0;
+         try {
+             // Parse parameters - support both string and options object
+             const options = typeof optionsOrPrompt === 'string'
+                 ? { prompt: optionsOrPrompt }
+                 : optionsOrPrompt;
+             const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+             // Use schema from options or fallback parameter
+             const finalSchema = schema || analysisSchema;
+             logger.debug(`[${functionTag}] Stream request started`, {
+                 provider,
+                 modelName: this.modelName,
+                 promptLength: prompt.length,
+                 temperature,
+                 maxTokens,
+                 hasSchema: !!finalSchema
+             });
+             const model = this.getModel();
+             const streamOptions = {
+                 model: model,
+                 prompt: prompt,
+                 system: systemPrompt,
+                 temperature,
+                 maxTokens,
+                 onError: (event) => {
+                     const error = event.error;
+                     const errorMessage = error instanceof Error ? error.message : String(error);
+                     const errorStack = error instanceof Error ? error.stack : undefined;
+                     logger.error(`[${functionTag}] Stream text error`, {
+                         provider,
+                         modelName: this.modelName,
+                         error: errorMessage,
+                         stack: errorStack,
+                         promptLength: prompt.length,
+                         chunkCount
+                     });
+                 },
+                 onFinish: (event) => {
+                     logger.debug(`[${functionTag}] Stream text finished`, {
+                         provider,
+                         modelName: this.modelName,
+                         finishReason: event.finishReason,
+                         usage: event.usage,
+                         totalChunks: chunkCount,
+                         promptLength: prompt.length,
+                         responseLength: event.text?.length || 0
+                     });
+                 },
+                 onChunk: (event) => {
+                     chunkCount++;
+                     logger.debug(`[${functionTag}] Stream text chunk`, {
+                         provider,
+                         modelName: this.modelName,
+                         chunkNumber: chunkCount,
+                         chunkLength: event.chunk.text?.length || 0,
+                         chunkType: event.chunk.type
+                     });
+                 }
+             };
+             if (finalSchema) {
+                 streamOptions.experimental_output = Output.object({ schema: finalSchema });
+             }
+             const result = streamText(streamOptions);
+             return result;
+         }
+         catch (err) {
+             logger.error(`[${functionTag}] Exception`, {
+                 provider,
+                 modelName: this.modelName,
+                 message: 'Error in streaming text',
+                 err: String(err),
+                 promptLength: typeof optionsOrPrompt === 'string' ? optionsOrPrompt.length : optionsOrPrompt.prompt.length
+             });
+             throw err; // Re-throw error to trigger fallback
+         }
+     }
+     /**
+      * Processes text using non-streaming approach with optional schema validation
+      * @param prompt - The input text prompt to analyze
+      * @param analysisSchema - Optional Zod schema or Schema object for output validation
+      * @returns Promise resolving to GenerateTextResult or null if operation fails
+      */
+     async generateText(optionsOrPrompt, analysisSchema) {
+         const functionTag = 'MistralAI.generateText';
+         const provider = 'mistral';
+         try {
+             // Parse parameters - support both string and options object
+             const options = typeof optionsOrPrompt === 'string'
+                 ? { prompt: optionsOrPrompt }
+                 : optionsOrPrompt;
+             const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+             // Use schema from options or fallback parameter
+             const finalSchema = schema || analysisSchema;
+             logger.debug(`[${functionTag}] Generate request started`, {
+                 provider,
+                 modelName: this.modelName,
+                 promptLength: prompt.length,
+                 temperature,
+                 maxTokens
+             });
+             const model = this.getModel();
+             const generateOptions = {
+                 model: model,
+                 prompt: prompt,
+                 system: systemPrompt,
+                 temperature,
+                 maxTokens
+             };
+             if (finalSchema) {
+                 generateOptions.experimental_output = Output.object({ schema: finalSchema });
+             }
+             const result = await generateText(generateOptions);
+             logger.debug(`[${functionTag}] Generate text completed`, {
+                 provider,
+                 modelName: this.modelName,
+                 usage: result.usage,
+                 finishReason: result.finishReason,
+                 responseLength: result.text?.length || 0
+             });
+             return result;
+         }
+         catch (err) {
+             logger.error(`[${functionTag}] Exception`, {
+                 provider,
+                 modelName: this.modelName,
+                 message: 'Error in generating text',
+                 err: String(err)
+             });
+             throw err; // Re-throw error to trigger fallback
+         }
+     }
+ }
+ export default MistralAI;
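Both new providers forward an optional schema (passed in the options object or as a second argument) to the AI SDK's experimental Output.object, so structured output can be requested. A sketch under those assumptions; the import path, Zod schema, and model name are illustrative, and the field the AI SDK attaches the parsed object to depends on the SDK version:

import { z } from 'zod';
// Sketch only: assumes the providers barrel is re-exported from the package root.
import { MistralAI } from '@juspay/neurolink';

const releaseNote = z.object({
    summary: z.string(),
    breaking: z.boolean(),
});

const mistral = new MistralAI('mistral-small'); // explicit model override; default comes from MISTRAL_MODEL
const result = await mistral.generateText({
    prompt: 'Describe this release as JSON with a summary and a breaking flag.',
    schema: releaseNote,
});
// The raw text is always available; the parsed object is exposed by the AI SDK's experimental output support.
console.log(result?.text);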