@juspay/neurolink 2.0.0 → 3.0.0

Files changed (50)
  1. package/CHANGELOG.md +34 -7
  2. package/README.md +38 -34
  3. package/dist/cli/commands/config.d.ts +6 -6
  4. package/dist/cli/index.js +46 -35
  5. package/dist/core/types.d.ts +2 -0
  6. package/dist/lib/core/types.d.ts +2 -0
  7. package/dist/lib/mcp/plugins/filesystem-mcp.d.ts +1 -1
  8. package/dist/lib/neurolink.d.ts +2 -0
  9. package/dist/lib/neurolink.js +23 -2
  10. package/dist/lib/providers/agent-enhanced-provider.d.ts +1 -0
  11. package/dist/lib/providers/agent-enhanced-provider.js +115 -51
  12. package/dist/lib/providers/amazonBedrock.js +74 -24
  13. package/dist/lib/providers/anthropic.js +80 -16
  14. package/dist/lib/providers/azureOpenAI.js +77 -15
  15. package/dist/lib/providers/googleAIStudio.js +77 -26
  16. package/dist/lib/providers/googleVertexAI.js +77 -24
  17. package/dist/lib/providers/huggingFace.js +74 -26
  18. package/dist/lib/providers/mistralAI.js +74 -26
  19. package/dist/lib/providers/ollama.d.ts +1 -1
  20. package/dist/lib/providers/ollama.js +32 -10
  21. package/dist/lib/providers/openAI.js +71 -23
  22. package/dist/lib/providers/timeout-wrapper.d.ts +40 -0
  23. package/dist/lib/providers/timeout-wrapper.js +100 -0
  24. package/dist/lib/proxy/proxy-fetch.d.ts +18 -0
  25. package/dist/lib/proxy/proxy-fetch.js +64 -0
  26. package/dist/lib/utils/timeout.d.ts +69 -0
  27. package/dist/lib/utils/timeout.js +138 -0
  28. package/dist/mcp/plugins/filesystem-mcp.d.ts +1 -1
  29. package/dist/mcp/plugins/filesystem-mcp.js +1 -1
  30. package/dist/neurolink.d.ts +2 -0
  31. package/dist/neurolink.js +23 -2
  32. package/dist/providers/agent-enhanced-provider.d.ts +1 -0
  33. package/dist/providers/agent-enhanced-provider.js +115 -51
  34. package/dist/providers/amazonBedrock.js +74 -24
  35. package/dist/providers/anthropic.js +80 -16
  36. package/dist/providers/azureOpenAI.js +77 -15
  37. package/dist/providers/googleAIStudio.js +77 -26
  38. package/dist/providers/googleVertexAI.js +77 -24
  39. package/dist/providers/huggingFace.js +74 -26
  40. package/dist/providers/mistralAI.js +74 -26
  41. package/dist/providers/ollama.d.ts +1 -1
  42. package/dist/providers/ollama.js +32 -10
  43. package/dist/providers/openAI.js +71 -23
  44. package/dist/providers/timeout-wrapper.d.ts +40 -0
  45. package/dist/providers/timeout-wrapper.js +100 -0
  46. package/dist/proxy/proxy-fetch.d.ts +18 -0
  47. package/dist/proxy/proxy-fetch.js +64 -0
  48. package/dist/utils/timeout.d.ts +69 -0
  49. package/dist/utils/timeout.js +138 -0
  50. package/package.json +2 -1
@@ -23,6 +23,8 @@ async function getCreateVertexAnthropic() {
  }
  import { streamText, generateText, Output, } from "ai";
  import { logger } from "../utils/logger.js";
+ import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+ import { createProxyFetch } from "../proxy/proxy-fetch.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
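
The timeout plumbing below leans on three exports from the new `utils/timeout.js` (+138 lines, listed above but not shown in this section). Reconstructing from the call sites in this diff, its surface looks roughly like the following sketch; the shapes are inferred from usage, and the default durations are placeholders, not the shipped source:

```ts
// Sketch reconstructed from call sites in this diff -- not the shipped source.
export class TimeoutError extends Error {
  constructor(
    message: string,
    public readonly timeout: number, // exceeded limit in ms (logged as err.timeout)
  ) {
    super(message);
    this.name = "TimeoutError";
  }
}

// ollama.js strips non-digits from the return value and uses the result as
// milliseconds, so this presumably returns a duration string such as "60000ms".
export function getDefaultTimeout(
  provider: string,
  operation: "generate" | "stream",
): string {
  return operation === "stream" ? "120000ms" : "60000ms"; // placeholder values
}

// Returns null when no timeout applies -- hence the `...(timeoutController && ...)`
// spread and the `timeoutController?.cleanup()` calls in the hunks below.
export function createTimeoutController(
  timeout: string | number | undefined,
  provider: string,
  operation: "generate" | "stream",
): { controller: AbortController; cleanup: () => void } | null {
  if (!timeout) {
    return null;
  }
  const ms =
    typeof timeout === "string"
      ? parseInt(timeout.replace(/[^\d]/g, ""), 10)
      : timeout;
  const controller = new AbortController();
  const id = setTimeout(() => {
    controller.abort(
      new TimeoutError(`${provider} ${operation} timed out after ${ms}ms`, ms),
    );
  }, ms);
  return { controller, cleanup: () => clearTimeout(id) };
}
```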
@@ -145,9 +147,11 @@ const createVertexSettings = async () => {
  const functionTag = "createVertexSettings";
  // Setup authentication first
  await setupGoogleAuth();
+ const proxyFetch = createProxyFetch();
  const baseSettings = {
  project: getGCPVertexBreezeProjectId(),
  location: getGCPVertexBreezeLocation(),
+ fetch: proxyFetch,
  };
  // Method 1: Principal Account Authentication (file path) - Recommended for production
  if (hasPrincipalAccountAuth()) {
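
`createProxyFetch` comes from the new `proxy/proxy-fetch.js` (+64 lines), also not shown in this section. Given that its result is handed to the Vertex SDK as a `fetch` setting, here is a minimal sketch of what such a factory typically looks like, assuming it honors the standard proxy environment variables via undici's `ProxyAgent`; the shipped implementation may differ:

```ts
// Hypothetical sketch of a proxy-aware fetch factory. Assumes undici,
// which ships with Node 18+, for the proxy dispatcher.
import { ProxyAgent, fetch as undiciFetch } from "undici";

export function createProxyFetch(): typeof globalThis.fetch {
  const proxyUrl =
    process.env.HTTPS_PROXY ??
    process.env.https_proxy ??
    process.env.HTTP_PROXY ??
    process.env.http_proxy;
  if (!proxyUrl) {
    // No proxy configured: return the platform fetch unchanged.
    return globalThis.fetch;
  }
  const dispatcher = new ProxyAgent(proxyUrl);
  return ((input: RequestInfo | URL, init: RequestInit = {}) =>
    undiciFetch(input as any, { ...(init as any), dispatcher })) as typeof globalThis.fetch;
}
```

Passing the wrapper through the provider's `fetch` setting routes only Vertex traffic through the proxy, without patching `globalThis.fetch`.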
@@ -285,7 +289,7 @@ export class GoogleVertexAI {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Stream request started`, {
@@ -296,14 +300,21 @@ export class GoogleVertexAI {
  temperature,
  maxTokens,
  hasSchema: !!finalSchema,
+ timeout,
  });
  const model = await this.getModel();
+ // Create timeout controller if timeout is specified
+ const timeoutController = createTimeoutController(timeout, provider, "stream");
  const streamOptions = {
  model: model,
  prompt: prompt,
  system: systemPrompt,
  temperature,
  maxTokens,
+ // Add abort signal if available
+ ...(timeoutController && {
+ abortSignal: timeoutController.controller.signal,
+ }),
  onError: (event) => {
  const error = event.error;
  const errorMessage = error instanceof Error ? error.message : String(error);
@@ -345,16 +356,30 @@ export class GoogleVertexAI {
  });
  }
  const result = streamText(streamOptions);
+ // For streaming, we can't clean up immediately, but the timeout will auto-clean
+ // The user should handle the stream and any timeout errors
  return result;
  }
  catch (err) {
- logger.error(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in streaming text",
- err: String(err),
- promptLength: prompt.length,
- });
+ // Log timeout errors specifically
+ if (err instanceof TimeoutError) {
+ logger.error(`[${functionTag}] Timeout error`, {
+ provider,
+ modelName: this.modelName,
+ isAnthropic: isAnthropicModel(this.modelName),
+ timeout: err.timeout,
+ message: err.message,
+ });
+ }
+ else {
+ logger.error(`[${functionTag}] Exception`, {
+ provider,
+ modelName: this.modelName,
+ message: "Error in streaming text",
+ err: String(err),
+ promptLength: prompt.length,
+ });
+ }
  throw err; // Re-throw error to trigger fallback
  }
  }
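
Because `streamText` returns synchronously and the chunks arrive later, the timeout controller for the streaming path cannot be cleaned up here; as the added comment says, the abort surfaces while the caller consumes the stream. A sketch of caller-side handling, where `result` is the value returned by the stream call above:

```ts
import type { streamText } from "ai";

// Consume a NeuroLink stream, tolerating an abort fired by the timeout controller.
async function consume(result: ReturnType<typeof streamText>) {
  try {
    for await (const chunk of result.textStream) {
      process.stdout.write(chunk);
    }
  } catch (err) {
    // The timeout abort lands here mid-iteration, after any partial
    // output has already been written.
    console.error("stream aborted:", err);
  }
}
```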
@@ -372,7 +397,7 @@ export class GoogleVertexAI {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Generate request started`, {
@@ -382,37 +407,65 @@ export class GoogleVertexAI {
  promptLength: prompt.length,
  temperature,
  maxTokens,
+ timeout,
  });
  const model = await this.getModel();
+ // Create timeout controller if timeout is specified
+ const timeoutController = createTimeoutController(timeout, provider, "generate");
  const generateOptions = {
  model: model,
  prompt: prompt,
  system: systemPrompt,
  temperature,
  maxTokens,
+ // Add abort signal if available
+ ...(timeoutController && {
+ abortSignal: timeoutController.controller.signal,
+ }),
  };
  if (finalSchema) {
  generateOptions.experimental_output = Output.object({
  schema: finalSchema,
  });
  }
- const result = await generateText(generateOptions);
- logger.debug(`[${functionTag}] Generate text completed`, {
- provider,
- modelName: this.modelName,
- usage: result.usage,
- finishReason: result.finishReason,
- responseLength: result.text?.length || 0,
- });
- return result;
+ try {
+ const result = await generateText(generateOptions);
+ // Clean up timeout if successful
+ timeoutController?.cleanup();
+ logger.debug(`[${functionTag}] Generate text completed`, {
+ provider,
+ modelName: this.modelName,
+ usage: result.usage,
+ finishReason: result.finishReason,
+ responseLength: result.text?.length || 0,
+ timeout,
+ });
+ return result;
+ }
+ finally {
+ // Always cleanup timeout
+ timeoutController?.cleanup();
+ }
  }
  catch (err) {
- logger.error(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in generating text",
- err: String(err),
- });
+ // Log timeout errors specifically
+ if (err instanceof TimeoutError) {
+ logger.error(`[${functionTag}] Timeout error`, {
+ provider,
+ modelName: this.modelName,
+ isAnthropic: isAnthropicModel(this.modelName),
+ timeout: err.timeout,
+ message: err.message,
+ });
+ }
+ else {
+ logger.error(`[${functionTag}] Exception`, {
+ provider,
+ modelName: this.modelName,
+ message: "Error in generating text",
+ err: String(err),
+ });
+ }
  throw err; // Re-throw error to trigger fallback
  }
  }
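
One detail of the generate path: on success, `cleanup()` runs twice, once before the debug log and again in `finally`. That is harmless because a `clearTimeout`-based cleanup is idempotent, and the `finally` block alone would cover both the success and the throw paths:

```ts
// Equivalent, slightly tighter shape of the same pattern: `finally` already
// covers the success path, so the explicit cleanup() before logging is redundant.
const timeoutController = createTimeoutController(timeout, provider, "generate");
try {
  return await generateText(generateOptions);
} finally {
  timeoutController?.cleanup(); // clearTimeout on an already-cleared id is a no-op
}
```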
@@ -1,6 +1,7 @@
  import { HfInference } from "@huggingface/inference";
  import { streamText, generateText, Output, } from "ai";
  import { logger } from "../utils/logger.js";
+ import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
@@ -228,7 +229,7 @@ export class HuggingFace {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Stream request started`, {
@@ -238,14 +239,21 @@ export class HuggingFace {
  temperature,
  maxTokens,
  hasSchema: !!finalSchema,
+ timeout,
  });
  const model = this.getModel();
+ // Create timeout controller if timeout is specified
+ const timeoutController = createTimeoutController(timeout, provider, "stream");
  const streamOptions = {
  model: model,
  prompt: prompt,
  system: systemPrompt,
  temperature,
  maxTokens,
+ // Add abort signal if available
+ ...(timeoutController && {
+ abortSignal: timeoutController.controller.signal,
+ }),
  onError: (event) => {
  const error = event.error;
  const errorMessage = error instanceof Error ? error.message : String(error);
@@ -287,18 +295,31 @@ export class HuggingFace {
  });
  }
  const result = streamText(streamOptions);
+ // For streaming, we can't clean up immediately, but the timeout will auto-clean
+ // The user should handle the stream and any timeout errors
  return result;
  }
  catch (err) {
- logger.error(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in streaming text",
- err: String(err),
- promptLength: typeof optionsOrPrompt === "string"
- ? optionsOrPrompt.length
- : optionsOrPrompt.prompt.length,
- });
+ // Log timeout errors specifically
+ if (err instanceof TimeoutError) {
+ logger.error(`[${functionTag}] Timeout error`, {
+ provider,
+ modelName: this.modelName,
+ timeout: err.timeout,
+ message: err.message,
+ });
+ }
+ else {
+ logger.error(`[${functionTag}] Exception`, {
+ provider,
+ modelName: this.modelName,
+ message: "Error in streaming text",
+ err: String(err),
+ promptLength: typeof optionsOrPrompt === "string"
+ ? optionsOrPrompt.length
+ : optionsOrPrompt.prompt.length,
+ });
+ }
  throw err; // Re-throw error to trigger fallback
  }
  }
@@ -316,7 +337,7 @@ export class HuggingFace {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Generate request started`, {
@@ -325,37 +346,64 @@ export class HuggingFace {
  promptLength: prompt.length,
  temperature,
  maxTokens,
+ timeout,
  });
  const model = this.getModel();
+ // Create timeout controller if timeout is specified
+ const timeoutController = createTimeoutController(timeout, provider, "generate");
  const generateOptions = {
  model: model,
  prompt: prompt,
  system: systemPrompt,
  temperature,
  maxTokens,
+ // Add abort signal if available
+ ...(timeoutController && {
+ abortSignal: timeoutController.controller.signal,
+ }),
  };
  if (finalSchema) {
  generateOptions.experimental_output = Output.object({
  schema: finalSchema,
  });
  }
- const result = await generateText(generateOptions);
- logger.debug(`[${functionTag}] Generate text completed`, {
- provider,
- modelName: this.modelName,
- usage: result.usage,
- finishReason: result.finishReason,
- responseLength: result.text?.length || 0,
- });
- return result;
+ try {
+ const result = await generateText(generateOptions);
+ // Clean up timeout if successful
+ timeoutController?.cleanup();
+ logger.debug(`[${functionTag}] Generate text completed`, {
+ provider,
+ modelName: this.modelName,
+ usage: result.usage,
+ finishReason: result.finishReason,
+ responseLength: result.text?.length || 0,
+ timeout,
+ });
+ return result;
+ }
+ finally {
+ // Always cleanup timeout
+ timeoutController?.cleanup();
+ }
  }
  catch (err) {
- logger.error(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in generating text",
- err: String(err),
- });
+ // Log timeout errors specifically
+ if (err instanceof TimeoutError) {
+ logger.error(`[${functionTag}] Timeout error`, {
+ provider,
+ modelName: this.modelName,
+ timeout: err.timeout,
+ message: err.message,
+ });
+ }
+ else {
+ logger.error(`[${functionTag}] Exception`, {
+ provider,
+ modelName: this.modelName,
+ message: "Error in generating text",
+ err: String(err),
+ });
+ }
  throw err; // Re-throw error to trigger fallback
  }
  }
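
The mistralAI.js hunks that follow repeat this pattern verbatim; only googleVertexAI.js adds the `isAnthropic` log field and the proxy fetch. The per-request option surface is therefore uniform across providers. A hypothetical call shape, with the option names taken straight from the destructuring above (the method and constructor signatures are assumptions, since those declarations fall outside this diff):

```ts
// Hypothetical usage of the uniform per-request options -- option names come
// from the destructured `options` in the hunks above; entry points may differ.
const hf = new HuggingFace(); // model resolved from env, as elsewhere in the package
const result = await hf.generate({
  prompt: "Translate 'good morning' to French.",
  temperature: 0.2,
  maxTokens: 100,
  // Omit `timeout` to fall back to getDefaultTimeout(provider, "generate");
  // pass a value to override it per request.
  timeout: "45000ms",
});
console.log(result.text);
```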
@@ -1,6 +1,7 @@
  import { createMistral } from "@ai-sdk/mistral";
  import { streamText, generateText, Output, } from "ai";
  import { logger } from "../utils/logger.js";
+ import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
@@ -88,7 +89,7 @@ export class MistralAI {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Stream request started`, {
@@ -98,14 +99,21 @@ export class MistralAI {
  temperature,
  maxTokens,
  hasSchema: !!finalSchema,
+ timeout,
  });
  const model = this.getModel();
+ // Create timeout controller if timeout is specified
+ const timeoutController = createTimeoutController(timeout, provider, "stream");
  const streamOptions = {
  model: model,
  prompt: prompt,
  system: systemPrompt,
  temperature,
  maxTokens,
+ // Add abort signal if available
+ ...(timeoutController && {
+ abortSignal: timeoutController.controller.signal,
+ }),
  onError: (event) => {
  const error = event.error;
  const errorMessage = error instanceof Error ? error.message : String(error);
@@ -147,18 +155,31 @@ export class MistralAI {
  });
  }
  const result = streamText(streamOptions);
+ // For streaming, we can't clean up immediately, but the timeout will auto-clean
+ // The user should handle the stream and any timeout errors
  return result;
  }
  catch (err) {
- logger.error(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in streaming text",
- err: String(err),
- promptLength: typeof optionsOrPrompt === "string"
- ? optionsOrPrompt.length
- : optionsOrPrompt.prompt.length,
- });
+ // Log timeout errors specifically
+ if (err instanceof TimeoutError) {
+ logger.error(`[${functionTag}] Timeout error`, {
+ provider,
+ modelName: this.modelName,
+ timeout: err.timeout,
+ message: err.message,
+ });
+ }
+ else {
+ logger.error(`[${functionTag}] Exception`, {
+ provider,
+ modelName: this.modelName,
+ message: "Error in streaming text",
+ err: String(err),
+ promptLength: typeof optionsOrPrompt === "string"
+ ? optionsOrPrompt.length
+ : optionsOrPrompt.prompt.length,
+ });
+ }
  throw err; // Re-throw error to trigger fallback
  }
  }
@@ -176,7 +197,7 @@ export class MistralAI {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Generate request started`, {
@@ -185,37 +206,64 @@ export class MistralAI {
  promptLength: prompt.length,
  temperature,
  maxTokens,
+ timeout,
  });
  const model = this.getModel();
+ // Create timeout controller if timeout is specified
+ const timeoutController = createTimeoutController(timeout, provider, "generate");
  const generateOptions = {
  model: model,
  prompt: prompt,
  system: systemPrompt,
  temperature,
  maxTokens,
+ // Add abort signal if available
+ ...(timeoutController && {
+ abortSignal: timeoutController.controller.signal,
+ }),
  };
  if (finalSchema) {
  generateOptions.experimental_output = Output.object({
  schema: finalSchema,
  });
  }
- const result = await generateText(generateOptions);
- logger.debug(`[${functionTag}] Generate text completed`, {
- provider,
- modelName: this.modelName,
- usage: result.usage,
- finishReason: result.finishReason,
- responseLength: result.text?.length || 0,
- });
- return result;
+ try {
+ const result = await generateText(generateOptions);
+ // Clean up timeout if successful
+ timeoutController?.cleanup();
+ logger.debug(`[${functionTag}] Generate text completed`, {
+ provider,
+ modelName: this.modelName,
+ usage: result.usage,
+ finishReason: result.finishReason,
+ responseLength: result.text?.length || 0,
+ timeout,
+ });
+ return result;
+ }
+ finally {
+ // Always cleanup timeout
+ timeoutController?.cleanup();
+ }
  }
  catch (err) {
- logger.error(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in generating text",
- err: String(err),
- });
+ // Log timeout errors specifically
+ if (err instanceof TimeoutError) {
+ logger.error(`[${functionTag}] Timeout error`, {
+ provider,
+ modelName: this.modelName,
+ timeout: err.timeout,
+ message: err.message,
+ });
+ }
+ else {
+ logger.error(`[${functionTag}] Exception`, {
+ provider,
+ modelName: this.modelName,
+ message: "Error in generating text",
+ err: String(err),
+ });
+ }
  throw err; // Re-throw error to trigger fallback
  }
  }
@@ -17,7 +17,7 @@ import type { Schema } from "ai";
  export declare class Ollama implements AIProvider {
  private baseUrl;
  private modelName;
- private timeout;
+ private defaultTimeout;
  constructor(modelName?: string);
  /**
  * Gets the appropriate model instance
@@ -12,6 +12,7 @@
  */
  import { streamText, generateText, Output } from "ai";
  import { logger } from "../utils/logger.js";
+ import { getDefaultTimeout } from "../utils/timeout.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
@@ -275,32 +276,39 @@ class OllamaLanguageModel {
  export class Ollama {
  baseUrl;
  modelName;
- timeout;
+ defaultTimeout;
  constructor(modelName) {
  this.baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
  this.modelName = modelName || process.env.OLLAMA_MODEL || "llama2";
- this.timeout = parseInt(process.env.OLLAMA_TIMEOUT || "60000"); // 60 seconds default
+ // Use environment variable for backward compatibility, but convert to format used by other providers
+ const envTimeout = process.env.OLLAMA_TIMEOUT
+ ? parseInt(process.env.OLLAMA_TIMEOUT)
+ : undefined;
+ this.defaultTimeout =
+ envTimeout ||
+ parseInt(getDefaultTimeout("ollama", "generate").replace(/[^\d]/g, ""));
  logger.debug("[Ollama] Initialized", {
  baseUrl: this.baseUrl,
  modelName: this.modelName,
- timeout: this.timeout,
+ defaultTimeout: this.defaultTimeout,
  });
  }
  /**
  * Gets the appropriate model instance
  * @private
  */
- getModel() {
+ getModel(timeout) {
  logger.debug("Ollama.getModel - Ollama model selected", {
  modelName: this.modelName,
+ timeout: timeout || this.defaultTimeout,
  });
- return new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
+ return new OllamaLanguageModel(this.modelName, this.baseUrl, timeout || this.defaultTimeout);
  }
  /**
  * Health check - verify Ollama service is running and accessible
  */
  async checkHealth() {
- const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
+ const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.defaultTimeout);
  return await model["checkHealth"]();
  }
  /**
@@ -388,17 +396,24 @@ export class Ollama {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
+ // Convert timeout to milliseconds if provided as string
+ const timeoutMs = timeout
+ ? typeof timeout === "string"
+ ? parseInt(getDefaultTimeout("ollama", "generate").replace(/[^\d]/g, ""))
+ : timeout
+ : this.defaultTimeout;
  logger.debug(`[${functionTag}] Generate request started`, {
  provider,
  modelName: this.modelName,
  promptLength: prompt.length,
  temperature,
  maxTokens,
+ timeout: timeoutMs,
  });
- const model = this.getModel();
+ const model = this.getModel(timeoutMs);
  const generateOptions = {
  model: model,
  prompt: prompt,
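
Two things stand out in this conversion. First, it confirms that `getDefaultTimeout` returns a string with an embedded millisecond count, since the regex keeps only its digits. Second, when the caller passes a string-valued `timeout`, the string branch parses the provider default rather than the caller's own value, so something like `timeout: "30000ms"` is silently replaced by the default. A helper that honors the caller's string instead would look like this (hypothetical, not in the package):

```ts
// Hypothetical fix sketch: parse the caller's own duration string, and fall
// back to the provider default only when no timeout was supplied at all.
function toMs(timeout: string | number | undefined, fallbackMs: number): number {
  if (timeout === undefined) {
    return fallbackMs;
  }
  if (typeof timeout === "number") {
    return timeout;
  }
  return parseInt(timeout.replace(/[^\d]/g, ""), 10); // "30000ms" -> 30000
}
```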
@@ -446,9 +461,15 @@ export class Ollama {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
+ // Convert timeout to milliseconds if provided as string
+ const timeoutMs = timeout
+ ? typeof timeout === "string"
+ ? parseInt(getDefaultTimeout("ollama", "stream").replace(/[^\d]/g, ""))
+ : timeout
+ : this.defaultTimeout;
  logger.debug(`[${functionTag}] Stream request started`, {
  provider,
  modelName: this.modelName,
@@ -456,8 +477,9 @@ export class Ollama {
  temperature,
  maxTokens,
  hasSchema: !!finalSchema,
+ timeout: timeoutMs,
  });
- const model = this.getModel();
+ const model = this.getModel(timeoutMs);
  const streamOptions = {
  model: model,
  prompt: prompt,