@juspay/neurolink 2.0.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/CHANGELOG.md +7 -0
  2. package/README.md +31 -5
  3. package/dist/cli/commands/config.d.ts +6 -6
  4. package/dist/cli/index.js +29 -30
  5. package/dist/core/types.d.ts +2 -0
  6. package/dist/lib/core/types.d.ts +2 -0
  7. package/dist/lib/neurolink.d.ts +2 -0
  8. package/dist/lib/neurolink.js +23 -2
  9. package/dist/lib/providers/agent-enhanced-provider.d.ts +1 -0
  10. package/dist/lib/providers/agent-enhanced-provider.js +59 -3
  11. package/dist/lib/providers/amazonBedrock.js +70 -24
  12. package/dist/lib/providers/anthropic.js +77 -15
  13. package/dist/lib/providers/azureOpenAI.js +77 -15
  14. package/dist/lib/providers/googleAIStudio.js +70 -26
  15. package/dist/lib/providers/googleVertexAI.js +70 -24
  16. package/dist/lib/providers/huggingFace.js +70 -26
  17. package/dist/lib/providers/mistralAI.js +70 -26
  18. package/dist/lib/providers/ollama.d.ts +1 -1
  19. package/dist/lib/providers/ollama.js +24 -10
  20. package/dist/lib/providers/openAI.js +67 -23
  21. package/dist/lib/providers/timeout-wrapper.d.ts +40 -0
  22. package/dist/lib/providers/timeout-wrapper.js +100 -0
  23. package/dist/lib/utils/timeout.d.ts +69 -0
  24. package/dist/lib/utils/timeout.js +130 -0
  25. package/dist/neurolink.d.ts +2 -0
  26. package/dist/neurolink.js +23 -2
  27. package/dist/providers/agent-enhanced-provider.d.ts +1 -0
  28. package/dist/providers/agent-enhanced-provider.js +59 -3
  29. package/dist/providers/amazonBedrock.js +70 -24
  30. package/dist/providers/anthropic.js +77 -15
  31. package/dist/providers/azureOpenAI.js +77 -15
  32. package/dist/providers/googleAIStudio.js +70 -26
  33. package/dist/providers/googleVertexAI.js +70 -24
  34. package/dist/providers/huggingFace.js +70 -26
  35. package/dist/providers/mistralAI.js +70 -26
  36. package/dist/providers/ollama.d.ts +1 -1
  37. package/dist/providers/ollama.js +24 -10
  38. package/dist/providers/openAI.js +67 -23
  39. package/dist/providers/timeout-wrapper.d.ts +40 -0
  40. package/dist/providers/timeout-wrapper.js +100 -0
  41. package/dist/utils/timeout.d.ts +69 -0
  42. package/dist/utils/timeout.js +130 -0
  43. package/package.json +1 -1
@@ -1,6 +1,7 @@
1
1
  import { createMistral } from "@ai-sdk/mistral";
2
2
  import { streamText, generateText, Output, } from "ai";
3
3
  import { logger } from "../utils/logger.js";
4
+ import { createTimeoutController, TimeoutError, getDefaultTimeout } from "../utils/timeout.js";
4
5
  // Default system context
5
6
  const DEFAULT_SYSTEM_CONTEXT = {
6
7
  systemPrompt: "You are a helpful AI assistant.",
@@ -88,7 +89,7 @@ export class MistralAI {
88
89
  const options = typeof optionsOrPrompt === "string"
89
90
  ? { prompt: optionsOrPrompt }
90
91
  : optionsOrPrompt;
91
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
92
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, 'stream'), } = options;
92
93
  // Use schema from options or fallback parameter
93
94
  const finalSchema = schema || analysisSchema;
94
95
  logger.debug(`[${functionTag}] Stream request started`, {
@@ -98,14 +99,19 @@ export class MistralAI {
98
99
  temperature,
99
100
  maxTokens,
100
101
  hasSchema: !!finalSchema,
102
+ timeout,
101
103
  });
102
104
  const model = this.getModel();
105
+ // Create timeout controller if timeout is specified
106
+ const timeoutController = createTimeoutController(timeout, provider, 'stream');
103
107
  const streamOptions = {
104
108
  model: model,
105
109
  prompt: prompt,
106
110
  system: systemPrompt,
107
111
  temperature,
108
112
  maxTokens,
113
+ // Add abort signal if available
114
+ ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
109
115
  onError: (event) => {
110
116
  const error = event.error;
111
117
  const errorMessage = error instanceof Error ? error.message : String(error);
@@ -147,18 +153,31 @@ export class MistralAI {
147
153
  });
148
154
  }
149
155
  const result = streamText(streamOptions);
156
+ // For streaming, we can't clean up immediately, but the timeout will auto-clean
157
+ // The user should handle the stream and any timeout errors
150
158
  return result;
151
159
  }
152
160
  catch (err) {
153
- logger.error(`[${functionTag}] Exception`, {
154
- provider,
155
- modelName: this.modelName,
156
- message: "Error in streaming text",
157
- err: String(err),
158
- promptLength: typeof optionsOrPrompt === "string"
159
- ? optionsOrPrompt.length
160
- : optionsOrPrompt.prompt.length,
161
- });
161
+ // Log timeout errors specifically
162
+ if (err instanceof TimeoutError) {
163
+ logger.error(`[${functionTag}] Timeout error`, {
164
+ provider,
165
+ modelName: this.modelName,
166
+ timeout: err.timeout,
167
+ message: err.message,
168
+ });
169
+ }
170
+ else {
171
+ logger.error(`[${functionTag}] Exception`, {
172
+ provider,
173
+ modelName: this.modelName,
174
+ message: "Error in streaming text",
175
+ err: String(err),
176
+ promptLength: typeof optionsOrPrompt === "string"
177
+ ? optionsOrPrompt.length
178
+ : optionsOrPrompt.prompt.length,
179
+ });
180
+ }
162
181
  throw err; // Re-throw error to trigger fallback
163
182
  }
164
183
  }
@@ -176,7 +195,7 @@ export class MistralAI {
176
195
  const options = typeof optionsOrPrompt === "string"
177
196
  ? { prompt: optionsOrPrompt }
178
197
  : optionsOrPrompt;
179
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
198
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, 'generate'), } = options;
180
199
  // Use schema from options or fallback parameter
181
200
  const finalSchema = schema || analysisSchema;
182
201
  logger.debug(`[${functionTag}] Generate request started`, {
@@ -185,37 +204,62 @@ export class MistralAI {
185
204
  promptLength: prompt.length,
186
205
  temperature,
187
206
  maxTokens,
207
+ timeout,
188
208
  });
189
209
  const model = this.getModel();
210
+ // Create timeout controller if timeout is specified
211
+ const timeoutController = createTimeoutController(timeout, provider, 'generate');
190
212
  const generateOptions = {
191
213
  model: model,
192
214
  prompt: prompt,
193
215
  system: systemPrompt,
194
216
  temperature,
195
217
  maxTokens,
218
+ // Add abort signal if available
219
+ ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
196
220
  };
197
221
  if (finalSchema) {
198
222
  generateOptions.experimental_output = Output.object({
199
223
  schema: finalSchema,
200
224
  });
201
225
  }
202
- const result = await generateText(generateOptions);
203
- logger.debug(`[${functionTag}] Generate text completed`, {
204
- provider,
205
- modelName: this.modelName,
206
- usage: result.usage,
207
- finishReason: result.finishReason,
208
- responseLength: result.text?.length || 0,
209
- });
210
- return result;
226
+ try {
227
+ const result = await generateText(generateOptions);
228
+ // Clean up timeout if successful
229
+ timeoutController?.cleanup();
230
+ logger.debug(`[${functionTag}] Generate text completed`, {
231
+ provider,
232
+ modelName: this.modelName,
233
+ usage: result.usage,
234
+ finishReason: result.finishReason,
235
+ responseLength: result.text?.length || 0,
236
+ timeout,
237
+ });
238
+ return result;
239
+ }
240
+ finally {
241
+ // Always cleanup timeout
242
+ timeoutController?.cleanup();
243
+ }
211
244
  }
212
245
  catch (err) {
213
- logger.error(`[${functionTag}] Exception`, {
214
- provider,
215
- modelName: this.modelName,
216
- message: "Error in generating text",
217
- err: String(err),
218
- });
246
+ // Log timeout errors specifically
247
+ if (err instanceof TimeoutError) {
248
+ logger.error(`[${functionTag}] Timeout error`, {
249
+ provider,
250
+ modelName: this.modelName,
251
+ timeout: err.timeout,
252
+ message: err.message,
253
+ });
254
+ }
255
+ else {
256
+ logger.error(`[${functionTag}] Exception`, {
257
+ provider,
258
+ modelName: this.modelName,
259
+ message: "Error in generating text",
260
+ err: String(err),
261
+ });
262
+ }
219
263
  throw err; // Re-throw error to trigger fallback
220
264
  }
221
265
  }
@@ -17,7 +17,7 @@ import type { Schema } from "ai";
17
17
  export declare class Ollama implements AIProvider {
18
18
  private baseUrl;
19
19
  private modelName;
20
- private timeout;
20
+ private defaultTimeout;
21
21
  constructor(modelName?: string);
22
22
  /**
23
23
  * Gets the appropriate model instance
@@ -12,6 +12,7 @@
12
12
  */
13
13
  import { streamText, generateText, Output } from "ai";
14
14
  import { logger } from "../utils/logger.js";
15
+ import { getDefaultTimeout, TimeoutError } from "../utils/timeout.js";
15
16
  // Default system context
16
17
  const DEFAULT_SYSTEM_CONTEXT = {
17
18
  systemPrompt: "You are a helpful AI assistant.",
@@ -275,32 +276,35 @@ class OllamaLanguageModel {
275
276
  export class Ollama {
276
277
  baseUrl;
277
278
  modelName;
278
- timeout;
279
+ defaultTimeout;
279
280
  constructor(modelName) {
280
281
  this.baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
281
282
  this.modelName = modelName || process.env.OLLAMA_MODEL || "llama2";
282
- this.timeout = parseInt(process.env.OLLAMA_TIMEOUT || "60000"); // 60 seconds default
283
+ // Use environment variable for backward compatibility, but convert to format used by other providers
284
+ const envTimeout = process.env.OLLAMA_TIMEOUT ? parseInt(process.env.OLLAMA_TIMEOUT) : undefined;
285
+ this.defaultTimeout = envTimeout || parseInt(getDefaultTimeout('ollama', 'generate').replace(/[^\d]/g, ''));
283
286
  logger.debug("[Ollama] Initialized", {
284
287
  baseUrl: this.baseUrl,
285
288
  modelName: this.modelName,
286
- timeout: this.timeout,
289
+ defaultTimeout: this.defaultTimeout,
287
290
  });
288
291
  }
289
292
  /**
290
293
  * Gets the appropriate model instance
291
294
  * @private
292
295
  */
293
- getModel() {
296
+ getModel(timeout) {
294
297
  logger.debug("Ollama.getModel - Ollama model selected", {
295
298
  modelName: this.modelName,
299
+ timeout: timeout || this.defaultTimeout,
296
300
  });
297
- return new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
301
+ return new OllamaLanguageModel(this.modelName, this.baseUrl, timeout || this.defaultTimeout);
298
302
  }
299
303
  /**
300
304
  * Health check - verify Ollama service is running and accessible
301
305
  */
302
306
  async checkHealth() {
303
- const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
307
+ const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.defaultTimeout);
304
308
  return await model["checkHealth"]();
305
309
  }
306
310
  /**
@@ -388,17 +392,22 @@ export class Ollama {
388
392
  const options = typeof optionsOrPrompt === "string"
389
393
  ? { prompt: optionsOrPrompt }
390
394
  : optionsOrPrompt;
391
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
395
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
392
396
  // Use schema from options or fallback parameter
393
397
  const finalSchema = schema || analysisSchema;
398
+ // Convert timeout to milliseconds if provided as string
399
+ const timeoutMs = timeout
400
+ ? (typeof timeout === 'string' ? parseInt(getDefaultTimeout('ollama', 'generate').replace(/[^\d]/g, '')) : timeout)
401
+ : this.defaultTimeout;
394
402
  logger.debug(`[${functionTag}] Generate request started`, {
395
403
  provider,
396
404
  modelName: this.modelName,
397
405
  promptLength: prompt.length,
398
406
  temperature,
399
407
  maxTokens,
408
+ timeout: timeoutMs,
400
409
  });
401
- const model = this.getModel();
410
+ const model = this.getModel(timeoutMs);
402
411
  const generateOptions = {
403
412
  model: model,
404
413
  prompt: prompt,
@@ -446,9 +455,13 @@ export class Ollama {
446
455
  const options = typeof optionsOrPrompt === "string"
447
456
  ? { prompt: optionsOrPrompt }
448
457
  : optionsOrPrompt;
449
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
458
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
450
459
  // Use schema from options or fallback parameter
451
460
  const finalSchema = schema || analysisSchema;
461
+ // Convert timeout to milliseconds if provided as string
462
+ const timeoutMs = timeout
463
+ ? (typeof timeout === 'string' ? parseInt(getDefaultTimeout('ollama', 'stream').replace(/[^\d]/g, '')) : timeout)
464
+ : this.defaultTimeout;
452
465
  logger.debug(`[${functionTag}] Stream request started`, {
453
466
  provider,
454
467
  modelName: this.modelName,
@@ -456,8 +469,9 @@ export class Ollama {
456
469
  temperature,
457
470
  maxTokens,
458
471
  hasSchema: !!finalSchema,
472
+ timeout: timeoutMs,
459
473
  });
460
- const model = this.getModel();
474
+ const model = this.getModel(timeoutMs);
461
475
  const streamOptions = {
462
476
  model: model,
463
477
  prompt: prompt,
@@ -1,6 +1,7 @@
1
1
  import { openai } from "@ai-sdk/openai";
2
2
  import { streamText, generateText, Output, } from "ai";
3
3
  import { logger } from "../utils/logger.js";
4
+ import { createTimeoutController, getDefaultTimeout, TimeoutError } from "../utils/timeout.js";
4
5
  // Default system context
5
6
  const DEFAULT_SYSTEM_CONTEXT = {
6
7
  systemPrompt: "You are a helpful AI assistant.",
@@ -59,7 +60,7 @@ export class OpenAI {
59
60
  const options = typeof optionsOrPrompt === "string"
60
61
  ? { prompt: optionsOrPrompt }
61
62
  : optionsOrPrompt;
62
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
63
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, 'stream'), } = options;
63
64
  // Use schema from options or fallback parameter
64
65
  const finalSchema = schema || analysisSchema;
65
66
  logger.debug(`[${functionTag}] Stream text started`, {
@@ -68,13 +69,18 @@ export class OpenAI {
68
69
  promptLength: prompt.length,
69
70
  temperature,
70
71
  maxTokens,
72
+ timeout,
71
73
  });
74
+ // Create timeout controller if timeout is specified
75
+ const timeoutController = createTimeoutController(timeout, provider, 'stream');
72
76
  const streamOptions = {
73
77
  model: this.model,
74
78
  prompt: prompt,
75
79
  system: systemPrompt,
76
80
  temperature,
77
81
  maxTokens,
82
+ // Add abort signal if available
83
+ ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
78
84
  onError: (event) => {
79
85
  const error = event.error;
80
86
  const errorMessage = error instanceof Error ? error.message : String(error);
@@ -116,15 +122,28 @@ export class OpenAI {
116
122
  });
117
123
  }
118
124
  const result = streamText(streamOptions);
125
+ // For streaming, we can't clean up immediately, but the timeout will auto-clean
126
+ // The user should handle the stream and any timeout errors
119
127
  return result;
120
128
  }
121
129
  catch (err) {
122
- logger.debug(`[${functionTag}] Exception`, {
123
- provider,
124
- modelName: this.modelName,
125
- message: "Error in streaming text",
126
- err: String(err),
127
- });
130
+ // Log timeout errors specifically
131
+ if (err instanceof TimeoutError) {
132
+ logger.debug(`[${functionTag}] Timeout error`, {
133
+ provider,
134
+ modelName: this.modelName,
135
+ timeout: err.timeout,
136
+ message: err.message,
137
+ });
138
+ }
139
+ else {
140
+ logger.debug(`[${functionTag}] Exception`, {
141
+ provider,
142
+ modelName: this.modelName,
143
+ message: "Error in streaming text",
144
+ err: String(err),
145
+ });
146
+ }
128
147
  throw err; // Re-throw error to trigger fallback
129
148
  }
130
149
  }
@@ -136,7 +155,7 @@ export class OpenAI {
136
155
  const options = typeof optionsOrPrompt === "string"
137
156
  ? { prompt: optionsOrPrompt }
138
157
  : optionsOrPrompt;
139
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
158
+ const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, 'generate'), } = options;
140
159
  // Use schema from options or fallback parameter
141
160
  const finalSchema = schema || analysisSchema;
142
161
  logger.debug(`[${functionTag}] Generate text started`, {
@@ -145,36 +164,61 @@ export class OpenAI {
145
164
  promptLength: prompt.length,
146
165
  temperature,
147
166
  maxTokens,
167
+ timeout,
148
168
  });
169
+ // Create timeout controller if timeout is specified
170
+ const timeoutController = createTimeoutController(timeout, provider, 'generate');
149
171
  const generateOptions = {
150
172
  model: this.model,
151
173
  prompt: prompt,
152
174
  system: systemPrompt,
153
175
  temperature,
154
176
  maxTokens,
177
+ // Add abort signal if available
178
+ ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
155
179
  };
156
180
  if (finalSchema) {
157
181
  generateOptions.experimental_output = Output.object({
158
182
  schema: finalSchema,
159
183
  });
160
184
  }
161
- const result = await generateText(generateOptions);
162
- logger.debug(`[${functionTag}] Generate text completed`, {
163
- provider,
164
- modelName: this.modelName,
165
- usage: result.usage,
166
- finishReason: result.finishReason,
167
- responseLength: result.text?.length || 0,
168
- });
169
- return result;
185
+ try {
186
+ const result = await generateText(generateOptions);
187
+ // Clean up timeout if successful
188
+ timeoutController?.cleanup();
189
+ logger.debug(`[${functionTag}] Generate text completed`, {
190
+ provider,
191
+ modelName: this.modelName,
192
+ usage: result.usage,
193
+ finishReason: result.finishReason,
194
+ responseLength: result.text?.length || 0,
195
+ timeout,
196
+ });
197
+ return result;
198
+ }
199
+ finally {
200
+ // Always cleanup timeout
201
+ timeoutController?.cleanup();
202
+ }
170
203
  }
171
204
  catch (err) {
172
- logger.debug(`[${functionTag}] Exception`, {
173
- provider,
174
- modelName: this.modelName,
175
- message: "Error in generating text",
176
- err: String(err),
177
- });
205
+ // Log timeout errors specifically
206
+ if (err instanceof TimeoutError) {
207
+ logger.debug(`[${functionTag}] Timeout error`, {
208
+ provider,
209
+ modelName: this.modelName,
210
+ timeout: err.timeout,
211
+ message: err.message,
212
+ });
213
+ }
214
+ else {
215
+ logger.debug(`[${functionTag}] Exception`, {
216
+ provider,
217
+ modelName: this.modelName,
218
+ message: "Error in generating text",
219
+ err: String(err),
220
+ });
221
+ }
178
222
  throw err; // Re-throw error to trigger fallback
179
223
  }
180
224
  }
@@ -0,0 +1,40 @@
1
+ /**
2
+ * Timeout wrapper for AI provider operations
3
+ *
4
+ * Provides a consistent way to add timeout functionality to any async operation.
5
+ */
6
+ /**
7
+ * Wrap an async operation with a timeout
8
+ * @param promise - The promise to wrap
9
+ * @param timeout - Timeout duration (number in ms or string with unit)
10
+ * @param provider - Provider name for error messages
11
+ * @param operation - Operation type (generate or stream)
12
+ * @returns The result of the promise or throws TimeoutError
13
+ */
14
+ export declare function withTimeout<T>(promise: Promise<T>, timeout: number | string | undefined, provider: string, operation: 'generate' | 'stream'): Promise<T>;
15
+ /**
16
+ * Wrap a streaming async generator with timeout
17
+ * @param generator - The async generator to wrap
18
+ * @param timeout - Timeout duration for the entire stream
19
+ * @param provider - Provider name for error messages
20
+ * @returns Wrapped async generator that respects timeout
21
+ */
22
+ export declare function withStreamingTimeout<T>(generator: AsyncGenerator<T>, timeout: number | string | undefined, provider: string): AsyncGenerator<T>;
23
+ /**
24
+ * Create an abort controller with timeout
25
+ * @param timeout - Timeout duration
26
+ * @param provider - Provider name for error messages
27
+ * @param operation - Operation type
28
+ * @returns AbortController and cleanup function
29
+ */
30
+ export declare function createTimeoutController(timeout: number | string | undefined, provider: string, operation: 'generate' | 'stream'): {
31
+ controller: AbortController;
32
+ cleanup: () => void;
33
+ timeoutMs: number;
34
+ } | null;
35
+ /**
36
+ * Merge abort signals (for combining user abort with timeout)
37
+ * @param signals - Array of abort signals to merge
38
+ * @returns Combined abort controller
39
+ */
40
+ export declare function mergeAbortSignals(signals: (AbortSignal | undefined)[]): AbortController;
@@ -0,0 +1,100 @@
1
+ /**
2
+ * Timeout wrapper for AI provider operations
3
+ *
4
+ * Provides a consistent way to add timeout functionality to any async operation.
5
+ */
6
+ import { parseTimeout, TimeoutError, createTimeoutPromise } from '../utils/timeout.js';
7
+ /**
8
+ * Wrap an async operation with a timeout
9
+ * @param promise - The promise to wrap
10
+ * @param timeout - Timeout duration (number in ms or string with unit)
11
+ * @param provider - Provider name for error messages
12
+ * @param operation - Operation type (generate or stream)
13
+ * @returns The result of the promise or throws TimeoutError
14
+ */
15
+ export async function withTimeout(promise, timeout, provider, operation) {
16
+ const timeoutPromise = createTimeoutPromise(timeout, provider, operation);
17
+ if (!timeoutPromise) {
18
+ // No timeout specified, return original promise
19
+ return promise;
20
+ }
21
+ // Race between the actual operation and timeout
22
+ return Promise.race([promise, timeoutPromise]);
23
+ }
24
+ /**
25
+ * Wrap a streaming async generator with timeout
26
+ * @param generator - The async generator to wrap
27
+ * @param timeout - Timeout duration for the entire stream
28
+ * @param provider - Provider name for error messages
29
+ * @returns Wrapped async generator that respects timeout
30
+ */
31
+ export async function* withStreamingTimeout(generator, timeout, provider) {
32
+ const timeoutMs = parseTimeout(timeout);
33
+ if (!timeoutMs) {
34
+ // No timeout, pass through original generator
35
+ yield* generator;
36
+ return;
37
+ }
38
+ const startTime = Date.now();
39
+ try {
40
+ for await (const chunk of generator) {
41
+ // Check if we've exceeded the timeout
42
+ if (Date.now() - startTime > timeoutMs) {
43
+ throw new TimeoutError(`${provider} streaming operation timed out after ${timeout}`, timeoutMs, provider, 'stream');
44
+ }
45
+ yield chunk;
46
+ }
47
+ }
48
+ finally {
49
+ // Ensure generator is properly closed
50
+ if (generator.return) {
51
+ await generator.return(undefined);
52
+ }
53
+ }
54
+ }
55
+ /**
56
+ * Create an abort controller with timeout
57
+ * @param timeout - Timeout duration
58
+ * @param provider - Provider name for error messages
59
+ * @param operation - Operation type
60
+ * @returns AbortController and cleanup function
61
+ */
62
+ export function createTimeoutController(timeout, provider, operation) {
63
+ const timeoutMs = parseTimeout(timeout);
64
+ if (!timeoutMs) {
65
+ return null;
66
+ }
67
+ const controller = new AbortController();
68
+ const timer = setTimeout(() => {
69
+ controller.abort(new TimeoutError(`${provider} ${operation} operation timed out after ${timeout}`, timeoutMs, provider, operation));
70
+ }, timeoutMs);
71
+ // Cleanup function to clear the timer
72
+ const cleanup = () => {
73
+ clearTimeout(timer);
74
+ };
75
+ return { controller, cleanup, timeoutMs };
76
+ }
77
+ /**
78
+ * Merge abort signals (for combining user abort with timeout)
79
+ * @param signals - Array of abort signals to merge
80
+ * @returns Combined abort controller
81
+ */
82
+ export function mergeAbortSignals(signals) {
83
+ const controller = new AbortController();
84
+ // Listen to all signals and abort when any fires
85
+ for (const signal of signals) {
86
+ if (signal && !signal.aborted) {
87
+ signal.addEventListener('abort', () => {
88
+ if (!controller.signal.aborted) {
89
+ controller.abort(signal.reason);
90
+ }
91
+ });
92
+ }
93
+ // If any signal is already aborted, abort immediately
94
+ if (signal?.aborted) {
95
+ controller.abort(signal.reason);
96
+ break;
97
+ }
98
+ }
99
+ return controller;
100
+ }
@@ -0,0 +1,69 @@
1
+ /**
2
+ * Timeout utilities for NeuroLink
3
+ *
4
+ * Provides flexible timeout parsing and error handling for AI operations.
5
+ * Supports multiple time formats: milliseconds, seconds, minutes, hours.
6
+ */
7
+ /**
8
+ * Custom error class for timeout operations
9
+ */
10
+ export declare class TimeoutError extends Error {
11
+ readonly timeout: number;
12
+ readonly provider?: string | undefined;
13
+ readonly operation?: "generate" | "stream" | undefined;
14
+ constructor(message: string, timeout: number, provider?: string | undefined, operation?: "generate" | "stream" | undefined);
15
+ }
16
+ /**
17
+ * Parse timeout value from various formats
18
+ * @param timeout - Can be number (ms), string with unit, or undefined
19
+ * @returns Parsed timeout in milliseconds or undefined
20
+ * @throws Error if format is invalid
21
+ *
22
+ * Examples:
23
+ * - parseTimeout(5000) => 5000
24
+ * - parseTimeout('30s') => 30000
25
+ * - parseTimeout('2m') => 120000
26
+ * - parseTimeout('1.5h') => 5400000
27
+ * - parseTimeout(undefined) => undefined
28
+ */
29
+ export declare function parseTimeout(timeout: number | string | undefined): number | undefined;
30
+ /**
31
+ * Default timeout configurations for different providers and operations
32
+ */
33
+ export declare const DEFAULT_TIMEOUTS: {
34
+ global: string;
35
+ streaming: string;
36
+ providers: {
37
+ openai: string;
38
+ bedrock: string;
39
+ vertex: string;
40
+ anthropic: string;
41
+ azure: string;
42
+ 'google-ai': string;
43
+ huggingface: string;
44
+ ollama: string;
45
+ mistral: string;
46
+ };
47
+ tools: {
48
+ default: string;
49
+ filesystem: string;
50
+ network: string;
51
+ computation: string;
52
+ };
53
+ };
54
+ /**
55
+ * Get default timeout for a specific provider
56
+ * @param provider - Provider name
57
+ * @param operation - Operation type (generate or stream)
58
+ * @returns Default timeout string
59
+ */
60
+ export declare function getDefaultTimeout(provider: string, operation?: 'generate' | 'stream'): string;
61
+ /**
62
+ * Create a timeout promise that rejects after specified duration
63
+ * @param timeout - Timeout duration
64
+ * @param provider - Provider name for error message
65
+ * @param operation - Operation type for error message
66
+ * @returns Promise that rejects with TimeoutError
67
+ */
68
+ export declare function createTimeoutPromise(timeout: number | string | undefined, provider: string, operation: 'generate' | 'stream'): Promise<never> | null;
69
+ export { createTimeoutController } from '../providers/timeout-wrapper.js';