@juspay/neurolink 2.0.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/CHANGELOG.md +7 -0
  2. package/README.md +31 -5
  3. package/dist/cli/commands/config.d.ts +6 -6
  4. package/dist/cli/index.js +29 -30
  5. package/dist/core/types.d.ts +2 -0
  6. package/dist/lib/core/types.d.ts +2 -0
  7. package/dist/lib/neurolink.d.ts +2 -0
  8. package/dist/lib/neurolink.js +23 -2
  9. package/dist/lib/providers/agent-enhanced-provider.d.ts +1 -0
  10. package/dist/lib/providers/agent-enhanced-provider.js +59 -3
  11. package/dist/lib/providers/amazonBedrock.js +70 -24
  12. package/dist/lib/providers/anthropic.js +77 -15
  13. package/dist/lib/providers/azureOpenAI.js +77 -15
  14. package/dist/lib/providers/googleAIStudio.js +70 -26
  15. package/dist/lib/providers/googleVertexAI.js +70 -24
  16. package/dist/lib/providers/huggingFace.js +70 -26
  17. package/dist/lib/providers/mistralAI.js +70 -26
  18. package/dist/lib/providers/ollama.d.ts +1 -1
  19. package/dist/lib/providers/ollama.js +24 -10
  20. package/dist/lib/providers/openAI.js +67 -23
  21. package/dist/lib/providers/timeout-wrapper.d.ts +40 -0
  22. package/dist/lib/providers/timeout-wrapper.js +100 -0
  23. package/dist/lib/utils/timeout.d.ts +69 -0
  24. package/dist/lib/utils/timeout.js +130 -0
  25. package/dist/neurolink.d.ts +2 -0
  26. package/dist/neurolink.js +23 -2
  27. package/dist/providers/agent-enhanced-provider.d.ts +1 -0
  28. package/dist/providers/agent-enhanced-provider.js +59 -3
  29. package/dist/providers/amazonBedrock.js +70 -24
  30. package/dist/providers/anthropic.js +77 -15
  31. package/dist/providers/azureOpenAI.js +77 -15
  32. package/dist/providers/googleAIStudio.js +70 -26
  33. package/dist/providers/googleVertexAI.js +70 -24
  34. package/dist/providers/huggingFace.js +70 -26
  35. package/dist/providers/mistralAI.js +70 -26
  36. package/dist/providers/ollama.d.ts +1 -1
  37. package/dist/providers/ollama.js +24 -10
  38. package/dist/providers/openAI.js +67 -23
  39. package/dist/providers/timeout-wrapper.d.ts +40 -0
  40. package/dist/providers/timeout-wrapper.js +100 -0
  41. package/dist/utils/timeout.d.ts +69 -0
  42. package/dist/utils/timeout.js +130 -0
  43. package/package.json +1 -1
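
Summary of the release: 2.1.0 threads a configurable timeout through every provider. NeuroLink forwards options.timeout to the selected provider, each provider wires it to an AbortController via the new utils/timeout helpers (createTimeoutController, parseTimeout, getDefaultTimeout, TimeoutError), and agent tool execution can be capped with toolExecutionTimeout. A hedged usage sketch follows; the public method name and exact option surface are assumptions (only the timeout field itself is visible in the hunks below):

    import { NeuroLink } from "@juspay/neurolink";

    async function main() {
        const ai = new NeuroLink();
        // Assumed call shape: the diff shows options.timeout being forwarded to the
        // selected provider; a number is milliseconds, and string durations appear
        // to be supported via parseTimeout.
        const result = await ai.generate({
            prompt: "Summarize the latest release notes",
            timeout: 30000, // hypothetical value; on expiry the provider throws TimeoutError
        });
        console.log(result);
    }

    main();

On timeout, the provider fallback loop in neurolink.js (first file below) logs the TimeoutError and moves on to the next configured provider.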
package/dist/neurolink.js CHANGED
@@ -11,6 +11,7 @@ import { toolRegistry } from "./mcp/tool-registry.js";
 import { unifiedRegistry } from "./mcp/unified-registry.js";
 import { logger } from "./utils/logger.js";
 import { getBestProvider } from "./utils/providerUtils-fixed.js";
+import { TimeoutError } from "./utils/timeout.js";
 export class NeuroLink {
     mcpInitialized = false;
     contextManager;
@@ -32,10 +33,9 @@ export class NeuroLink {
         const mcpInitPromise = Promise.race([
             this.doIsolatedMCPInitialization(),
             new Promise((_, reject) => {
-                const timer = setTimeout(() => {
+                setTimeout(() => {
                     reject(new Error("MCP initialization timeout after 3s"));
                 }, initTimeout);
-                timer.unref(); // Don't keep process alive
             }),
         ]);
         await mcpInitPromise;
@@ -134,6 +134,7 @@ export class NeuroLink {
             temperature: options.temperature,
             maxTokens: options.maxTokens,
             systemPrompt: enhancedSystemPrompt,
+            timeout: options.timeout,
         }, options.schema);
         if (!result) {
             throw new Error("No response received from AI provider");
@@ -222,6 +223,7 @@ export class NeuroLink {
             temperature: options.temperature,
             maxTokens: options.maxTokens,
             systemPrompt: options.systemPrompt,
+            timeout: options.timeout,
         }, options.schema);
         if (!result) {
             throw new Error("No response received from AI provider");
@@ -252,9 +254,18 @@ export class NeuroLink {
         catch (error) {
             const errorMessage = error instanceof Error ? error.message : String(error);
             lastError = error instanceof Error ? error : new Error(errorMessage);
+            // Special handling for timeout errors
+            if (error instanceof TimeoutError) {
+                logger.warn(`[${functionTag}] Provider timed out`, {
+                    provider: providerName,
+                    timeout: error.timeout,
+                    operation: error.operation,
+                });
+            }
             logger.debug(`[${functionTag}] Provider failed, trying next`, {
                 provider: providerName,
                 error: errorMessage,
+                isTimeout: error instanceof TimeoutError,
                 remainingProviders: tryProviders.slice(tryProviders.indexOf(providerName) + 1),
             });
             // Continue to next provider
@@ -338,6 +349,7 @@ Note: Tool integration is currently in development. Please provide helpful respo
             temperature: options.temperature,
             maxTokens: options.maxTokens,
             systemPrompt: options.systemPrompt,
+            timeout: options.timeout,
         });
         if (!result) {
             throw new Error("No stream response received from AI provider");
@@ -358,9 +370,18 @@ Note: Tool integration is currently in development. Please provide helpful respo
         catch (error) {
             const errorMessage = error instanceof Error ? error.message : String(error);
             lastError = error instanceof Error ? error : new Error(errorMessage);
+            // Special handling for timeout errors
+            if (error instanceof TimeoutError) {
+                logger.warn(`[${functionTag}] Provider timed out`, {
+                    provider: providerName,
+                    timeout: error.timeout,
+                    operation: error.operation,
+                });
+            }
             logger.debug(`[${functionTag}] Provider failed, trying next`, {
                 provider: providerName,
                 error: errorMessage,
+                isTimeout: error instanceof TimeoutError,
                 remainingProviders: tryProviders.slice(tryProviders.indexOf(providerName) + 1),
             });
             // Continue to next provider
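
The TimeoutError caught here comes from the new utils/timeout module (listed above as dist/utils/timeout.js, +130 lines), whose body is not shown in this diff. A minimal sketch of its likely shape, inferred purely from its call sites in this diff (the four-argument constructor in anthropic.js and the error.timeout / error.operation reads above); anything beyond those members is an assumption:

    // Sketch only: constructor arguments mirror
    // new TimeoutError(message, timeoutMs, provider, operation) as seen in anthropic.js.
    export class TimeoutError extends Error {
        constructor(
            message: string,
            public readonly timeout: number,                   // read as error.timeout above
            public readonly provider: string,                  // e.g. "anthropic"
            public readonly operation: "generate" | "stream",  // read as error.operation above
        ) {
            super(message);
            this.name = "TimeoutError";
        }
    }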
package/dist/providers/agent-enhanced-provider.d.ts CHANGED
@@ -15,6 +15,7 @@ interface AgentConfig {
     enableTools?: boolean;
     enableMCP?: boolean;
     mcpInitTimeoutMs?: number;
+    toolExecutionTimeout?: number | string;
     mcpDiscoveryOptions?: {
         searchPaths?: string[];
         configFiles?: string[];
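
The new toolExecutionTimeout field caps individual MCP tool calls (see the execute wrapper in agent-enhanced-provider.js below). A number is treated as milliseconds; a string is converted with parseTimeout. A hedged configuration sketch, reusing only the fields visible in this .d.ts hunk:

    // Assumed AgentConfig usage; only the field names and types come from the hunk above.
    const agentConfig = {
        enableTools: true,
        enableMCP: true,
        mcpInitTimeoutMs: 3000,
        toolExecutionTimeout: 15000, // ms; a duration string may also be accepted via parseTimeout
    };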
package/dist/providers/agent-enhanced-provider.js CHANGED
@@ -9,6 +9,7 @@ import { anthropic } from "@ai-sdk/anthropic";
 import { directAgentTools, getToolsForCategory, } from "../agent/direct-tools.js";
 import { UnifiedMCPSystem } from "../mcp/unified-mcp.js";
 import { mcpLogger } from "../mcp/logging.js";
+import { parseTimeout } from "../utils/timeout.js";
 /**
  * Agent-Enhanced Provider Class
  * Provides AI generation with tool calling capabilities
@@ -114,7 +115,21 @@ export class AgentEnhancedProvider {
         description: toolInfo.description || `MCP tool: ${toolInfo.name}`,
         parameters: toolInfo.inputSchema || {},
         execute: async (args) => {
+            let timeoutId;
             try {
+                // Create timeout controller for tool execution if configured
+                const toolTimeout = this.config.toolExecutionTimeout;
+                const toolAbortController = toolTimeout
+                    ? new AbortController()
+                    : undefined;
+                if (toolAbortController && toolTimeout) {
+                    const timeoutMs = typeof toolTimeout === 'string'
+                        ? parseTimeout(toolTimeout)
+                        : toolTimeout;
+                    timeoutId = setTimeout(() => {
+                        toolAbortController.abort();
+                    }, timeoutMs);
+                }
                 const context = {
                     sessionId: 'cli-session',
                     userId: 'cli-user',
@@ -185,10 +200,33 @@ export class AgentEnhancedProvider {
                        }
                    }
                };
-                const result = await this.mcpSystem.executeTool(toolInfo.name, args, context);
+                const toolPromise = this.mcpSystem.executeTool(toolInfo.name, args, context);
+                let result;
+                if (toolAbortController) {
+                    // Race between tool execution and timeout
+                    result = await Promise.race([
+                        toolPromise,
+                        new Promise((_, reject) => {
+                            toolAbortController.signal.addEventListener('abort', () => {
+                                reject(new Error(`Tool ${toolInfo.name} timed out after ${this.config.toolExecutionTimeout}`));
+                            });
+                        })
+                    ]);
+                }
+                else {
+                    result = await toolPromise;
+                }
+                // Clear timeout if successful
+                if (timeoutId) {
+                    clearTimeout(timeoutId);
+                }
                 return result.data || result;
             }
             catch (error) {
+                // Clear timeout on error
+                if (timeoutId) {
+                    clearTimeout(timeoutId);
+                }
                 mcpLogger.error(`MCP tool ${toolInfo.name} execution failed:`, error);
                 throw error;
             }
@@ -206,7 +244,7 @@ export class AgentEnhancedProvider {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt, schema, } = options;
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt, schema, timeout, } = options;
         // Get combined tools (direct + MCP) if enabled
         const tools = this.config.enableTools
             ? await this.getCombinedTools()
@@ -220,6 +258,14 @@ export class AgentEnhancedProvider {
            maxSteps: this.config.maxSteps
        });
        try {
+            // Parse timeout if provided
+            let abortSignal;
+            if (timeout) {
+                const timeoutMs = typeof timeout === 'string' ? parseTimeout(timeout) : timeout;
+                if (timeoutMs !== undefined) {
+                    abortSignal = AbortSignal.timeout(timeoutMs);
+                }
+            }
            // The AI SDK with maxSteps automatically handles tool calling and result integration
            const result = await generateText({
                model: this.model,
@@ -231,6 +277,7 @@ export class AgentEnhancedProvider {
                temperature,
                maxTokens,
                toolChoice: this.shouldForceToolUsage(prompt) ? "required" : "auto",
+                abortSignal, // Pass abort signal for timeout support
            });
            log('Generation completed', {
                text: result.text?.substring(0, 200),
@@ -307,12 +354,20 @@ export class AgentEnhancedProvider {
        const options = typeof optionsOrPrompt === "string"
            ? { prompt: optionsOrPrompt }
            : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt, } = options;
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt, timeout, } = options;
        // Get combined tools (direct + MCP) if enabled
        const tools = this.config.enableTools
            ? await this.getCombinedTools()
            : {};
        try {
+            // Parse timeout if provided
+            let abortSignal;
+            if (timeout) {
+                const timeoutMs = typeof timeout === 'string' ? parseTimeout(timeout) : timeout;
+                if (timeoutMs !== undefined) {
+                    abortSignal = AbortSignal.timeout(timeoutMs);
+                }
+            }
            const result = await streamText({
                model: this.model,
                prompt: systemPrompt
@@ -323,6 +378,7 @@ export class AgentEnhancedProvider {
                temperature,
                maxTokens,
                toolChoice: this.shouldForceToolUsage(prompt) ? "required" : "auto",
+                abortSignal, // Pass abort signal for timeout support
            });
            return result;
        }
package/dist/providers/amazonBedrock.js CHANGED
@@ -1,6 +1,7 @@
 import { createAmazonBedrock } from "@ai-sdk/amazon-bedrock";
 import { streamText, generateText, Output, } from "ai";
 import { logger } from "../utils/logger.js";
+import { createTimeoutController, TimeoutError, getDefaultTimeout } from "../utils/timeout.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: "You are a helpful AI assistant.",
@@ -128,7 +129,7 @@ export class AmazonBedrock {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, 'stream'), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Stream request started`, {
@@ -137,13 +138,18 @@ export class AmazonBedrock {
             promptLength: prompt.length,
             temperature,
             maxTokens,
+            timeout,
         });
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'stream');
         const streamOptions = {
             model: this.model,
             prompt: prompt,
             system: systemPrompt,
             temperature,
             maxTokens,
+            // Add abort signal if available
+            ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
             onError: (event) => {
                 const error = event.error;
                 const errorMessage = error instanceof Error ? error.message : String(error);
@@ -193,16 +199,30 @@ export class AmazonBedrock {
            modelName: this.modelName,
            promptLength: prompt.length,
        });
+        // For streaming, we can't clean up immediately, but the timeout will auto-clean
+        // The user should handle the stream and any timeout errors
        return result;
        }
        catch (err) {
-            logger.error(`[${functionTag}] Exception`, {
-                provider,
-                modelName: this.modelName,
-                region: getAWSRegion(),
-                message: "Error in streaming text",
-                err: String(err),
-            });
+            // Log timeout errors specifically
+            if (err instanceof TimeoutError) {
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    modelName: this.modelName,
+                    region: getAWSRegion(),
+                    timeout: err.timeout,
+                    message: err.message,
+                });
+            }
+            else {
+                logger.error(`[${functionTag}] Exception`, {
+                    provider,
+                    modelName: this.modelName,
+                    region: getAWSRegion(),
+                    message: "Error in streaming text",
+                    err: String(err),
+                });
+            }
            throw err; // Re-throw error to trigger fallback
        }
    }
@@ -214,7 +234,7 @@ export class AmazonBedrock {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, 'generate'), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Generate text started`, {
@@ -224,36 +244,62 @@ export class AmazonBedrock {
            promptLength: prompt.length,
            temperature,
            maxTokens,
+            timeout,
        });
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'generate');
        const generateOptions = {
            model: this.model,
            prompt: prompt,
            system: systemPrompt,
            temperature,
            maxTokens,
+            // Add abort signal if available
+            ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
        };
        if (finalSchema) {
            generateOptions.experimental_output = Output.object({
                schema: finalSchema,
            });
        }
-        const result = await generateText(generateOptions);
-        logger.debug(`[${functionTag}] Generate text completed`, {
-            provider,
-            modelName: this.modelName,
-            usage: result.usage,
-            finishReason: result.finishReason,
-            responseLength: result.text?.length || 0,
-        });
-        return result;
+        try {
+            const result = await generateText(generateOptions);
+            // Clean up timeout if successful
+            timeoutController?.cleanup();
+            logger.debug(`[${functionTag}] Generate text completed`, {
+                provider,
+                modelName: this.modelName,
+                usage: result.usage,
+                finishReason: result.finishReason,
+                responseLength: result.text?.length || 0,
+                timeout,
+            });
+            return result;
+        }
+        finally {
+            // Always cleanup timeout
+            timeoutController?.cleanup();
+        }
        }
        catch (err) {
-            logger.error(`[${functionTag}] Exception`, {
-                provider,
-                modelName: this.modelName,
-                message: "Error in generating text",
-                err: String(err),
-            });
+            // Log timeout errors specifically
+            if (err instanceof TimeoutError) {
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    modelName: this.modelName,
+                    region: getAWSRegion(),
+                    timeout: err.timeout,
+                    message: err.message,
+                });
+            }
+            else {
+                logger.error(`[${functionTag}] Exception`, {
+                    provider,
+                    modelName: this.modelName,
+                    message: "Error in generating text",
+                    err: String(err),
+                });
+            }
            throw err; // Re-throw error to trigger fallback instead of returning null
        }
    }
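
createTimeoutController and getDefaultTimeout are likewise defined in the new utils/timeout.js, and only their call sites appear in this diff. The provider code above implies that createTimeoutController returns undefined when no timeout applies, or an object exposing controller (an AbortController), timeoutMs, and cleanup(). A sketch under those assumptions, reusing the parseTimeout sketch above; the defaults in getDefaultTimeout are invented for illustration:

    // Sketch only: shape inferred from timeoutController?.controller.signal,
    // timeoutController?.timeoutMs, and timeoutController?.cleanup() in the hunks above.
    interface TimeoutController {
        controller: AbortController;
        timeoutMs: number;
        cleanup: () => void;
    }

    export function createTimeoutController(
        timeout: number | string | undefined,
        provider: string,      // presumably used for error messages in the real module
        operation: "generate" | "stream",
    ): TimeoutController | undefined {
        const timeoutMs = typeof timeout === "string" ? parseTimeout(timeout) : timeout;
        if (!timeoutMs) {
            return undefined; // no timeout configured
        }
        const controller = new AbortController();
        const timer = setTimeout(() => controller.abort(), timeoutMs);
        return { controller, timeoutMs, cleanup: () => clearTimeout(timer) };
    }

    // Illustrative defaults only; the real per-provider values live in utils/timeout.js.
    export function getDefaultTimeout(provider: string, operation: "generate" | "stream"): number {
        return operation === "stream" ? 120000 : 60000;
    }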
package/dist/providers/anthropic.js CHANGED
@@ -6,6 +6,7 @@
  */
 import { AIProviderName } from "../core/types.js";
 import { logger } from "../utils/logger.js";
+import { createTimeoutController, TimeoutError, getDefaultTimeout } from "../utils/timeout.js";
 export class AnthropicProvider {
     name = AIProviderName.ANTHROPIC;
     apiKey;
@@ -29,7 +30,7 @@ export class AnthropicProvider {
     getModel() {
         return this.defaultModel;
     }
-    async makeRequest(endpoint, body, stream = false) {
+    async makeRequest(endpoint, body, stream = false, signal) {
        const url = `${this.baseURL}/v1/${endpoint}`;
        const headers = {
            "Content-Type": "application/json",
@@ -43,6 +44,7 @@ export class AnthropicProvider {
            method: "POST",
            headers,
            body: JSON.stringify(body),
+            signal, // Add abort signal for timeout support
        });
        if (!response.ok) {
            const errorText = await response.text();
@@ -52,13 +54,15 @@ export class AnthropicProvider {
        return response;
    }
    async generateText(optionsOrPrompt, schema) {
-        logger.debug("[AnthropicProvider.generateText] Starting text generation");
+        const functionTag = "AnthropicProvider.generateText";
+        const provider = "anthropic";
+        logger.debug(`[${functionTag}] Starting text generation`);
        // Parse parameters with backward compatibility
        const options = typeof optionsOrPrompt === "string"
            ? { prompt: optionsOrPrompt }
            : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", } = options;
-        logger.debug(`[AnthropicProvider.generateText] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}`);
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, 'generate'), } = options;
+        logger.debug(`[${functionTag}] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}, Timeout: ${timeout}`);
        const requestBody = {
            model: this.getModel(),
            max_tokens: maxTokens,
@@ -71,10 +75,14 @@ export class AnthropicProvider {
            temperature,
            system: systemPrompt,
        };
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'generate');
        try {
-            const response = await this.makeRequest("messages", requestBody);
+            const response = await this.makeRequest("messages", requestBody, false, timeoutController?.controller.signal);
            const data = await response.json();
-            logger.debug(`[AnthropicProvider.generateText] Success. Generated ${data.usage.output_tokens} tokens`);
+            // Clean up timeout if successful
+            timeoutController?.cleanup();
+            logger.debug(`[${functionTag}] Success. Generated ${data.usage.output_tokens} tokens`);
            const content = data.content.map((block) => block.text).join("");
            return {
                content,
@@ -89,18 +97,42 @@ export class AnthropicProvider {
            };
        }
        catch (error) {
-            logger.error("[AnthropicProvider.generateText] Error:", error);
+            // Always cleanup timeout
+            timeoutController?.cleanup();
+            // Log timeout errors specifically
+            if (error instanceof TimeoutError) {
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    timeout: error.timeout,
+                    message: error.message,
+                });
+            }
+            else if (error?.name === 'AbortError') {
+                // Convert AbortError to TimeoutError
+                const timeoutError = new TimeoutError(`${provider} generate operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, 'generate');
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    timeout: timeoutController?.timeoutMs,
+                    message: timeoutError.message,
+                });
+                throw timeoutError;
+            }
+            else {
+                logger.error(`[${functionTag}] Error:`, error);
+            }
            throw error;
        }
    }
    async streamText(optionsOrPrompt, schema) {
-        logger.debug("[AnthropicProvider.streamText] Starting text streaming");
+        const functionTag = "AnthropicProvider.streamText";
+        const provider = "anthropic";
+        logger.debug(`[${functionTag}] Starting text streaming`);
        // Parse parameters with backward compatibility
        const options = typeof optionsOrPrompt === "string"
            ? { prompt: optionsOrPrompt }
            : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", } = options;
-        logger.debug(`[AnthropicProvider.streamText] Streaming prompt: "${prompt.substring(0, 100)}..."`);
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, 'stream'), } = options;
+        logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
        const requestBody = {
            model: this.getModel(),
            max_tokens: maxTokens,
@@ -114,30 +146,60 @@ export class AnthropicProvider {
            system: systemPrompt,
            stream: true,
        };
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'stream');
        try {
-            const response = await this.makeRequest("messages", requestBody, true);
+            const response = await this.makeRequest("messages", requestBody, true, timeoutController?.controller.signal);
            if (!response.body) {
                throw new Error("No response body received");
            }
-            // Return a StreamTextResult-like object
+            // Return a StreamTextResult-like object with timeout signal
            return {
-                textStream: this.createAsyncIterable(response.body),
+                textStream: this.createAsyncIterable(response.body, timeoutController?.controller.signal),
                text: "",
                usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
                finishReason: "end_turn",
+                // Store timeout controller for external cleanup if needed
+                _timeoutController: timeoutController,
            };
        }
        catch (error) {
-            logger.error("[AnthropicProvider.streamText] Error:", error);
+            // Cleanup timeout on error
+            timeoutController?.cleanup();
+            // Log timeout errors specifically
+            if (error instanceof TimeoutError) {
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    timeout: error.timeout,
+                    message: error.message,
+                });
+            }
+            else if (error?.name === 'AbortError') {
+                // Convert AbortError to TimeoutError
+                const timeoutError = new TimeoutError(`${provider} stream operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, 'stream');
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    timeout: timeoutController?.timeoutMs,
+                    message: timeoutError.message,
+                });
+                throw timeoutError;
+            }
+            else {
+                logger.error(`[${functionTag}] Error:`, error);
+            }
            throw error;
        }
    }
-    async *createAsyncIterable(body) {
+    async *createAsyncIterable(body, signal) {
        const reader = body.getReader();
        const decoder = new TextDecoder();
        let buffer = "";
        try {
            while (true) {
+                // Check if aborted
+                if (signal?.aborted) {
+                    throw new Error('AbortError');
+                }
                const { done, value } = await reader.read();
                if (done) {
                    break;