@juspay/neurolink 1.11.3 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/CHANGELOG.md +19 -0
  2. package/README.md +63 -21
  3. package/dist/cli/commands/config.d.ts +6 -6
  4. package/dist/cli/index.js +89 -39
  5. package/dist/core/types.d.ts +2 -0
  6. package/dist/lib/core/types.d.ts +2 -0
  7. package/dist/lib/neurolink.d.ts +2 -0
  8. package/dist/lib/neurolink.js +23 -2
  9. package/dist/lib/providers/agent-enhanced-provider.d.ts +1 -0
  10. package/dist/lib/providers/agent-enhanced-provider.js +59 -3
  11. package/dist/lib/providers/amazonBedrock.js +70 -24
  12. package/dist/lib/providers/anthropic.js +77 -15
  13. package/dist/lib/providers/azureOpenAI.js +77 -15
  14. package/dist/lib/providers/googleAIStudio.js +70 -26
  15. package/dist/lib/providers/googleVertexAI.js +70 -24
  16. package/dist/lib/providers/huggingFace.js +70 -26
  17. package/dist/lib/providers/mistralAI.js +70 -26
  18. package/dist/lib/providers/ollama.d.ts +1 -1
  19. package/dist/lib/providers/ollama.js +24 -10
  20. package/dist/lib/providers/openAI.js +67 -23
  21. package/dist/lib/providers/timeout-wrapper.d.ts +40 -0
  22. package/dist/lib/providers/timeout-wrapper.js +100 -0
  23. package/dist/lib/utils/timeout.d.ts +69 -0
  24. package/dist/lib/utils/timeout.js +130 -0
  25. package/dist/neurolink.d.ts +2 -0
  26. package/dist/neurolink.js +23 -2
  27. package/dist/providers/agent-enhanced-provider.d.ts +1 -0
  28. package/dist/providers/agent-enhanced-provider.js +59 -3
  29. package/dist/providers/amazonBedrock.js +70 -24
  30. package/dist/providers/anthropic.js +77 -15
  31. package/dist/providers/azureOpenAI.js +77 -15
  32. package/dist/providers/googleAIStudio.js +70 -26
  33. package/dist/providers/googleVertexAI.js +70 -24
  34. package/dist/providers/huggingFace.js +70 -26
  35. package/dist/providers/mistralAI.js +70 -26
  36. package/dist/providers/ollama.d.ts +1 -1
  37. package/dist/providers/ollama.js +24 -10
  38. package/dist/providers/openAI.js +67 -23
  39. package/dist/providers/timeout-wrapper.d.ts +40 -0
  40. package/dist/providers/timeout-wrapper.js +100 -0
  41. package/dist/utils/timeout.d.ts +69 -0
  42. package/dist/utils/timeout.js +130 -0
  43. package/package.json +1 -1
package/CHANGELOG.md CHANGED
@@ -1,3 +1,22 @@
1
+ # [2.1.0](https://github.com/juspay/neurolink/compare/v2.0.0...v2.1.0) (2025-06-29)
2
+
3
+
4
+ ### Features
5
+
6
+ * **timeout:** add comprehensive timeout support for all AI providers ([8610f4a](https://github.com/juspay/neurolink/commit/8610f4ade418345b0395ab72af6e675f6eec6f93))
7
+
8
+ # [2.0.0](https://github.com/juspay/neurolink/compare/v1.11.3...v2.0.0) (2025-06-28)
9
+
10
+
11
+ ### Features
12
+
13
+ * **cli:** add command variations and stream agent support ([5fc4c26](https://github.com/juspay/neurolink/commit/5fc4c26b23bd189be52272521bdd2ca40dd55837))
14
+
15
+
16
+ ### BREAKING CHANGES
17
+
18
+ * **cli:** 'generate-text' command is deprecated and will be removed in v2.0
19
+
1
20
  ## [1.11.3](https://github.com/juspay/neurolink/compare/v1.11.2...v1.11.3) (2025-06-22)
2
21
 
3
22
 
package/README.md CHANGED
@@ -18,13 +18,19 @@
18
18
  ✅ **Function Calling Ready**: AI can now execute real filesystem operations, data analysis, and system commands
19
19
  ✅ **Production Validated**: 23,230+ token MCP context loading confirmed via comprehensive CLI testing
20
20
  ✅ **Zero Build Errors**: Clean TypeScript compilation after resolving all 13 blocking errors
21
- ✅ **CLI Tool Integration**: Both `generate-text` and `agent-generate` commands use full MCP capabilities
21
+ ✅ **CLI Tool Integration**: Both `generate`/`gen` and `agent-generate` commands use full MCP capabilities
22
22
  ✅ **Backward Compatible**: Tools enabled by default with opt-out flag for traditional usage
23
23
 
24
24
  ```bash
25
- # NEW: AI can now access your filesystem and execute tools
25
+ # NEW: AI can now access your filesystem and execute tools (use preferred commands)
26
+ npx @juspay/neurolink generate "List files in this directory" --provider google-ai
27
+
28
+ # Alternative shorter command
29
+ npx @juspay/neurolink gen "List files in this directory" --provider google-ai
30
+
31
+ # ⚠️ DEPRECATED: generate-text will be removed in v2.0 (use 'generate' or 'gen' instead)
32
+ # This command shows a deprecation warning and is kept for backward compatibility only
26
33
  npx @juspay/neurolink generate-text "List files in this directory" --provider google-ai
27
- # Result: AI uses listDirectory tool and returns actual file listing
28
34
  ```
29
35
 
30
36
  ## 🚀 Quick Start
@@ -36,7 +42,8 @@ npx @juspay/neurolink generate-text "List files in this directory" --provider go
36
42
  export GOOGLE_AI_API_KEY="AIza-your-google-ai-api-key"
37
43
 
38
44
  # CLI - No installation required
39
- npx @juspay/neurolink generate-text "Hello, AI"
45
+ npx @juspay/neurolink generate "Hello, AI"
46
+ npx @juspay/neurolink gen "Hello, AI" # Shortest form
40
47
  npx @juspay/neurolink status
41
48
  ```
42
49
 
@@ -54,6 +61,7 @@ import { createBestAIProvider } from "@juspay/neurolink";
54
61
  const provider = createBestAIProvider();
55
62
  const result = await provider.generateText({
56
63
  prompt: "Write a haiku about programming",
64
+ timeout: '30s' // Optional: Set custom timeout (default: 30s)
57
65
  });
58
66
 
59
67
  console.log(result.text);
@@ -106,12 +114,16 @@ npx @juspay/neurolink status
106
114
 
107
115
  ```bash
108
116
  # Test built-in tools (works immediately)
109
- npx @juspay/neurolink generate-text "What time is it?" --debug
110
- # Returns: "The current time is Friday, December 13, 2024 at 10:30:45 AM PST"
117
+ npx @juspay/neurolink generate "What time is it?" --debug
118
+
119
+ # Alternative short form
120
+ npx @juspay/neurolink gen "What time is it?" --debug
111
121
 
112
122
  # Test tool discovery
113
- npx @juspay/neurolink generate-text "What tools do you have access to?" --debug
114
- # AI will list 5+ built-in tools and 58+ discovered external servers
123
+ npx @juspay/neurolink generate "What tools do you have access to?" --debug
124
+
125
+ # Alternative short form
126
+ npx @juspay/neurolink gen "What tools do you have access to?" --debug
115
127
 
116
128
  # Test external server discovery
117
129
  npx @juspay/neurolink mcp discover --format table
@@ -134,13 +146,13 @@ NeuroLink now features a revolutionary dynamic model configuration system that e
134
146
 
135
147
  ```bash
136
148
  # Cost optimization - automatically use cheapest model
137
- npx @juspay/neurolink generate-text "Hello" --optimize-cost
149
+ npx @juspay/neurolink generate "Hello" --optimize-cost
138
150
 
139
151
  # Capability search - find models with specific features
140
- npx @juspay/neurolink generate-text "Describe this image" --capability vision
152
+ npx @juspay/neurolink generate "Describe this image" --capability vision
141
153
 
142
154
  # Model aliases - use friendly names
143
- npx @juspay/neurolink generate-text "Write code" --model best-coding
155
+ npx @juspay/neurolink gen "Write code" --model best-coding
144
156
 
145
157
  # Test dynamic model server
146
158
  npm run model-server # Starts config server on localhost:3001
@@ -163,14 +175,25 @@ npm run test:dynamic-models # Comprehensive test suite
163
175
 
164
176
  ```bash
165
177
  # Text generation with automatic MCP tool detection (default)
166
- npx @juspay/neurolink generate-text "What time is it?"
167
- # AI automatically uses time tool for real-time data
178
+ npx @juspay/neurolink generate "What time is it?"
179
+
180
+ # Alternative short form
181
+ npx @juspay/neurolink gen "What time is it?"
168
182
 
169
183
  # Disable tools for training-data-only responses
170
- npx @juspay/neurolink generate-text "What time is it?" --disable-tools
184
+ npx @juspay/neurolink generate "What time is it?" --disable-tools
171
185
 
172
- # Real-time streaming
173
- npx @juspay/neurolink stream "Tell me a story about robots"
186
+ # With custom timeout for complex prompts
187
+ npx @juspay/neurolink generate "Explain quantum computing in detail" --timeout 1m
188
+
189
+ # Real-time streaming with agent support (default)
190
+ npx @juspay/neurolink stream "What time is it?"
191
+
192
+ # Streaming without tools (traditional mode)
193
+ npx @juspay/neurolink stream "Tell me a story" --disable-tools
194
+
195
+ # Streaming with extended timeout
196
+ npx @juspay/neurolink stream "Write a long story" --timeout 5m
174
197
 
175
198
  # Provider diagnostics
176
199
  npx @juspay/neurolink status --verbose
@@ -178,24 +201,43 @@ npx @juspay/neurolink status --verbose
178
201
  # Batch processing
179
202
  echo -e "Write a haiku\nExplain gravity" > prompts.txt
180
203
  npx @juspay/neurolink batch prompts.txt --output results.json
204
+
205
+ # Batch with custom timeout per request
206
+ npx @juspay/neurolink batch prompts.txt --timeout 45s --output results.json
181
207
  ```
182
208
 
183
209
  ### SDK Integration
184
210
 
185
211
  ```typescript
186
- // SvelteKit API route
212
+ // SvelteKit API route with timeout handling
187
213
  export const POST: RequestHandler = async ({ request }) => {
188
214
  const { message } = await request.json();
189
215
  const provider = createBestAIProvider();
190
- const result = await provider.streamText({ prompt: message });
191
- return new Response(result.toReadableStream());
216
+
217
+ try {
218
+ const result = await provider.streamText({
219
+ prompt: message,
220
+ timeout: '2m' // 2 minutes for streaming
221
+ });
222
+ return new Response(result.toReadableStream());
223
+ } catch (error) {
224
+ if (error.name === 'TimeoutError') {
225
+ return new Response('Request timed out', { status: 408 });
226
+ }
227
+ throw error;
228
+ }
192
229
  };
193
230
 
194
- // Next.js API route
231
+ // Next.js API route with timeout
195
232
  export async function POST(request: NextRequest) {
196
233
  const { prompt } = await request.json();
197
234
  const provider = createBestAIProvider();
198
- const result = await provider.generateText({ prompt });
235
+
236
+ const result = await provider.generateText({
237
+ prompt,
238
+ timeout: process.env.AI_TIMEOUT || '30s' // Configurable timeout
239
+ });
240
+
199
241
  return NextResponse.json({ text: result.text });
200
242
  }
201
243
  ```
@@ -117,12 +117,12 @@ declare const ConfigSchema: z.ZodObject<{
117
117
  model: z.ZodDefault<z.ZodString>;
118
118
  timeout: z.ZodDefault<z.ZodNumber>;
119
119
  }, "strip", z.ZodTypeAny, {
120
- model: string;
121
120
  timeout: number;
121
+ model: string;
122
122
  baseUrl: string;
123
123
  }, {
124
- model?: string | undefined;
125
124
  timeout?: number | undefined;
125
+ model?: string | undefined;
126
126
  baseUrl?: string | undefined;
127
127
  }>>;
128
128
  mistral: z.ZodOptional<z.ZodObject<{
@@ -176,8 +176,8 @@ declare const ConfigSchema: z.ZodObject<{
176
176
  apiKey?: string | undefined;
177
177
  } | undefined;
178
178
  ollama?: {
179
- model: string;
180
179
  timeout: number;
180
+ model: string;
181
181
  baseUrl: string;
182
182
  } | undefined;
183
183
  mistral?: {
@@ -225,8 +225,8 @@ declare const ConfigSchema: z.ZodObject<{
225
225
  apiKey?: string | undefined;
226
226
  } | undefined;
227
227
  ollama?: {
228
- model?: string | undefined;
229
228
  timeout?: number | undefined;
229
+ model?: string | undefined;
230
230
  baseUrl?: string | undefined;
231
231
  } | undefined;
232
232
  mistral?: {
@@ -299,8 +299,8 @@ declare const ConfigSchema: z.ZodObject<{
299
299
  apiKey?: string | undefined;
300
300
  } | undefined;
301
301
  ollama?: {
302
- model: string;
303
302
  timeout: number;
303
+ model: string;
304
304
  baseUrl: string;
305
305
  } | undefined;
306
306
  mistral?: {
@@ -360,8 +360,8 @@ declare const ConfigSchema: z.ZodObject<{
360
360
  apiKey?: string | undefined;
361
361
  } | undefined;
362
362
  ollama?: {
363
- model?: string | undefined;
364
363
  timeout?: number | undefined;
364
+ model?: string | undefined;
365
365
  baseUrl?: string | undefined;
366
366
  } | undefined;
367
367
  mistral?: {
package/dist/cli/index.js CHANGED
@@ -204,7 +204,7 @@ const cli = yargs(args)
204
204
  exitProcess(); // Default exit
205
205
  })
206
206
  // Generate Text Command
207
- .command(["generate-text <prompt>", "generate <prompt>"], "Generate text using AI providers", (yargsInstance) => yargsInstance
207
+ .command(["generate-text <prompt>", "generate <prompt>", "gen <prompt>"], "Generate text using AI providers", (yargsInstance) => yargsInstance
208
208
  .usage("Usage: $0 generate-text <prompt> [options]")
209
209
  .positional("prompt", {
210
210
  type: "string",
@@ -253,9 +253,9 @@ const cli = yargs(args)
253
253
  description: "Enable debug mode with verbose output",
254
254
  }) // Kept for potential specific debug logic
255
255
  .option("timeout", {
256
- type: "number",
257
- default: 30000,
258
- description: "Timeout for the request in milliseconds",
256
+ type: "string",
257
+ default: "30s",
258
+ description: "Timeout for the request (e.g., 30s, 2m, 1h, 5000)",
259
259
  })
260
260
  .option("disable-tools", {
261
261
  type: "boolean",
@@ -266,6 +266,11 @@ const cli = yargs(args)
266
266
  .example('$0 generate-text "Write a story" --provider openai', "Use specific provider")
267
267
  .example('$0 generate-text "What time is it?"', "Use with natural tool integration (default)")
268
268
  .example('$0 generate-text "Hello world" --disable-tools', "Use without tool integration"), async (argv) => {
269
+ // Check if generate-text was used specifically (for deprecation warning)
270
+ const usedCommand = argv._[0];
271
+ if (usedCommand === 'generate-text' && !argv.quiet) {
272
+ console.warn(chalk.yellow('⚠️ Warning: "generate-text" is deprecated. Use "generate" or "gen" instead for multimodal support.'));
273
+ }
269
274
  let originalConsole = {};
270
275
  if (argv.format === "json" && !argv.quiet) {
271
276
  // Suppress only if not quiet, as quiet implies no spinners anyway
@@ -280,9 +285,8 @@ const cli = yargs(args)
280
285
  ? null
281
286
  : ora("🤖 Generating text...").start();
282
287
  try {
283
- const timeoutPromise = new Promise((_, reject) => {
284
- setTimeout(() => reject(new Error(`Request timeout (${argv.timeout}ms)`)), argv.timeout);
285
- });
288
+ // The SDK will handle the timeout internally, so we don't need this wrapper anymore
289
+ // Just pass the timeout to the SDK
286
290
  // Use AgentEnhancedProvider when tools are enabled, otherwise use standard SDK
287
291
  let generatePromise;
288
292
  if (argv.disableTools === true) {
@@ -295,6 +299,7 @@ const cli = yargs(args)
295
299
  temperature: argv.temperature,
296
300
  maxTokens: argv.maxTokens,
297
301
  systemPrompt: argv.system,
302
+ timeout: argv.timeout,
298
303
  });
299
304
  }
300
305
  else {
@@ -318,10 +323,7 @@ const cli = yargs(args)
318
323
  });
319
324
  generatePromise = agentProvider.generateText(argv.prompt);
320
325
  }
321
- const result = (await Promise.race([
322
- generatePromise,
323
- timeoutPromise,
324
- ]));
326
+ const result = await generatePromise;
325
327
  if (argv.format === "json" && originalConsole.log) {
326
328
  Object.assign(console, originalConsole);
327
329
  }
@@ -329,20 +331,20 @@ const cli = yargs(args)
329
331
  spinner.succeed(chalk.green("✅ Text generated successfully!"));
330
332
  }
331
333
  // Handle both AgentEnhancedProvider (AI SDK) and standard NeuroLink SDK responses
332
- const responseText = result.text || result.content || "";
333
- const responseUsage = result.usage || {
334
+ const responseText = result ? (result.text || result.content || "") : "";
335
+ const responseUsage = result ? (result.usage || {
334
336
  promptTokens: 0,
335
337
  completionTokens: 0,
336
338
  totalTokens: 0,
337
- };
339
+ }) : { promptTokens: 0, completionTokens: 0, totalTokens: 0 };
338
340
  if (argv.format === "json") {
339
341
  const jsonOutput = {
340
342
  content: responseText,
341
- provider: result.provider || argv.provider,
343
+ provider: result ? (result.provider || argv.provider) : argv.provider,
342
344
  usage: responseUsage,
343
- responseTime: result.responseTime || 0,
344
- toolCalls: result.toolCalls || [],
345
- toolResults: result.toolResults || [],
345
+ responseTime: result ? (result.responseTime || 0) : 0,
346
+ toolCalls: result ? (result.toolCalls || []) : [],
347
+ toolResults: result ? (result.toolResults || []) : [],
346
348
  };
347
349
  process.stdout.write(JSON.stringify(jsonOutput, null, 2) + "\n");
348
350
  }
@@ -352,7 +354,7 @@ const cli = yargs(args)
352
354
  console.log("\n" + responseText + "\n");
353
355
  }
354
356
  // Show tool calls if any
355
- if (result.toolCalls && result.toolCalls.length > 0) {
357
+ if (result && result.toolCalls && result.toolCalls.length > 0) {
356
358
  console.log(chalk.blue("🔧 Tools Called:"));
357
359
  for (const toolCall of result.toolCalls) {
358
360
  console.log(`- ${toolCall.toolName}`);
@@ -361,7 +363,7 @@ const cli = yargs(args)
361
363
  console.log();
362
364
  }
363
365
  // Show tool results if any
364
- if (result.toolResults && result.toolResults.length > 0) {
366
+ if (result && result.toolResults && result.toolResults.length > 0) {
365
367
  console.log(chalk.blue("📋 Tool Results:"));
366
368
  for (const toolResult of result.toolResults) {
367
369
  console.log(`- ${toolResult.toolCallId}`);
@@ -370,9 +372,9 @@ const cli = yargs(args)
370
372
  console.log();
371
373
  }
372
374
  console.log(JSON.stringify({
373
- provider: result.provider || argv.provider,
375
+ provider: result ? (result.provider || argv.provider) : argv.provider,
374
376
  usage: responseUsage,
375
- responseTime: result.responseTime || 0,
377
+ responseTime: result ? (result.responseTime || 0) : 0,
376
378
  }, null, 2));
377
379
  if (responseUsage.totalTokens) {
378
380
  console.log(chalk.blue(`ℹ️ ${responseUsage.totalTokens} tokens used`));
@@ -431,13 +433,25 @@ const cli = yargs(args)
431
433
  type: "number",
432
434
  default: 0.7,
433
435
  description: "Creativity level",
436
+ })
437
+ .option("timeout", {
438
+ type: "string",
439
+ default: "2m",
440
+ description: "Timeout for streaming (e.g., 30s, 2m, 1h)",
434
441
  })
435
442
  .option("debug", {
436
443
  type: "boolean",
437
444
  default: false,
438
445
  description: "Enable debug mode with interleaved logging",
439
446
  })
440
- .example('$0 stream "Tell me a story"', "Stream a story in real-time"), async (argv) => {
447
+ .option("disable-tools", {
448
+ type: "boolean",
449
+ default: false,
450
+ description: "Disable MCP tool integration (tools enabled by default)",
451
+ })
452
+ .example('$0 stream "Tell me a story"', "Stream a story in real-time")
453
+ .example('$0 stream "What time is it?"', "Stream with natural tool integration (default)")
454
+ .example('$0 stream "Tell me a story" --disable-tools', "Stream without tool integration"), async (argv) => {
441
455
  // Default mode: Simple streaming message
442
456
  // Debug mode: More detailed information
443
457
  if (!argv.quiet && !argv.debug) {
@@ -447,13 +461,53 @@ const cli = yargs(args)
447
461
  console.log(chalk.blue(`🔄 Streaming from ${argv.provider} provider with debug logging...\n`));
448
462
  }
449
463
  try {
450
- const stream = await sdk.generateTextStream({
451
- prompt: argv.prompt,
452
- provider: argv.provider === "auto"
453
- ? undefined
454
- : argv.provider,
455
- temperature: argv.temperature,
456
- });
464
+ let stream;
465
+ if (argv.disableTools === true) {
466
+ // Tools disabled - use standard SDK
467
+ stream = await sdk.generateTextStream({
468
+ prompt: argv.prompt,
469
+ provider: argv.provider === "auto"
470
+ ? undefined
471
+ : argv.provider,
472
+ temperature: argv.temperature,
473
+ timeout: argv.timeout,
474
+ });
475
+ }
476
+ else {
477
+ // Tools enabled - use AgentEnhancedProvider for streaming tool calls
478
+ // Map provider to supported AgentEnhancedProvider types
479
+ const supportedProvider = (() => {
480
+ switch (argv.provider) {
481
+ case "openai":
482
+ case "anthropic":
483
+ case "google-ai":
484
+ return argv.provider;
485
+ case "auto":
486
+ default:
487
+ return "google-ai"; // Default to google-ai for best tool support
488
+ }
489
+ })();
490
+ const agentProvider = new AgentEnhancedProvider({
491
+ provider: supportedProvider,
492
+ model: undefined, // Use default model for provider
493
+ toolCategory: "all", // Enable all tool categories
494
+ });
495
+ // Note: AgentEnhancedProvider doesn't support streaming with tools yet
496
+ // Fall back to generateText for now
497
+ const result = await agentProvider.generateText(argv.prompt);
498
+ // Simulate streaming by outputting the result
499
+ const text = result?.text || "";
500
+ const CHUNK_SIZE = 10;
501
+ const DELAY_MS = 50;
502
+ for (let i = 0; i < text.length; i += CHUNK_SIZE) {
503
+ process.stdout.write(text.slice(i, i + CHUNK_SIZE));
504
+ await new Promise(resolve => setTimeout(resolve, DELAY_MS)); // Small delay
505
+ }
506
+ if (!argv.quiet) {
507
+ process.stdout.write("\n");
508
+ }
509
+ return; // Exit early for agent mode
510
+ }
457
511
  for await (const chunk of stream) {
458
512
  process.stdout.write(chunk.content);
459
513
  // In debug mode, interleaved logging would appear here
@@ -501,9 +555,9 @@ const cli = yargs(args)
501
555
  description: "AI provider to use",
502
556
  })
503
557
  .option("timeout", {
504
- type: "number",
505
- default: 30000,
506
- description: "Timeout for each request in milliseconds",
558
+ type: "string",
559
+ default: "30s",
560
+ description: "Timeout for each request (e.g., 30s, 2m, 1h)",
507
561
  })
508
562
  .option("temperature", {
509
563
  type: "number",
@@ -556,8 +610,7 @@ const cli = yargs(args)
556
610
  spinner.text = `Processing ${i + 1}/${prompts.length}: ${prompts[i].substring(0, 30)}...`;
557
611
  }
558
612
  try {
559
- const timeoutPromise = new Promise((_, reject) => setTimeout(() => reject(new Error("Request timeout")), argv.timeout));
560
- const generatePromise = sdk.generateText({
613
+ const result = await sdk.generateText({
561
614
  prompt: prompts[i],
562
615
  provider: argv.provider === "auto"
563
616
  ? undefined
@@ -565,11 +618,8 @@ const cli = yargs(args)
565
618
  temperature: argv.temperature,
566
619
  maxTokens: argv.maxTokens,
567
620
  systemPrompt: argv.system,
621
+ timeout: argv.timeout,
568
622
  });
569
- const result = (await Promise.race([
570
- generatePromise,
571
- timeoutPromise,
572
- ]));
573
623
  results.push({ prompt: prompts[i], response: result.content });
574
624
  if (spinner) {
575
625
  spinner.render();
@@ -80,6 +80,7 @@ export interface TextGenerationOptions {
80
80
  systemPrompt?: string;
81
81
  schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
82
82
  tools?: Record<string, Tool>;
83
+ timeout?: number | string;
83
84
  }
84
85
  /**
85
86
  * Stream text options interface
@@ -92,6 +93,7 @@ export interface StreamTextOptions {
92
93
  systemPrompt?: string;
93
94
  schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
94
95
  tools?: Record<string, Tool>;
96
+ timeout?: number | string;
95
97
  }
96
98
  /**
97
99
  * AI Provider interface with flexible parameter support
@@ -80,6 +80,7 @@ export interface TextGenerationOptions {
80
80
  systemPrompt?: string;
81
81
  schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
82
82
  tools?: Record<string, Tool>;
83
+ timeout?: number | string;
83
84
  }
84
85
  /**
85
86
  * Stream text options interface
@@ -92,6 +93,7 @@ export interface StreamTextOptions {
92
93
  systemPrompt?: string;
93
94
  schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
94
95
  tools?: Record<string, Tool>;
96
+ timeout?: number | string;
95
97
  }
96
98
  /**
97
99
  * AI Provider interface with flexible parameter support
@@ -12,6 +12,7 @@ export interface TextGenerationOptions {
12
12
  maxTokens?: number;
13
13
  systemPrompt?: string;
14
14
  schema?: any;
15
+ timeout?: number | string;
15
16
  disableTools?: boolean;
16
17
  }
17
18
  export interface StreamTextOptions {
@@ -20,6 +21,7 @@ export interface StreamTextOptions {
20
21
  temperature?: number;
21
22
  maxTokens?: number;
22
23
  systemPrompt?: string;
24
+ timeout?: number | string;
23
25
  }
24
26
  export interface TextGenerationResult {
25
27
  content: string;
@@ -11,6 +11,7 @@ import { toolRegistry } from "./mcp/tool-registry.js";
11
11
  import { unifiedRegistry } from "./mcp/unified-registry.js";
12
12
  import { logger } from "./utils/logger.js";
13
13
  import { getBestProvider } from "./utils/providerUtils-fixed.js";
14
+ import { TimeoutError } from "./utils/timeout.js";
14
15
  export class NeuroLink {
15
16
  mcpInitialized = false;
16
17
  contextManager;
@@ -32,10 +33,9 @@ export class NeuroLink {
32
33
  const mcpInitPromise = Promise.race([
33
34
  this.doIsolatedMCPInitialization(),
34
35
  new Promise((_, reject) => {
35
- const timer = setTimeout(() => {
36
+ setTimeout(() => {
36
37
  reject(new Error("MCP initialization timeout after 3s"));
37
38
  }, initTimeout);
38
- timer.unref(); // Don't keep process alive
39
39
  }),
40
40
  ]);
41
41
  await mcpInitPromise;
@@ -134,6 +134,7 @@ export class NeuroLink {
134
134
  temperature: options.temperature,
135
135
  maxTokens: options.maxTokens,
136
136
  systemPrompt: enhancedSystemPrompt,
137
+ timeout: options.timeout,
137
138
  }, options.schema);
138
139
  if (!result) {
139
140
  throw new Error("No response received from AI provider");
@@ -222,6 +223,7 @@ export class NeuroLink {
222
223
  temperature: options.temperature,
223
224
  maxTokens: options.maxTokens,
224
225
  systemPrompt: options.systemPrompt,
226
+ timeout: options.timeout,
225
227
  }, options.schema);
226
228
  if (!result) {
227
229
  throw new Error("No response received from AI provider");
@@ -252,9 +254,18 @@ export class NeuroLink {
252
254
  catch (error) {
253
255
  const errorMessage = error instanceof Error ? error.message : String(error);
254
256
  lastError = error instanceof Error ? error : new Error(errorMessage);
257
+ // Special handling for timeout errors
258
+ if (error instanceof TimeoutError) {
259
+ logger.warn(`[${functionTag}] Provider timed out`, {
260
+ provider: providerName,
261
+ timeout: error.timeout,
262
+ operation: error.operation,
263
+ });
264
+ }
255
265
  logger.debug(`[${functionTag}] Provider failed, trying next`, {
256
266
  provider: providerName,
257
267
  error: errorMessage,
268
+ isTimeout: error instanceof TimeoutError,
258
269
  remainingProviders: tryProviders.slice(tryProviders.indexOf(providerName) + 1),
259
270
  });
260
271
  // Continue to next provider
@@ -338,6 +349,7 @@ Note: Tool integration is currently in development. Please provide helpful respo
338
349
  temperature: options.temperature,
339
350
  maxTokens: options.maxTokens,
340
351
  systemPrompt: options.systemPrompt,
352
+ timeout: options.timeout,
341
353
  });
342
354
  if (!result) {
343
355
  throw new Error("No stream response received from AI provider");
@@ -358,9 +370,18 @@ Note: Tool integration is currently in development. Please provide helpful respo
358
370
  catch (error) {
359
371
  const errorMessage = error instanceof Error ? error.message : String(error);
360
372
  lastError = error instanceof Error ? error : new Error(errorMessage);
373
+ // Special handling for timeout errors
374
+ if (error instanceof TimeoutError) {
375
+ logger.warn(`[${functionTag}] Provider timed out`, {
376
+ provider: providerName,
377
+ timeout: error.timeout,
378
+ operation: error.operation,
379
+ });
380
+ }
361
381
  logger.debug(`[${functionTag}] Provider failed, trying next`, {
362
382
  provider: providerName,
363
383
  error: errorMessage,
384
+ isTimeout: error instanceof TimeoutError,
364
385
  remainingProviders: tryProviders.slice(tryProviders.indexOf(providerName) + 1),
365
386
  });
366
387
  // Continue to next provider
@@ -15,6 +15,7 @@ interface AgentConfig {
15
15
  enableTools?: boolean;
16
16
  enableMCP?: boolean;
17
17
  mcpInitTimeoutMs?: number;
18
+ toolExecutionTimeout?: number | string;
18
19
  mcpDiscoveryOptions?: {
19
20
  searchPaths?: string[];
20
21
  configFiles?: string[];