@juspay/neurolink 7.0.0 → 7.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/CHANGELOG.md +15 -4
  2. package/README.md +16 -11
  3. package/dist/cli/commands/config.d.ts +2 -2
  4. package/dist/cli/commands/config.js +22 -21
  5. package/dist/cli/commands/mcp.d.ts +79 -0
  6. package/dist/cli/commands/mcp.js +916 -0
  7. package/dist/cli/commands/models.d.ts +63 -0
  8. package/dist/cli/commands/models.js +653 -0
  9. package/dist/cli/commands/ollama.js +56 -55
  10. package/dist/cli/factories/commandFactory.d.ts +67 -2
  11. package/dist/cli/factories/commandFactory.js +840 -92
  12. package/dist/cli/index.d.ts +6 -0
  13. package/dist/cli/index.js +42 -999
  14. package/dist/cli/utils/completeSetup.js +9 -8
  15. package/dist/cli/utils/envManager.js +7 -6
  16. package/dist/cli/utils/interactiveSetup.js +20 -19
  17. package/dist/core/analytics.js +25 -38
  18. package/dist/core/baseProvider.d.ts +8 -0
  19. package/dist/core/baseProvider.js +177 -68
  20. package/dist/core/constants.d.ts +11 -0
  21. package/dist/core/constants.js +17 -0
  22. package/dist/core/evaluation.js +25 -14
  23. package/dist/core/factory.js +21 -18
  24. package/dist/core/streamAnalytics.d.ts +65 -0
  25. package/dist/core/streamAnalytics.js +125 -0
  26. package/dist/factories/providerRegistry.js +3 -1
  27. package/dist/lib/core/analytics.js +25 -38
  28. package/dist/lib/core/baseProvider.d.ts +8 -0
  29. package/dist/lib/core/baseProvider.js +177 -68
  30. package/dist/lib/core/constants.d.ts +11 -0
  31. package/dist/lib/core/constants.js +17 -0
  32. package/dist/lib/core/evaluation.js +25 -14
  33. package/dist/lib/core/factory.js +22 -18
  34. package/dist/lib/core/streamAnalytics.d.ts +65 -0
  35. package/dist/lib/core/streamAnalytics.js +125 -0
  36. package/dist/lib/factories/providerRegistry.js +3 -1
  37. package/dist/lib/mcp/toolRegistry.d.ts +5 -0
  38. package/dist/lib/mcp/toolRegistry.js +60 -0
  39. package/dist/lib/models/modelRegistry.d.ts +132 -0
  40. package/dist/lib/models/modelRegistry.js +483 -0
  41. package/dist/lib/models/modelResolver.d.ts +115 -0
  42. package/dist/lib/models/modelResolver.js +467 -0
  43. package/dist/lib/neurolink.d.ts +4 -1
  44. package/dist/lib/neurolink.js +108 -69
  45. package/dist/lib/providers/anthropic.js +3 -0
  46. package/dist/lib/providers/googleAiStudio.js +13 -0
  47. package/dist/lib/providers/huggingFace.js +15 -3
  48. package/dist/lib/providers/mistral.js +19 -7
  49. package/dist/lib/providers/ollama.js +31 -7
  50. package/dist/lib/providers/openAI.js +12 -0
  51. package/dist/lib/sdk/toolRegistration.js +17 -0
  52. package/dist/lib/types/cli.d.ts +56 -1
  53. package/dist/lib/types/contextTypes.d.ts +110 -0
  54. package/dist/lib/types/contextTypes.js +176 -0
  55. package/dist/lib/types/index.d.ts +4 -1
  56. package/dist/lib/types/mcpTypes.d.ts +118 -7
  57. package/dist/lib/types/providers.d.ts +81 -0
  58. package/dist/lib/types/streamTypes.d.ts +44 -7
  59. package/dist/lib/types/tools.d.ts +9 -0
  60. package/dist/lib/types/universalProviderOptions.d.ts +3 -1
  61. package/dist/lib/types/universalProviderOptions.js +2 -1
  62. package/dist/lib/utils/logger.d.ts +7 -0
  63. package/dist/lib/utils/logger.js +16 -6
  64. package/dist/lib/utils/performance.d.ts +105 -0
  65. package/dist/lib/utils/performance.js +210 -0
  66. package/dist/lib/utils/providerUtils.js +9 -2
  67. package/dist/lib/utils/retryHandler.d.ts +89 -0
  68. package/dist/lib/utils/retryHandler.js +269 -0
  69. package/dist/mcp/toolRegistry.d.ts +5 -0
  70. package/dist/mcp/toolRegistry.js +60 -0
  71. package/dist/models/modelRegistry.d.ts +132 -0
  72. package/dist/models/modelRegistry.js +483 -0
  73. package/dist/models/modelResolver.d.ts +115 -0
  74. package/dist/models/modelResolver.js +468 -0
  75. package/dist/neurolink.d.ts +4 -1
  76. package/dist/neurolink.js +108 -69
  77. package/dist/providers/anthropic.js +3 -0
  78. package/dist/providers/googleAiStudio.js +13 -0
  79. package/dist/providers/huggingFace.js +15 -3
  80. package/dist/providers/mistral.js +19 -7
  81. package/dist/providers/ollama.js +31 -7
  82. package/dist/providers/openAI.js +12 -0
  83. package/dist/sdk/toolRegistration.js +17 -0
  84. package/dist/types/cli.d.ts +56 -1
  85. package/dist/types/contextTypes.d.ts +110 -0
  86. package/dist/types/contextTypes.js +177 -0
  87. package/dist/types/index.d.ts +4 -1
  88. package/dist/types/mcpTypes.d.ts +118 -7
  89. package/dist/types/providers.d.ts +81 -0
  90. package/dist/types/streamTypes.d.ts +44 -7
  91. package/dist/types/tools.d.ts +9 -0
  92. package/dist/types/universalProviderOptions.d.ts +3 -1
  93. package/dist/types/universalProviderOptions.js +3 -1
  94. package/dist/utils/logger.d.ts +7 -0
  95. package/dist/utils/logger.js +16 -6
  96. package/dist/utils/performance.d.ts +105 -0
  97. package/dist/utils/performance.js +210 -0
  98. package/dist/utils/providerUtils.js +9 -2
  99. package/dist/utils/retryHandler.d.ts +89 -0
  100. package/dist/utils/retryHandler.js +269 -0
  101. package/package.json +2 -1

package/dist/cli/utils/completeSetup.js

@@ -7,6 +7,7 @@
  import { runInteractiveSetup, testProviderConnectivity, displaySetupSummary, } from "./interactiveSetup.js";
  import { updateEnvFile, displayEnvUpdateSummary } from "./envManager.js";
  import chalk from "chalk";
+ import { logger } from "../../lib/utils/logger.js";
  /**
  * Run the complete interactive setup process
  */
@@ -14,20 +15,20 @@ export async function runCompleteSetup(quiet = false) {
  try {
  // Step 1: Run interactive setup wizard
  if (!quiet) {
- console.log(chalk.blue("🚀 Starting NeuroLink Configuration Setup...\n"));
+ logger.always(chalk.blue("🚀 Starting NeuroLink Configuration Setup...\n"));
  }
  const setupResult = await runInteractiveSetup(quiet);
  // If no providers selected, exit early
  if (setupResult.selectedProviders.length === 0) {
  if (!quiet) {
- console.log(chalk.yellow("⚠️ No providers selected. Setup cancelled."));
+ logger.always(chalk.yellow("⚠️ No providers selected. Setup cancelled."));
  }
  return setupResult;
  }
  // Step 2: Update environment file with credentials
  if (Object.keys(setupResult.credentials).length > 0) {
  if (!quiet) {
- console.log(chalk.blue("\n💾 Updating environment configuration...\n"));
+ logger.always(chalk.blue("\n💾 Updating environment configuration...\n"));
  }
  try {
  const envResult = updateEnvFile(setupResult.credentials, ".env", true);
@@ -42,14 +43,14 @@ export async function runCompleteSetup(quiet = false) {
  }
  catch (error) {
  if (!quiet) {
- console.error(chalk.red(`❌ Failed to update environment file: ${error instanceof Error ? error.message : String(error)}`));
+ logger.error(chalk.red(`❌ Failed to update environment file: ${error instanceof Error ? error.message : String(error)}`));
  }
  throw error;
  }
  }
  // Step 3: Test provider connectivity
  if (!quiet) {
- console.log(chalk.blue("\n🧪 Testing configured providers...\n"));
+ logger.always(chalk.blue("\n🧪 Testing configured providers...\n"));
  }
  setupResult.testResults = await testProviderConnectivity(setupResult.selectedProviders, quiet);
  // Step 4: Display summary
@@ -58,8 +59,8 @@ export async function runCompleteSetup(quiet = false) {
  }
  catch (error) {
  if (!quiet) {
- console.error(chalk.red(`❌ Setup failed: ${error instanceof Error ? error.message : String(error)}`));
- console.log(chalk.yellow("💡 You can retry setup with: neurolink config setup"));
+ logger.error(chalk.red(`❌ Setup failed: ${error instanceof Error ? error.message : String(error)}`));
+ logger.always(chalk.yellow("💡 You can retry setup with: neurolink config setup"));
  }
  throw error;
  }
@@ -75,7 +76,7 @@ export async function configSetup(quiet = false) {
  */
  export async function configInit(quiet = false) {
  if (!quiet) {
- console.log(chalk.gray("📝 config init is an alias for config setup\n"));
+ logger.always(chalk.gray("📝 config init is an alias for config setup\n"));
  }
  await runCompleteSetup(quiet);
  }

package/dist/cli/utils/envManager.js

@@ -5,6 +5,7 @@
  */
  import fs from "fs";
  import chalk from "chalk";
+ import { logger } from "../../lib/utils/logger.js";
  /**
  * Create a timestamped backup of the existing .env file
  */
@@ -158,23 +159,23 @@ export function displayEnvUpdateSummary(result, quiet = false) {
  return;
  }
  if (result.backup.existed && result.backup.backupPath) {
- console.log(chalk.gray(`💾 Created backup: ${result.backup.backupPath}`));
+ logger.always(chalk.gray(`💾 Created backup: ${result.backup.backupPath}`));
  }
  if (result.added.length > 0) {
- console.log(chalk.green(`➕ Added ${result.added.length} new variables: ${result.added.join(", ")}`));
+ logger.always(chalk.green(`➕ Added ${result.added.length} new variables: ${result.added.join(", ")}`));
  }
  if (result.updated.length > 0) {
- console.log(chalk.yellow(`🔄 Updated ${result.updated.length} existing variables: ${result.updated.join(", ")}`));
+ logger.always(chalk.yellow(`🔄 Updated ${result.updated.length} existing variables: ${result.updated.join(", ")}`));
  }
  if (result.unchanged.length > 0) {
- console.log(chalk.gray(`✓ ${result.unchanged.length} variables unchanged: ${result.unchanged.join(", ")}`));
+ logger.always(chalk.gray(`✓ ${result.unchanged.length} variables unchanged: ${result.unchanged.join(", ")}`));
  }
  const totalChanges = result.added.length + result.updated.length;
  if (totalChanges > 0) {
- console.log(chalk.blue(`📝 Environment file updated with ${totalChanges} changes`));
+ logger.always(chalk.blue(`📝 Environment file updated with ${totalChanges} changes`));
  }
  else {
- console.log(chalk.gray("📝 No changes needed to environment file"));
+ logger.always(chalk.gray("📝 No changes needed to environment file"));
  }
  }
  /**

package/dist/cli/utils/interactiveSetup.js

@@ -8,6 +8,7 @@ import { AIProviderName } from "../../lib/core/types.js";
  import { NeuroLink } from "../../lib/neurolink.js";
  import chalk from "chalk";
  import ora from "ora";
+ import { logger } from "../../lib/utils/logger.js";
  export const PROVIDER_CONFIGS = [
  {
  id: AIProviderName.OPENAI,
@@ -145,8 +146,8 @@ export async function runInteractiveSetup(quiet = false) {
  testResults: [],
  };
  if (!quiet) {
- console.log(chalk.blue("\n🎉 Welcome to NeuroLink Interactive Setup!"));
- console.log(chalk.gray("This wizard will help you configure AI providers for NeuroLink.\n"));
+ logger.always(chalk.blue("\n🎉 Welcome to NeuroLink Interactive Setup!"));
+ logger.always(chalk.gray("This wizard will help you configure AI providers for NeuroLink.\n"));
  }
  // Step 1: Provider Selection
  const providerChoices = PROVIDER_CONFIGS.map((config) => ({
@@ -171,7 +172,7 @@ export async function runInteractiveSetup(quiet = false) {
  result.selectedProviders = selectedProviders;
  // Step 2: Credential Collection
  if (!quiet) {
- console.log(chalk.blue("\n🔑 Collecting credentials for selected providers...\n"));
+ logger.always(chalk.blue("\n🔑 Collecting credentials for selected providers...\n"));
  }
  for (const providerId of selectedProviders) {
  const config = PROVIDER_CONFIGS.find((c) => c.id === providerId);
@@ -179,7 +180,7 @@ export async function runInteractiveSetup(quiet = false) {
  continue;
  }
  if (!quiet) {
- console.log(chalk.yellow(`\n📋 Configuring ${config.name}:`));
+ logger.always(chalk.yellow(`\n📋 Configuring ${config.name}:`));
  }
  for (const envVar of config.envVars) {
  const currentValue = process.env[envVar.key];
@@ -235,7 +236,7 @@ export async function testProviderConnectivity(providers, quiet = false) {
  const sdk = new NeuroLink();
  const results = [];
  if (!quiet) {
- console.log(chalk.blue("\n🧪 Testing provider connectivity...\n"));
+ logger.always(chalk.blue("\n🧪 Testing provider connectivity...\n"));
  }
  const spinner = quiet ? null : ora().start();
  for (const provider of providers) {
@@ -252,7 +253,7 @@ export async function testProviderConnectivity(providers, quiet = false) {
  spinner.start(); // Restart for next provider
  }
  else if (!quiet) {
- console.log(`${provider}: ${chalk.green("✅ Working")} (${duration}ms)`);
+ logger.always(`${provider}: ${chalk.green("✅ Working")} (${duration}ms)`);
  }
  }
  catch (error) {
@@ -263,7 +264,7 @@ export async function testProviderConnectivity(providers, quiet = false) {
  spinner.start(); // Restart for next provider
  }
  else if (!quiet) {
- console.error(`${provider}: ${chalk.red("❌ Failed")} - ${errorMessage.split("\n")[0]}`);
+ logger.error(`${provider}: ${chalk.red("❌ Failed")} - ${errorMessage.split("\n")[0]}`);
  }
  }
  }
@@ -281,22 +282,22 @@ export function displaySetupSummary(result, quiet = false) {
  }
  const working = result.testResults.filter((r) => r.status === "working").length;
  const total = result.testResults.length;
- console.log(chalk.blue("\n📊 Setup Summary:"));
- console.log(chalk.blue("================"));
- console.log(`Selected providers: ${result.selectedProviders.length}`);
- console.log(`Working providers: ${working}/${total}`);
+ logger.always(chalk.blue("\n📊 Setup Summary:"));
+ logger.always(chalk.blue("================"));
+ logger.always(`Selected providers: ${result.selectedProviders.length}`);
+ logger.always(`Working providers: ${working}/${total}`);
  if (result.envFileBackup) {
- console.log(chalk.gray(`Environment backup: ${result.envFileBackup}`));
+ logger.always(chalk.gray(`Environment backup: ${result.envFileBackup}`));
  }
  if (working > 0) {
- console.log(chalk.green("\n✅ Setup completed successfully!"));
- console.log(chalk.yellow("💡 You can now use NeuroLink with your configured providers."));
- console.log(chalk.gray(' Try: neurolink generate "Hello, AI!"'));
+ logger.always(chalk.green("\n✅ Setup completed successfully!"));
+ logger.always(chalk.yellow("💡 You can now use NeuroLink with your configured providers."));
+ logger.always(chalk.gray(' Try: neurolink generate "Hello, AI!"'));
  }
  else {
- console.log(chalk.red("\n❌ No providers are working."));
- console.log(chalk.yellow("💡 Please check your credentials and try again."));
- console.log(chalk.gray(" Run: neurolink config setup"));
+ logger.always(chalk.red("\n❌ No providers are working."));
+ logger.always(chalk.yellow("💡 Please check your credentials and try again."));
+ logger.always(chalk.gray(" Run: neurolink config setup"));
  }
- console.log(chalk.blue("\n📚 Documentation: https://github.com/juspay/neurolink#setup"));
+ logger.always(chalk.blue("\n📚 Documentation: https://github.com/juspay/neurolink#setup"));
  }
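
Across completeSetup.js, envManager.js, and interactiveSetup.js the change is the same mechanical migration: console.log/console.error calls are routed through a shared logger, with logger.always() for user-facing CLI output and logger.error() for failures. The logger internals are not part of this excerpt (logger.js changes by +16 -6 in the file list), so the sketch below is only a plausible minimal shape; the NEUROLINK_LOG_LEVEL gating is an assumption, not taken from the package.

// Illustrative sketch only; the real logger lives in package/dist/lib/utils/logger.js.
// Assumption: always() bypasses the configured level, while warn()/debug() respect it.
const level = process.env.NEUROLINK_LOG_LEVEL || "info"; // hypothetical env var
export const logger = {
  always: (...args) => console.log(...args), // printed even when normal logs are silenced
  error: (...args) => console.error(...args),
  warn: (...args) => { if (level !== "silent") { console.warn(...args); } },
  debug: (...args) => { if (level === "debug") { console.debug(...args); } },
};

The practical effect is that informational CLI output can be suppressed or redirected in one place (for example, when a command emits machine-readable JSON) instead of at every call site.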

package/dist/core/analytics.js (an identical change ships in package/dist/lib/core/analytics.js)

@@ -50,52 +50,39 @@ export function createAnalytics(provider, model, result, responseTime, context)
  * Extract token usage from various AI result formats
  */
  function extractTokenUsage(result) {
- // Handle different response formats
+ // Use properly typed usage object from BaseProvider or direct AI SDK
  if (result.usage &&
  typeof result.usage === "object" &&
  result.usage !== null) {
  const usage = result.usage;
- // Standard format
- if (typeof usage.promptTokens === "number" &&
- typeof usage.completionTokens === "number") {
- return {
- input: usage.promptTokens || 0,
- output: usage.completionTokens || 0,
- total: typeof usage.totalTokens === "number"
- ? usage.totalTokens
- : usage.promptTokens + usage.completionTokens,
- };
+ // Try BaseProvider normalized format first (inputTokens/outputTokens)
+ if (typeof usage.inputTokens === "number" ||
+ typeof usage.outputTokens === "number") {
+ const input = typeof usage.inputTokens === "number" ? usage.inputTokens : 0;
+ const output = typeof usage.outputTokens === "number" ? usage.outputTokens : 0;
+ const total = typeof usage.totalTokens === "number"
+ ? usage.totalTokens
+ : input + output;
+ return { input, output, total };
  }
- // Alternative formats
- if (typeof usage.input_tokens === "number" &&
- typeof usage.output_tokens === "number") {
- return {
- input: usage.input_tokens || 0,
- output: usage.output_tokens || 0,
- total: typeof usage.total_tokens === "number"
- ? usage.total_tokens
- : usage.input_tokens + usage.output_tokens,
- };
+ // Try OpenAI/Mistral format (promptTokens/completionTokens)
+ if (typeof usage.promptTokens === "number" ||
+ typeof usage.completionTokens === "number") {
+ const input = typeof usage.promptTokens === "number" ? usage.promptTokens : 0;
+ const output = typeof usage.completionTokens === "number" ? usage.completionTokens : 0;
+ const total = typeof usage.totalTokens === "number"
+ ? usage.totalTokens
+ : input + output;
+ return { input, output, total };
  }
- // Generic tokens field
- if (typeof usage.tokens === "number") {
- return {
- input: 0,
- output: 0,
- total: usage.tokens,
- };
+ // Handle total-only case
+ if (typeof usage.totalTokens === "number") {
+ return { input: 0, output: 0, total: usage.totalTokens };
  }
  }
- // Fallback: estimate from text length
- const textLength = (typeof result.text === "string" ? result.text.length : 0) ||
- (typeof result.content === "string" ? result.content.length : 0) ||
- 0;
- const estimatedTokens = Math.ceil(textLength / 4); // ~4 chars per token
- return {
- input: 0,
- output: estimatedTokens,
- total: estimatedTokens,
- };
+ // Fallback for edge cases
+ logger.debug("Token extraction failed: unknown usage format", { result });
+ return { input: 0, output: 0, total: 0 };
  }
  /**
  * Estimate cost based on provider, model, and token usage
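
The rewritten extractTokenUsage checks usage shapes in a fixed order: the BaseProvider normalized format (inputTokens/outputTokens) first, then the promptTokens/completionTokens format, then a totalTokens-only object. The 7.0.0 behaviors are gone: snake_case input_tokens/output_tokens, the generic tokens field, and the roughly 4-characters-per-token estimate derived from text length. Unknown shapes now log a debug message and return zeros. Illustrative calls (input values invented):

extractTokenUsage({ usage: { inputTokens: 12, outputTokens: 34 } });
// => { input: 12, output: 34, total: 46 }  (BaseProvider normalized format)
extractTokenUsage({ usage: { promptTokens: 12, completionTokens: 34 } });
// => { input: 12, output: 34, total: 46 }  (OpenAI/Mistral format)
extractTokenUsage({ usage: { totalTokens: 46 } });
// => { input: 0, output: 0, total: 46 }    (total-only)
extractTokenUsage({ usage: { input_tokens: 12, output_tokens: 34 } });
// => { input: 0, output: 0, total: 0 }     (snake_case no longer recognized; debug-logged)

Callers that relied on the old text-length estimate will now see zero totals whenever a provider reports no recognizable usage object.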

package/dist/core/baseProvider.d.ts (an identical change ships in package/dist/lib/core/baseProvider.d.ts)

@@ -447,5 +447,13 @@ export declare abstract class BaseProvider implements AIProvider {
  * Get timeout value in milliseconds
  */
  getTimeout(options: TextGenerationOptions | StreamOptions): number;
+ /**
+ * Utility method to chunk large prompts into smaller pieces
+ * @param prompt The prompt to chunk
+ * @param maxChunkSize Maximum size per chunk (default: 900,000 characters)
+ * @param overlap Overlap between chunks to maintain context (default: 100 characters)
+ * @returns Array of prompt chunks
+ */
+ static chunkPrompt(prompt: string, maxChunkSize?: number, overlap?: number): string[];
  }
  export {};

package/dist/core/baseProvider.js (an identical change ships in package/dist/lib/core/baseProvider.js)

@@ -1,4 +1,5 @@
  import { logger } from "../utils/logger.js";
+ import { SYSTEM_LIMITS } from "../core/constants.js";
  import { directAgentTools } from "../agent/directTools.js";
  /**
  * Validates if a result contains a valid toolsObject structure

@@ -49,80 +50,96 @@ export class BaseProvider {
  */
  async stream(optionsOrPrompt, analysisSchema) {
  const options = this.normalizeStreamOptions(optionsOrPrompt);
- // If tools are not disabled AND provider supports tools, use generate() and create synthetic stream
- if (!options.disableTools && this.supportsTools()) {
- try {
- // Convert stream options to text generation options
- const textOptions = {
- prompt: options.input?.text || "",
- systemPrompt: options.systemPrompt,
- temperature: options.temperature,
- maxTokens: options.maxTokens,
- disableTools: false,
- maxSteps: options.maxSteps || 5,
- provider: options.provider,
- model: options.model,
- };
- const result = await this.generate(textOptions, analysisSchema);
- // Create a synthetic stream from the generate result that simulates progressive delivery
- return {
- stream: (async function* () {
- if (result?.content) {
- // Split content into words for more natural streaming
- const words = result.content.split(/(\s+)/); // Keep whitespace
- let buffer = "";
- for (let i = 0; i < words.length; i++) {
- buffer += words[i];
- // Yield chunks of roughly 5-10 words or at punctuation
- const shouldYield = i === words.length - 1 || // Last word
- buffer.length > 50 || // Buffer getting long
- /[.!?;,]\s*$/.test(buffer); // End of sentence/clause
- if (shouldYield && buffer.trim()) {
+ // CRITICAL FIX: Always prefer real streaming over fake streaming
+ // Try real streaming first, use fake streaming only as fallback
+ try {
+ const realStreamResult = await this.executeStream(options, analysisSchema);
+ // If real streaming succeeds, return it (with tools support via Vercel AI SDK)
+ return realStreamResult;
+ }
+ catch (realStreamError) {
+ logger.warn(`Real streaming failed for ${this.providerName}, falling back to fake streaming:`, realStreamError);
+ // Fallback to fake streaming only if real streaming fails AND tools are enabled
+ if (!options.disableTools && this.supportsTools()) {
+ try {
+ // Convert stream options to text generation options
+ const textOptions = {
+ prompt: options.input?.text || "",
+ systemPrompt: options.systemPrompt,
+ temperature: options.temperature,
+ maxTokens: options.maxTokens,
+ disableTools: false,
+ maxSteps: options.maxSteps || 5,
+ provider: options.provider,
+ model: options.model,
+ // 🔧 FIX: Include analytics and evaluation options from stream options
+ enableAnalytics: options.enableAnalytics,
+ enableEvaluation: options.enableEvaluation,
+ evaluationDomain: options.evaluationDomain,
+ toolUsageContext: options.toolUsageContext,
+ context: options.context,
+ };
+ const result = await this.generate(textOptions, analysisSchema);
+ // Create a synthetic stream from the generate result that simulates progressive delivery
+ return {
+ stream: (async function* () {
+ if (result?.content) {
+ // Split content into words for more natural streaming
+ const words = result.content.split(/(\s+)/); // Keep whitespace
+ let buffer = "";
+ for (let i = 0; i < words.length; i++) {
+ buffer += words[i];
+ // Yield chunks of roughly 5-10 words or at punctuation
+ const shouldYield = i === words.length - 1 || // Last word
+ buffer.length > 50 || // Buffer getting long
+ /[.!?;,]\s*$/.test(buffer); // End of sentence/clause
+ if (shouldYield && buffer.trim()) {
+ yield { content: buffer };
+ buffer = "";
+ // Small delay to simulate streaming (1-10ms)
+ await new Promise((resolve) => setTimeout(resolve, Math.random() * 9 + 1));
+ }
+ }
+ // Yield any remaining content
+ if (buffer.trim()) {
  yield { content: buffer };
- buffer = "";
- // Small delay to simulate streaming (1-10ms)
- await new Promise((resolve) => setTimeout(resolve, Math.random() * 9 + 1));
  }
  }
- // Yield any remaining content
- if (buffer.trim()) {
- yield { content: buffer };
- }
- }
- })(),
- usage: result?.usage,
- provider: result?.provider,
- model: result?.model,
- toolCalls: result?.toolCalls?.map((call) => ({
- toolName: call.toolName,
- parameters: call.args,
- id: call.toolCallId,
- })),
- toolResults: result?.toolResults
- ? result.toolResults.map((tr) => ({
- toolName: tr.toolName || "unknown",
- status: (tr.status === "error"
- ? "failure"
- : "success"),
- result: tr.result,
- error: tr.error,
- }))
- : undefined,
- };
+ })(),
+ usage: result?.usage,
+ provider: result?.provider,
+ model: result?.model,
+ toolCalls: result?.toolCalls?.map((call) => ({
+ toolName: call.toolName,
+ parameters: call.args,
+ id: call.toolCallId,
+ })),
+ toolResults: result?.toolResults
+ ? result.toolResults.map((tr) => ({
+ toolName: tr.toolName || "unknown",
+ status: (tr.status === "error"
+ ? "failure"
+ : "success"),
+ result: tr.result,
+ error: tr.error,
+ }))
+ : undefined,
+ // 🔧 FIX: Include analytics and evaluation from generate result
+ analytics: result?.analytics,
+ evaluation: result?.evaluation,
+ };
+ }
+ catch (error) {
+ logger.error(`Fake streaming fallback failed for ${this.providerName}:`, error);
+ throw this.handleProviderError(error);
+ }
  }
- catch (error) {
- logger.error(`Stream with tools failed for ${this.providerName}:`, error);
- throw this.handleProviderError(error);
+ else {
+ // If real streaming failed and no tools are enabled, re-throw the original error
+ logger.error(`Real streaming failed for ${this.providerName}:`, realStreamError);
+ throw this.handleProviderError(realStreamError);
  }
  }
- // Traditional streaming without tools
- try {
- return await this.executeStream(options, analysisSchema);
- }
- catch (error) {
- logger.error(`Stream failed for ${this.providerName}:`, error);
- throw this.handleProviderError(error);
- }
  }
  /**
  * Text generation method - implements AIProvider interface
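
The control flow above inverts 7.0.0: stream() now always attempts the provider's native executeStream() first, and synthesizes a word-chunked stream from a blocking generate() call only when native streaming throws and tools are enabled. The fallback also now forwards enableAnalytics/enableEvaluation and attaches analytics and evaluation to the stream result. A consumer-side sketch of the resulting surface (the provider instance and option values are assumptions for illustration):

// Sketch: consuming stream() after this change; option and field names appear in the diff above.
const result = await provider.stream({
  input: { text: "Summarize the changelog" },
  enableAnalytics: true,  // now forwarded into the generate() fallback
  enableEvaluation: true,
});
for await (const chunk of result.stream) {
  process.stdout.write(chunk.content); // chunks arrive word-buffered on the fallback path
}
// Populated by the fallback path (and by the native path where supported):
console.log(result.usage, result.toolCalls, result.analytics, result.evaluation);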

@@ -150,6 +167,31 @@ export class BaseProvider {
  temperature: options.temperature,
  maxTokens: options.maxTokens || 8192,
  });
+ // Extract tool names from tool calls for tracking
+ // AI SDK puts tool calls in steps array for multi-step generation
+ const toolsUsed = [];
+ // First check direct tool calls (fallback)
+ if (result.toolCalls && result.toolCalls.length > 0) {
+ toolsUsed.push(...result.toolCalls.map((tc) => {
+ return (tc.toolName ||
+ tc.name ||
+ "unknown");
+ }));
+ }
+ // Then check steps for tool calls (primary source for multi-step)
+ if (result.steps &&
+ Array.isArray(result.steps)) {
+ for (const step of result.steps ||
+ []) {
+ if (step?.toolCalls && Array.isArray(step.toolCalls)) {
+ toolsUsed.push(...step.toolCalls.map((tc) => {
+ return tc.toolName || tc.name || "unknown";
+ }));
+ }
+ }
+ }
+ // Remove duplicates
+ const uniqueToolsUsed = [...new Set(toolsUsed)];
  // Format the result with tool executions included
  const enhancedResult = {
  content: result.text,
@@ -174,6 +216,7 @@ export class BaseProvider {
  }))
  : [],
  toolResults: result.toolResults,
+ toolsUsed: uniqueToolsUsed,
  };
  // Enhanced result with analytics and evaluation
  return await this.enhanceResult(enhancedResult, options, startTime);
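
generate() now collects tool names from both the top-level result.toolCalls and every entry in result.steps (where the AI SDK records multi-step tool calls), deduplicates them with a Set, and exposes the list as toolsUsed on the enhanced result. A toy input showing the aggregation (data invented):

// Given an AI SDK result shaped like this...
const result = {
  toolCalls: [{ toolName: "readFile" }],
  steps: [
    { toolCalls: [{ toolName: "readFile" }] },
    { toolCalls: [{ name: "searchDocs" }] }, // no toolName, so .name is used, else "unknown"
  ],
};
// ...the logic above produces:
// uniqueToolsUsed === ["readFile", "searchDocs"]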

@@ -348,9 +391,43 @@ export class BaseProvider {
  return evaluation;
  }
  validateOptions(options) {
+ // 🔧 EDGE CASE: Basic prompt validation
  if (!options.prompt || options.prompt.trim().length === 0) {
  throw new Error("Prompt is required and cannot be empty");
  }
+ // 🔧 EDGE CASE: Handle very large prompts (>1M characters)
+ if (options.prompt.length > SYSTEM_LIMITS.MAX_PROMPT_LENGTH) {
+ throw new Error(`Prompt too large: ${options.prompt.length} characters (max: ${SYSTEM_LIMITS.MAX_PROMPT_LENGTH}). Consider breaking into smaller chunks. Use BaseProvider.chunkPrompt(prompt, maxSize, overlap) static method for chunking.`);
+ }
+ // 🔧 EDGE CASE: Validate token limits
+ if (options.maxTokens && options.maxTokens > 200000) {
+ throw new Error(`Max tokens too high: ${options.maxTokens} (recommended max: 200,000). This may cause timeouts or API errors.`);
+ }
+ if (options.maxTokens && options.maxTokens < 1) {
+ throw new Error("Max tokens must be at least 1");
+ }
+ // 🔧 EDGE CASE: Validate temperature range
+ if (options.temperature !== undefined) {
+ if (options.temperature < 0 || options.temperature > 2) {
+ throw new Error(`Temperature must be between 0 and 2, got: ${options.temperature}`);
+ }
+ }
+ // 🔧 EDGE CASE: Validate timeout values
+ if (options.timeout !== undefined) {
+ const timeoutMs = typeof options.timeout === "string"
+ ? parseInt(options.timeout, 10)
+ : options.timeout;
+ if (isNaN(timeoutMs) || timeoutMs < 1000) {
+ throw new Error(`Timeout must be at least 1000ms (1 second), got: ${options.timeout}`);
+ }
+ if (timeoutMs > SYSTEM_LIMITS.LONG_TIMEOUT_WARNING) {
+ logger.warn(`⚠️ Very long timeout: ${timeoutMs}ms. This may cause the CLI to hang.`);
+ }
+ }
+ // 🔧 EDGE CASE: Validate maxSteps for tool execution
+ if (options.maxSteps !== undefined && options.maxSteps > 20) {
+ throw new Error(`Max steps too high: ${options.maxSteps} (recommended max: 20). This may cause long execution times.`);
+ }
  }
  getProviderInfo() {
  return {
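
validateOptions now fails fast on out-of-range options before any network call is made. A sketch of calls that would reject under the new guards (the provider instance and values are invented; generate() runs validateOptions internally):

await provider.generate({ prompt: "" });                       // empty prompt
await provider.generate({ prompt: "x".repeat(1_000_001) });    // exceeds MAX_PROMPT_LENGTH (1M chars)
await provider.generate({ prompt: "hi", maxTokens: 250000 });  // above the 200,000 cap
await provider.generate({ prompt: "hi", temperature: 2.5 });   // outside the 0-2 range
await provider.generate({ prompt: "hi", timeout: 500 });       // below the 1000 ms floor
await provider.generate({ prompt: "hi", maxSteps: 25 });       // above the 20-step cap

Note that a timeout above SYSTEM_LIMITS.LONG_TIMEOUT_WARNING (5 minutes) only logs a warning rather than throwing.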

@@ -382,4 +459,36 @@ export class BaseProvider {
  }
  return this.defaultTimeout;
  }
+ /**
+ * Utility method to chunk large prompts into smaller pieces
+ * @param prompt The prompt to chunk
+ * @param maxChunkSize Maximum size per chunk (default: 900,000 characters)
+ * @param overlap Overlap between chunks to maintain context (default: 100 characters)
+ * @returns Array of prompt chunks
+ */
+ static chunkPrompt(prompt, maxChunkSize = 900000, overlap = 100) {
+ if (prompt.length <= maxChunkSize) {
+ return [prompt];
+ }
+ const chunks = [];
+ let start = 0;
+ while (start < prompt.length) {
+ const end = Math.min(start + maxChunkSize, prompt.length);
+ chunks.push(prompt.slice(start, end));
+ // Break if we've reached the end
+ if (end >= prompt.length) {
+ break;
+ }
+ // Move start forward, accounting for overlap
+ const nextStart = end - overlap;
+ // Ensure we make progress (avoid infinite loops)
+ if (nextStart <= start) {
+ start = end;
+ }
+ else {
+ start = Math.max(nextStart, 0);
+ }
+ }
+ return chunks;
+ }
  }
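
chunkPrompt walks the string in maxChunkSize windows and steps back by overlap characters between windows, so consecutive chunks share trailing context; the nextStart <= start guard keeps the loop advancing even when overlap >= maxChunkSize. A worked example with toy sizes (the shipped defaults are 900,000 and 100):

const chunks = BaseProvider.chunkPrompt("abcdefghij", 4, 1);
// window 1: slice(0, 4)  -> "abcd"; next start = 4 - 1 = 3
// window 2: slice(3, 7)  -> "defg"; next start = 7 - 1 = 6
// window 3: slice(6, 10) -> "ghij"; end reached
// chunks === ["abcd", "defg", "ghij"]

This pairs with the MAX_PROMPT_LENGTH guard in validateOptions above, whose error message points callers at this method so an oversized prompt can be split and submitted piece by piece.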

package/dist/core/constants.d.ts (an identical change ships in package/dist/lib/core/constants.d.ts)

@@ -36,6 +36,17 @@ export declare const CLI_LIMITS: {
  default: number;
  };
  };
+ export declare const SYSTEM_LIMITS: {
+ MAX_PROMPT_LENGTH: number;
+ HIGH_MEMORY_THRESHOLD: number;
+ LONG_TIMEOUT_WARNING: number;
+ DEFAULT_CONCURRENCY_LIMIT: number;
+ MAX_CONCURRENCY_LIMIT: number;
+ DEFAULT_RETRY_ATTEMPTS: number;
+ DEFAULT_INITIAL_DELAY: number;
+ DEFAULT_MAX_DELAY: number;
+ DEFAULT_BACKOFF_MULTIPLIER: number;
+ };
  export declare const ENV_DEFAULTS: {
  maxTokens: number;
  temperature: number;
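
The retry entries declared above feed the new retry system (package/dist/lib/utils/retryHandler.js, +269 lines in this release, not shown in this excerpt). Its actual API is not visible here; the following is only a generic exponential-backoff sketch wired to these constants, to show how the defaults combine (waits of 1 s, then 2 s, doubling up to a 30 s cap, across 3 attempts):

import { SYSTEM_LIMITS } from "../core/constants.js";
// Generic sketch, not the package's retryHandler implementation.
async function withRetry(fn) {
  let delay = SYSTEM_LIMITS.DEFAULT_INITIAL_DELAY; // 1000 ms
  for (let attempt = 1; ; attempt++) {
    try {
      return await fn();
    }
    catch (error) {
      if (attempt >= SYSTEM_LIMITS.DEFAULT_RETRY_ATTEMPTS) { // give up after 3 tries
        throw error;
      }
      await new Promise((resolve) => setTimeout(resolve, delay));
      delay = Math.min(delay * SYSTEM_LIMITS.DEFAULT_BACKOFF_MULTIPLIER, SYSTEM_LIMITS.DEFAULT_MAX_DELAY); // x2 per retry, capped at 30000 ms
    }
  }
}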

package/dist/core/constants.js (an identical change ships in package/dist/lib/core/constants.js)

@@ -40,6 +40,23 @@ export const CLI_LIMITS = {
  default: DEFAULT_TEMPERATURE,
  },
  };
+ // Performance and System Limits
+ export const SYSTEM_LIMITS = {
+ // Prompt size limits (baseProvider.ts magic number fix)
+ MAX_PROMPT_LENGTH: 1000000, // 1M characters - prevents memory issues
+ // Memory monitoring thresholds (performance.ts)
+ HIGH_MEMORY_THRESHOLD: 100, // MB - when to warn about memory usage
+ // Timeout warnings (baseProvider.ts)
+ LONG_TIMEOUT_WARNING: 300000, // 5 minutes - when to warn about long timeouts
+ // Concurrency control (neurolink.ts provider testing)
+ DEFAULT_CONCURRENCY_LIMIT: 3, // Max parallel provider tests
+ MAX_CONCURRENCY_LIMIT: 5, // Upper bound for concurrency
+ // Retry system defaults (retryHandler.ts)
+ DEFAULT_RETRY_ATTEMPTS: 3,
+ DEFAULT_INITIAL_DELAY: 1000, // 1 second
+ DEFAULT_MAX_DELAY: 30000, // 30 seconds
+ DEFAULT_BACKOFF_MULTIPLIER: 2,
+ };
  // Environment Variable Support (for future use)
  export const ENV_DEFAULTS = {
  maxTokens: process.env.NEUROLINK_DEFAULT_MAX_TOKENS