@juspay/neurolink 7.32.0 → 7.33.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
+ ## [7.33.0](https://github.com/juspay/neurolink/compare/v7.32.0...v7.33.0) (2025-09-03)
+
+ ### Features
+
+ - **(provider):** refactor generate method to use streamText for improved performance and consistency ([a118300](https://github.com/juspay/neurolink/commit/a11830088376b899725bcb1dc2467cb73f44f5b9))
+
  ## [7.32.0](https://github.com/juspay/neurolink/compare/v7.31.0...v7.32.0) (2025-09-03)

  ### Features
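
The only feature in this release moves `BaseProvider.generate()` from `generateText` to `streamText` and accumulates the stream. As a condensed, hedged sketch of that pattern outside NeuroLink (the `@ai-sdk/openai` provider and model name here are illustrative stand-ins, not what the package actually wires up):

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider, not NeuroLink's own model resolution

// Minimal sketch of the generate-via-stream pattern adopted in 7.33.0:
// stream the response, accumulate the text chunks, then await the final
// metadata so callers still receive a generateText-shaped result.
async function generateViaStream(prompt: string) {
  const streamResult = await streamText({
    model: openai("gpt-4o-mini"), // assumption: any AI SDK-compatible model works here
    messages: [{ role: "user", content: prompt }],
  });

  let accumulatedContent = "";
  for await (const chunk of streamResult.textStream) {
    accumulatedContent += chunk; // accumulate streamed text
  }

  // usage (and toolCalls/toolResults in the real implementation) resolve
  // once the stream has finished
  const usage = await streamResult.usage;
  return { text: accumulatedContent, usage };
}
```

Accumulating `textStream` and then awaiting `usage`/`toolCalls` keeps the return shape compatible with the previous `generateText` result, which is exactly what the dist diff below does.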
@@ -37,6 +37,8 @@ export declare abstract class BaseProvider implements AIProvider {
  /**
  * Text generation method - implements AIProvider interface
  * Tools are always available unless explicitly disabled
+ * IMPLEMENTATION NOTE: Uses streamText() under the hood and accumulates results
+ * for consistency and better performance
  */
  generate(optionsOrPrompt: TextGenerationOptions | string, _analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
  /**
@@ -110,6 +112,10 @@ export declare abstract class BaseProvider implements AIProvider {
  * MCP tools are added when available (without blocking)
  */
  protected getAllTools(): Promise<Record<string, Tool>>;
+ /**
+ * Calculate actual cost based on token usage and provider configuration
+ */
+ private calculateActualCost;
  /**
  * Convert MCP JSON Schema to Zod schema for AI SDK tools
  * Handles common MCP schema patterns safely
@@ -146,6 +146,8 @@ export class BaseProvider {
  /**
  * Text generation method - implements AIProvider interface
  * Tools are always available unless explicitly disabled
+ * IMPLEMENTATION NOTE: Uses streamText() under the hood and accumulates results
+ * for consistency and better performance
  */
  async generate(optionsOrPrompt, _analysisSchema) {
  const options = this.normalizeTextOptions(optionsOrPrompt);
@@ -153,8 +155,9 @@ export class BaseProvider {
  this.validateOptions(options);
  const startTime = Date.now();
  try {
- // Import generateText dynamically to avoid circular dependencies
- const { generateText } = await import("ai");
+ // Import streamText dynamically to avoid circular dependencies
+ // Using streamText instead of generateText for unified implementation
+ const { streamText } = await import("ai");
  // Get ALL available tools (direct + MCP + external from options)
  const shouldUseTools = !options.disableTools && this.supportsTools();
  const baseTools = shouldUseTools ? await this.getAllTools() : {};
@@ -175,7 +178,8 @@ export class BaseProvider {
  const model = await this.getAISDKModelWithMiddleware(options);
  // Build proper message array with conversation history
  const messages = buildMessagesArray(options);
- const result = await generateText({
+ // Use streamText and accumulate results instead of generateText
+ const streamResult = await streamText({
  model,
  messages: messages,
  tools,
@@ -184,31 +188,30 @@ export class BaseProvider {
  temperature: options.temperature,
  maxTokens: options.maxTokens || 8192,
  });
+ // Accumulate the streamed content
+ let accumulatedContent = "";
+ // Wait for the stream to complete and accumulate content
+ for await (const chunk of streamResult.textStream) {
+ accumulatedContent += chunk;
+ }
+ // Get the final result - this should include usage, toolCalls, etc.
+ const usage = await streamResult.usage;
+ const toolCalls = await streamResult.toolCalls;
+ const toolResults = await streamResult.toolResults;
  const responseTime = Date.now() - startTime;
+ // Create a result object compatible with generateText format
+ const result = {
+ text: accumulatedContent,
+ usage: usage,
+ toolCalls: toolCalls,
+ toolResults: toolResults,
+ steps: streamResult.steps, // Include steps for tool execution tracking
+ };
  try {
- // Calculate actual cost based on token usage and provider configuration
- const calculateActualCost = () => {
- try {
- const costInfo = modelConfig.getCostInfo(this.providerName, this.modelName);
- if (!costInfo) {
- return 0; // No cost info available
- }
- const promptTokens = result.usage?.promptTokens || 0;
- const completionTokens = result.usage?.completionTokens || 0;
- // Calculate cost per 1K tokens
- const inputCost = (promptTokens / 1000) * costInfo.input;
- const outputCost = (completionTokens / 1000) * costInfo.output;
- return inputCost + outputCost;
- }
- catch (error) {
- logger.debug(`Cost calculation failed for ${this.providerName}:`, error);
- return 0; // Fallback to 0 on any error
- }
- };
- const actualCost = calculateActualCost();
+ const actualCost = await this.calculateActualCost(usage || { promptTokens: 0, completionTokens: 0, totalTokens: 0 });
  recordProviderPerformanceFromMetrics(this.providerName, {
  responseTime,
- tokensGenerated: result.usage?.totalTokens || 0,
+ tokensGenerated: usage?.totalTokens || 0,
  cost: actualCost,
  success: true,
  });
@@ -216,7 +219,7 @@ export class BaseProvider {
  const optimizedProvider = getPerformanceOptimizedProvider("speed");
  logger.debug(`🚀 Performance recorded for ${this.providerName}:`, {
  responseTime: `${responseTime}ms`,
- tokens: result.usage?.totalTokens || 0,
+ tokens: usage?.totalTokens || 0,
  estimatedCost: `$${actualCost.toFixed(6)}`,
  recommendedSpeedProvider: optimizedProvider?.provider || "none",
  });
@@ -228,11 +231,9 @@ export class BaseProvider {
  // AI SDK puts tool calls in steps array for multi-step generation
  const toolsUsed = [];
  // First check direct tool calls (fallback)
- if (result.toolCalls && result.toolCalls.length > 0) {
- toolsUsed.push(...result.toolCalls.map((tc) => {
- return (tc.toolName ||
- tc.name ||
- "unknown");
+ if (toolCalls && toolCalls.length > 0) {
+ toolsUsed.push(...toolCalls.map((tc) => {
+ return tc.toolName || "unknown";
  }));
  }
  // Then check steps for tool calls (primary source for multi-step)
@@ -326,20 +327,14 @@ export class BaseProvider {
  },
  provider: this.providerName,
  model: this.modelName,
- toolCalls: result.toolCalls
- ? result.toolCalls.map((tc) => ({
- toolCallId: tc.toolCallId ||
- tc.id ||
- "unknown",
- toolName: tc.toolName ||
- tc.name ||
- "unknown",
- args: tc.args ||
- tc.parameters ||
- {},
+ toolCalls: toolCalls
+ ? toolCalls.map((tc) => ({
+ toolCallId: tc.toolCallId || "unknown",
+ toolName: tc.toolName || "unknown",
+ args: tc.args || {},
  }))
  : [],
- toolResults: result.toolResults,
+ toolResults: toolResults || [],
  toolsUsed: uniqueToolsUsed,
  toolExecutions, // ✅ Add extracted tool executions
  availableTools: Object.keys(tools).map((name) => {
@@ -655,6 +650,27 @@ export class BaseProvider {
  logger.debug(`[BaseProvider] getAllTools returning tools: ${getKeysAsString(tools)}`);
  return tools;
  }
+ /**
+ * Calculate actual cost based on token usage and provider configuration
+ */
+ async calculateActualCost(usage) {
+ try {
+ const costInfo = modelConfig.getCostInfo(this.providerName, this.modelName);
+ if (!costInfo) {
+ return 0; // No cost info available
+ }
+ const promptTokens = usage?.promptTokens || 0;
+ const completionTokens = usage?.completionTokens || 0;
+ // Calculate cost per 1K tokens
+ const inputCost = (promptTokens / 1000) * costInfo.input;
+ const outputCost = (completionTokens / 1000) * costInfo.output;
+ return inputCost + outputCost;
+ }
+ catch (error) {
+ logger.debug(`Cost calculation failed for ${this.providerName}:`, error);
+ return 0; // Fallback to 0 on any error
+ }
+ }
  /**
  * Convert MCP JSON Schema to Zod schema for AI SDK tools
  * Handles common MCP schema patterns safely
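
The other notable change above is that cost estimation moves into a `calculateActualCost` method that prices token usage per 1K tokens against the provider's configured input/output rates. A hedged, standalone sketch of that arithmetic (the rates below are made-up placeholders, not NeuroLink's or any provider's real pricing):

```ts
// Standalone sketch of the per-1K-token cost formula used by calculateActualCost.
// costInfo.input / costInfo.output are treated as USD per 1,000 tokens.
interface CostInfo {
  input: number; // $ per 1K prompt tokens
  output: number; // $ per 1K completion tokens
}

function estimateCost(
  usage: { promptTokens?: number; completionTokens?: number },
  costInfo: CostInfo | null,
): number {
  if (!costInfo) {
    return 0; // mirror the helper's fallback when no pricing is configured
  }
  const promptTokens = usage.promptTokens ?? 0;
  const completionTokens = usage.completionTokens ?? 0;
  const inputCost = (promptTokens / 1000) * costInfo.input;
  const outputCost = (completionTokens / 1000) * costInfo.output;
  return inputCost + outputCost;
}

// Worked example with placeholder rates: 1,200 prompt tokens at $0.0005/1K plus
// 300 completion tokens at $0.0015/1K => 1.2 * 0.0005 + 0.3 * 0.0015 = 0.00105
console.log(estimateCost({ promptTokens: 1200, completionTokens: 300 }, { input: 0.0005, output: 0.0015 }));
```

Extracting the former inline closure into a private method keeps `generate()` shorter and gives the zero-cost fallback a single reusable home.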
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@juspay/neurolink",
- "version": "7.32.0",
+ "version": "7.33.0",
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
  "author": {
  "name": "Juspay Technologies",