@juspay/neurolink 7.37.0 → 7.37.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/cli/commands/config.d.ts +18 -18
  3. package/dist/cli/factories/commandFactory.d.ts +24 -0
  4. package/dist/cli/factories/commandFactory.js +297 -245
  5. package/dist/core/baseProvider.d.ts +40 -3
  6. package/dist/core/baseProvider.js +689 -352
  7. package/dist/core/constants.d.ts +2 -30
  8. package/dist/core/constants.js +15 -43
  9. package/dist/factories/providerFactory.js +23 -6
  10. package/dist/index.d.ts +3 -2
  11. package/dist/index.js +4 -3
  12. package/dist/lib/core/baseProvider.d.ts +40 -3
  13. package/dist/lib/core/baseProvider.js +689 -352
  14. package/dist/lib/core/constants.d.ts +2 -30
  15. package/dist/lib/core/constants.js +15 -43
  16. package/dist/lib/factories/providerFactory.js +23 -6
  17. package/dist/lib/index.d.ts +3 -2
  18. package/dist/lib/index.js +4 -3
  19. package/dist/lib/mcp/externalServerManager.js +2 -2
  20. package/dist/lib/mcp/registry.js +2 -2
  21. package/dist/lib/mcp/servers/agent/directToolsServer.js +19 -10
  22. package/dist/lib/mcp/toolRegistry.js +4 -8
  23. package/dist/lib/neurolink.d.ts +62 -27
  24. package/dist/lib/neurolink.js +415 -719
  25. package/dist/lib/providers/amazonBedrock.js +2 -2
  26. package/dist/lib/providers/googleVertex.d.ts +3 -23
  27. package/dist/lib/providers/googleVertex.js +14 -342
  28. package/dist/lib/providers/openAI.d.ts +23 -0
  29. package/dist/lib/providers/openAI.js +313 -6
  30. package/dist/lib/providers/sagemaker/language-model.d.ts +2 -2
  31. package/dist/lib/sdk/toolRegistration.js +18 -1
  32. package/dist/lib/types/common.d.ts +98 -0
  33. package/dist/lib/types/streamTypes.d.ts +13 -6
  34. package/dist/lib/types/typeAliases.d.ts +3 -2
  35. package/dist/lib/utils/parameterValidation.js +6 -25
  36. package/dist/lib/utils/promptRedaction.js +4 -4
  37. package/dist/lib/utils/schemaConversion.d.ts +14 -0
  38. package/dist/lib/utils/schemaConversion.js +140 -0
  39. package/dist/lib/utils/transformationUtils.js +143 -5
  40. package/dist/mcp/externalServerManager.js +2 -2
  41. package/dist/mcp/registry.js +2 -2
  42. package/dist/mcp/servers/agent/directToolsServer.js +19 -10
  43. package/dist/mcp/toolRegistry.js +4 -8
  44. package/dist/neurolink.d.ts +62 -27
  45. package/dist/neurolink.js +415 -719
  46. package/dist/providers/amazonBedrock.js +2 -2
  47. package/dist/providers/googleVertex.d.ts +3 -23
  48. package/dist/providers/googleVertex.js +14 -342
  49. package/dist/providers/openAI.d.ts +23 -0
  50. package/dist/providers/openAI.js +313 -6
  51. package/dist/providers/sagemaker/language-model.d.ts +2 -2
  52. package/dist/sdk/toolRegistration.js +18 -1
  53. package/dist/types/common.d.ts +98 -0
  54. package/dist/types/streamTypes.d.ts +13 -6
  55. package/dist/types/typeAliases.d.ts +3 -2
  56. package/dist/utils/parameterValidation.js +6 -25
  57. package/dist/utils/promptRedaction.js +4 -4
  58. package/dist/utils/schemaConversion.d.ts +14 -0
  59. package/dist/utils/schemaConversion.js +140 -0
  60. package/dist/utils/transformationUtils.js +143 -5
  61. package/package.json +3 -2
@@ -1,3 +1,4 @@
+ import { z } from "zod";
  import { generateText } from "ai";
  import { MiddlewareFactory } from "../middleware/factory.js";
  import { logger } from "../utils/logger.js";
@@ -9,6 +10,7 @@ import { shouldDisableBuiltinTools } from "../utils/toolUtils.js";
  import { buildMessagesArray, buildMultimodalMessagesArray, } from "../utils/messageBuilder.js";
  import { getKeysAsString, getKeyCount } from "../utils/transformationUtils.js";
  import { validateStreamOptions as validateStreamOpts, validateTextGenerationOptions, ValidationError, createValidationSummary, } from "../utils/parameterValidation.js";
+ import { convertJsonSchemaToZod } from "../utils/schemaConversion.js";
  import { recordProviderPerformanceFromMetrics, getPerformanceOptimizedProvider, } from "./evaluationProviders.js";
  import { modelConfig } from "./modelConfiguration.js";
  /**
@@ -53,18 +55,46 @@ export class BaseProvider {
  */
  async stream(optionsOrPrompt, analysisSchema) {
  const options = this.normalizeStreamOptions(optionsOrPrompt);
+ logger.info(`Starting stream`, {
+ provider: this.providerName,
+ hasTools: !options.disableTools && this.supportsTools(),
+ disableTools: !!options.disableTools,
+ supportsTools: this.supportsTools(),
+ inputLength: options.input?.text?.length || 0,
+ maxTokens: options.maxTokens,
+ temperature: options.temperature,
+ timestamp: Date.now(),
+ });
  // CRITICAL FIX: Always prefer real streaming over fake streaming
  // Try real streaming first, use fake streaming only as fallback
  try {
+ logger.debug(`Attempting real streaming`, {
+ provider: this.providerName,
+ timestamp: Date.now(),
+ });
  const realStreamResult = await this.executeStream(options, analysisSchema);
+ logger.info(`Real streaming succeeded`, {
+ provider: this.providerName,
+ timestamp: Date.now(),
+ });
  // If real streaming succeeds, return it (with tools support via Vercel AI SDK)
  return realStreamResult;
  }
  catch (realStreamError) {
- logger.warn(`Real streaming failed for ${this.providerName}, falling back to fake streaming:`, realStreamError);
+ logger.warn(`Real streaming failed for ${this.providerName}, falling back to fake streaming:`, {
+ error: realStreamError instanceof Error
+ ? realStreamError.message
+ : String(realStreamError),
+ timestamp: Date.now(),
+ });
  // Fallback to fake streaming only if real streaming fails AND tools are enabled
  if (!options.disableTools && this.supportsTools()) {
  try {
+ logger.info(`Starting fake streaming with tools`, {
+ provider: this.providerName,
+ supportsTools: this.supportsTools(),
+ timestamp: Date.now(),
+ });
  // Convert stream options to text generation options
  const textOptions = {
  prompt: options.input?.text || "",
@@ -82,7 +112,20 @@ export class BaseProvider {
  toolUsageContext: options.toolUsageContext,
  context: options.context,
  };
+ logger.debug(`Calling generate for fake streaming`, {
+ provider: this.providerName,
+ maxSteps: textOptions.maxSteps,
+ disableTools: textOptions.disableTools,
+ timestamp: Date.now(),
+ });
  const result = await this.generate(textOptions, analysisSchema);
+ logger.info(`Generate completed for fake streaming`, {
+ provider: this.providerName,
+ hasContent: !!result?.content,
+ contentLength: result?.content?.length || 0,
+ toolsUsed: result?.toolsUsed?.length || 0,
+ timestamp: Date.now(),
+ });
  // Create a synthetic stream from the generate result that simulates progressive delivery
  return {
  stream: (async function* () {
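Note: both the real and fake-streaming paths resolve to an object whose `stream` property is an async generator, as the synthetic return shape above shows. A minimal consumption sketch (hypothetical caller code; the exact chunk shape is an assumption):

    // Sketch only: assumes stream() resolves to { stream: AsyncIterable<chunk> }.
    const result = await provider.stream({ input: { text: "Summarize this report" } });
    for await (const chunk of result.stream) {
      // The chunk shape is assumed here; adapt to the actual emitted type.
      process.stdout.write(typeof chunk === "string" ? chunk : (chunk.content ?? ""));
    }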
@@ -153,285 +196,367 @@ export class BaseProvider {
  }
  }
  /**
- * Text generation method - implements AIProvider interface
- * Tools are always available unless explicitly disabled
- * IMPLEMENTATION NOTE: Uses streamText() under the hood and accumulates results
- * for consistency and better performance
+ * Prepare generation context including tools and model
  */
- async generate(optionsOrPrompt, _analysisSchema) {
- const options = this.normalizeTextOptions(optionsOrPrompt);
- // Validate options before proceeding
- this.validateOptions(options);
- const startTime = Date.now();
- try {
- // Import streamText dynamically to avoid circular dependencies
- // Using streamText instead of generateText for unified implementation
- // const { streamText } = await import("ai");
- // Get ALL available tools (direct + MCP + external from options)
- const shouldUseTools = !options.disableTools && this.supportsTools();
- const baseTools = shouldUseTools ? await this.getAllTools() : {};
- const tools = shouldUseTools
- ? {
- ...baseTools,
- ...(options.tools || {}), // Include external tools passed from NeuroLink
- }
- : {};
- // DEBUG: Log detailed tool information for generate
- logger.debug("BaseProvider Generate - Tool Loading Debug", {
- provider: this.providerName,
- shouldUseTools,
- baseToolsProvided: !!baseTools,
- baseToolCount: baseTools ? Object.keys(baseTools).length : 0,
- finalToolCount: tools ? Object.keys(tools).length : 0,
- toolNames: tools ? Object.keys(tools).slice(0, 10) : [],
- disableTools: options.disableTools,
- supportsTools: this.supportsTools(),
- externalToolsCount: options.tools
- ? Object.keys(options.tools).length
- : 0,
- });
- if (tools && Object.keys(tools).length > 0) {
- logger.debug("BaseProvider Generate - First 5 Tools Detail", {
- provider: this.providerName,
- tools: Object.keys(tools)
- .slice(0, 5)
- .map((name) => ({
- name,
- description: tools[name]?.description?.substring(0, 100),
- })),
- });
+ async prepareGenerationContext(options) {
+ const shouldUseTools = !options.disableTools && this.supportsTools();
+ const baseTools = shouldUseTools ? await this.getAllTools() : {};
+ const tools = shouldUseTools
+ ? {
+ ...baseTools,
+ ...(options.tools || {}),
  }
- logger.debug(`[BaseProvider.generate] Tools for ${this.providerName}:`, {
- directTools: getKeyCount(baseTools),
- directToolNames: getKeysAsString(baseTools),
- externalTools: getKeyCount(options.tools || {}),
- externalToolNames: getKeysAsString(options.tools || {}),
- totalTools: getKeyCount(tools),
- totalToolNames: getKeysAsString(tools),
- });
- const model = await this.getAISDKModelWithMiddleware(options);
- // Build proper message array with conversation history
- // Check if this is a multimodal request (images or content present)
- let messages;
- // Type guard to check if options has multimodal input
- const hasMultimodalInput = (opts) => {
- const input = opts.input;
- const hasImages = !!input?.images?.length;
- const hasContent = !!input?.content?.length;
- return hasImages || hasContent;
+ : {};
+ logger.debug(`Final tools prepared for AI`, {
+ provider: this.providerName,
+ directTools: getKeyCount(baseTools),
+ directToolNames: getKeysAsString(baseTools),
+ externalTools: getKeyCount(options.tools || {}),
+ externalToolNames: getKeysAsString(options.tools || {}),
+ totalTools: getKeyCount(tools),
+ totalToolNames: getKeysAsString(tools),
+ shouldUseTools,
+ timestamp: Date.now(),
+ });
+ const model = await this.getAISDKModelWithMiddleware(options);
+ return { tools, model };
+ }
+ /**
+ * Build messages array for generation
+ */
+ async buildMessages(options) {
+ const hasMultimodalInput = (opts) => {
+ const input = opts.input;
+ const hasImages = !!input?.images?.length;
+ const hasContent = !!input?.content?.length;
+ return hasImages || hasContent;
+ };
+ let messages;
+ if (hasMultimodalInput(options)) {
+ if (process.env.NEUROLINK_DEBUG === "true") {
+ logger.debug("Detected multimodal input, using multimodal message builder");
+ }
+ const input = options.input;
+ const multimodalOptions = {
+ input: {
+ text: options.prompt || options.input?.text || "",
+ images: input?.images,
+ content: input?.content,
+ },
+ provider: options.provider,
+ model: options.model,
+ temperature: options.temperature,
+ maxTokens: options.maxTokens,
+ systemPrompt: options.systemPrompt,
+ enableAnalytics: options.enableAnalytics,
+ enableEvaluation: options.enableEvaluation,
+ context: options.context,
  };
- if (hasMultimodalInput(options)) {
- if (process.env.NEUROLINK_DEBUG === "true") {
- logger.info("🖼️ [MULTIMODAL-REQUEST] Detected multimodal input, using multimodal message builder");
- }
- // This is a multimodal request - use multimodal message builder
- // Convert TextGenerationOptions to GenerateOptions format for multimodal processing
- const input = options.input;
- const multimodalOptions = {
- input: {
- text: options.prompt || options.input?.text || "",
- images: input?.images,
- content: input?.content,
- },
- provider: options.provider,
- model: options.model,
- temperature: options.temperature,
- maxTokens: options.maxTokens,
- systemPrompt: options.systemPrompt,
- enableAnalytics: options.enableAnalytics,
- enableEvaluation: options.enableEvaluation,
- context: options.context,
+ messages = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+ }
+ else {
+ if (process.env.NEUROLINK_DEBUG === "true") {
+ logger.debug("No multimodal input detected, using standard message builder");
+ }
+ messages = buildMessagesArray(options);
+ }
+ // Convert messages to Vercel AI SDK format
+ return messages.map((msg) => {
+ if (typeof msg.content === "string") {
+ return {
+ role: msg.role,
+ content: msg.content,
  };
- messages = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
  }
  else {
- if (process.env.NEUROLINK_DEBUG === "true") {
- logger.info("📝 [TEXT-ONLY-REQUEST] No multimodal input detected, using standard message builder");
- }
- // Standard text-only request
- messages = buildMessagesArray(options);
+ return {
+ role: msg.role,
+ content: msg.content.map((item) => {
+ if (item.type === "text") {
+ return { type: "text", text: item.text || "" };
+ }
+ else if (item.type === "image") {
+ return { type: "image", image: item.image || "" };
+ }
+ return item;
+ }),
+ };
  }
- // Convert messages to Vercel AI SDK format
- const aiSDKMessages = messages.map((msg) => {
- if (typeof msg.content === "string") {
- // Simple text content
- return {
- role: msg.role,
- content: msg.content,
- };
- }
- else {
- // Multimodal content array - convert to Vercel AI SDK format
- // The Vercel AI SDK expects content to be in a specific format
- return {
- role: msg.role,
- content: msg.content.map((item) => {
- if (item.type === "text") {
- return { type: "text", text: item.text || "" };
- }
- else if (item.type === "image") {
- return { type: "image", image: item.image || "" };
- }
- return item;
- }),
- };
- }
+ });
+ }
+ /**
+ * Execute the generation with AI SDK
+ */
+ async executeGeneration(model, messages, tools, options) {
+ const shouldUseTools = !options.disableTools && this.supportsTools();
+ return await generateText({
+ model,
+ messages,
+ tools,
+ maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+ toolChoice: shouldUseTools ? "auto" : "none",
+ temperature: options.temperature,
+ maxTokens: options.maxTokens,
+ });
+ }
+ /**
+ * Log generation completion information
+ */
+ logGenerationComplete(generateResult) {
+ logger.debug(`generateText completed`, {
+ provider: this.providerName,
+ model: this.modelName,
+ responseLength: generateResult.text?.length || 0,
+ toolResultsCount: generateResult.toolResults?.length || 0,
+ finishReason: generateResult.finishReason,
+ usage: generateResult.usage,
+ timestamp: Date.now(),
+ });
+ }
+ /**
+ * Record performance metrics
+ */
+ async recordPerformanceMetrics(usage, responseTime) {
+ try {
+ const actualCost = await this.calculateActualCost(usage || { promptTokens: 0, completionTokens: 0, totalTokens: 0 });
+ recordProviderPerformanceFromMetrics(this.providerName, {
+ responseTime,
+ tokensGenerated: usage?.totalTokens || 0,
+ cost: actualCost,
+ success: true,
  });
- const generateResult = await generateText({
- model,
- messages: aiSDKMessages,
- tools,
- maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
- toolChoice: shouldUseTools ? "auto" : "none",
- temperature: options.temperature,
- maxTokens: options.maxTokens, // No default limit - unlimited unless specified
+ const optimizedProvider = getPerformanceOptimizedProvider("speed");
+ logger.debug(`🚀 Performance recorded for ${this.providerName}:`, {
+ responseTime: `${responseTime}ms`,
+ tokens: usage?.totalTokens || 0,
+ estimatedCost: `$${actualCost.toFixed(6)}`,
+ recommendedSpeedProvider: optimizedProvider?.provider || "none",
  });
- const responseTime = Date.now() - startTime;
- // Extract properties from generateResult
- const usage = generateResult.usage;
- const toolCalls = generateResult.toolCalls;
- const toolResults = generateResult.toolResults;
- try {
- const actualCost = await this.calculateActualCost(usage || { promptTokens: 0, completionTokens: 0, totalTokens: 0 });
- recordProviderPerformanceFromMetrics(this.providerName, {
- responseTime,
- tokensGenerated: usage?.totalTokens || 0,
- cost: actualCost,
- success: true,
- });
- // Show what the system learned (updated to include cost)
- const optimizedProvider = getPerformanceOptimizedProvider("speed");
- logger.debug(`🚀 Performance recorded for ${this.providerName}:`, {
- responseTime: `${responseTime}ms`,
- tokens: usage?.totalTokens || 0,
- estimatedCost: `$${actualCost.toFixed(6)}`,
- recommendedSpeedProvider: optimizedProvider?.provider || "none",
- });
- }
- catch (perfError) {
- logger.warn("⚠️ Performance recording failed:", perfError);
- }
- // Extract tool names from tool calls for tracking
- // AI SDK puts tool calls in steps array for multi-step generation
- const toolsUsed = [];
- // First check direct tool calls (fallback)
- if (toolCalls && toolCalls.length > 0) {
- toolsUsed.push(...toolCalls.map((tc) => {
- return tc.toolName || tc.name || "unknown";
- }));
- }
- // Then check steps for tool calls (primary source for multi-step)
- if (generateResult.steps &&
- Array.isArray(generateResult.steps)) {
- for (const step of generateResult
- .steps || []) {
- if (step?.toolCalls && Array.isArray(step.toolCalls)) {
- toolsUsed.push(...step.toolCalls.map((tc) => {
- return tc.toolName || tc.name || "unknown";
- }));
- }
- }
- }
- // Remove duplicates
- const uniqueToolsUsed = [...new Set(toolsUsed)];
- // ✅ Extract tool executions from AI SDK result
- const toolExecutions = [];
- // Create a map of tool calls to their arguments for matching with results
+ }
+ catch (perfError) {
+ logger.warn("⚠️ Performance recording failed:", perfError);
+ }
+ }
+ /**
+ * Extract tool information from generation result
+ */
+ extractToolInformation(generateResult) {
+ const toolsUsed = [];
+ const toolExecutions = [];
+ // Extract tool names from tool calls
+ if (generateResult.toolCalls && generateResult.toolCalls.length > 0) {
+ toolsUsed.push(...generateResult.toolCalls.map((tc) => {
+ return tc.toolName || tc.name || "unknown";
+ }));
+ }
+ // Extract from steps
+ if (generateResult.steps &&
+ Array.isArray(generateResult.steps)) {
  const toolCallArgsMap = new Map();
- // Extract tool executions from AI SDK result steps
- if (generateResult.steps &&
- Array.isArray(generateResult.steps)) {
- for (const step of generateResult
- .steps || []) {
- // First, collect tool calls and their arguments
- if (step?.toolCalls && Array.isArray(step.toolCalls)) {
- for (const toolCall of step.toolCalls) {
- const tcRecord = toolCall;
- const toolName = tcRecord.toolName ||
- tcRecord.name ||
- "unknown";
- const toolId = tcRecord.toolCallId ||
- tcRecord.id ||
- toolName;
- // Extract arguments from tool call
- let callArgs = {};
- if (tcRecord.args) {
- callArgs = tcRecord.args;
- }
- else if (tcRecord.arguments) {
- callArgs = tcRecord.arguments;
- }
- else if (tcRecord.parameters) {
- callArgs = tcRecord.parameters;
- }
- toolCallArgsMap.set(toolId, callArgs);
- toolCallArgsMap.set(toolName, callArgs); // Also map by name as fallback
+ for (const step of generateResult
+ .steps || []) {
+ // Collect tool calls and their arguments
+ if (step?.toolCalls && Array.isArray(step.toolCalls)) {
+ for (const toolCall of step.toolCalls) {
+ const tcRecord = toolCall;
+ const toolName = tcRecord.toolName ||
+ tcRecord.name ||
+ "unknown";
+ const toolId = tcRecord.toolCallId ||
+ tcRecord.id ||
+ toolName;
+ toolsUsed.push(toolName);
+ let callArgs = {};
+ if (tcRecord.args) {
+ callArgs = tcRecord.args;
  }
+ else if (tcRecord.arguments) {
+ callArgs = tcRecord.arguments;
+ }
+ else if (tcRecord.parameters) {
+ callArgs = tcRecord.parameters;
+ }
+ toolCallArgsMap.set(toolId, callArgs);
+ toolCallArgsMap.set(toolName, callArgs);
  }
- // Then, process tool results and match with call arguments
- if (step?.toolResults && Array.isArray(step.toolResults)) {
- for (const toolResult of step.toolResults) {
- const trRecord = toolResult;
- const toolName = trRecord.toolName || "unknown";
- const toolId = trRecord.toolCallId || trRecord.id;
- // Try to get arguments from the tool result first
- let toolArgs = {};
- if (trRecord.args) {
- toolArgs = trRecord.args;
- }
- else if (trRecord.arguments) {
- toolArgs = trRecord.arguments;
- }
- else if (trRecord.parameters) {
- toolArgs = trRecord.parameters;
- }
- else if (trRecord.input) {
- toolArgs = trRecord.input;
- }
- else {
- // Fallback: get arguments from the corresponding tool call
- toolArgs = toolCallArgsMap.get(toolId || toolName) || {};
- }
- toolExecutions.push({
- name: toolName,
- input: toolArgs,
- output: trRecord.result || "success",
- });
+ }
+ // Process tool results
+ if (step?.toolResults && Array.isArray(step.toolResults)) {
+ for (const toolResult of step.toolResults) {
+ const trRecord = toolResult;
+ const toolName = trRecord.toolName || "unknown";
+ const toolId = trRecord.toolCallId || trRecord.id;
+ let toolArgs = {};
+ if (trRecord.args) {
+ toolArgs = trRecord.args;
+ }
+ else if (trRecord.arguments) {
+ toolArgs = trRecord.arguments;
+ }
+ else if (trRecord.parameters) {
+ toolArgs = trRecord.parameters;
+ }
+ else if (trRecord.input) {
+ toolArgs = trRecord.input;
  }
+ else {
+ toolArgs = toolCallArgsMap.get(toolId || toolName) || {};
+ }
+ toolExecutions.push({
+ name: toolName,
+ input: toolArgs,
+ output: trRecord.result || "success",
+ });
  }
  }
  }
- // Format the result with tool executions included
- const enhancedResult = {
- content: generateResult.text,
- usage: {
- input: generateResult.usage?.promptTokens || 0,
- output: generateResult.usage?.completionTokens || 0,
- total: generateResult.usage?.totalTokens || 0,
- },
- provider: this.providerName,
- model: this.modelName,
- toolCalls: toolCalls
- ? toolCalls.map((tc) => ({
- toolCallId: tc.toolCallId || "unknown",
- toolName: tc.toolName || "unknown",
- args: tc.args || {},
- }))
- : [],
- toolResults: toolResults || [],
- toolsUsed: uniqueToolsUsed,
- toolExecutions, // Add extracted tool executions
- availableTools: Object.keys(tools).map((name) => {
- const tool = tools[name];
- return {
- name,
- description: tool.description || "No description available",
- parameters: tool.parameters || {},
- server: tool.serverId || "direct",
- };
- }),
- };
- // Enhanced result with analytics and evaluation
+ }
+ return { toolsUsed: [...new Set(toolsUsed)], toolExecutions };
+ }
+ /**
+ * Format the enhanced result
+ */
+ formatEnhancedResult(generateResult, tools, toolsUsed, toolExecutions) {
+ return {
+ content: generateResult.text,
+ usage: {
+ input: generateResult.usage?.promptTokens || 0,
+ output: generateResult.usage?.completionTokens || 0,
+ total: generateResult.usage?.totalTokens || 0,
+ },
+ provider: this.providerName,
+ model: this.modelName,
+ toolCalls: generateResult.toolCalls
+ ? generateResult.toolCalls.map((tc) => ({
+ toolCallId: tc.toolCallId || "unknown",
+ toolName: tc.toolName || "unknown",
+ args: tc.args || {},
+ }))
+ : [],
+ toolResults: generateResult.toolResults || [],
+ toolsUsed,
+ toolExecutions,
+ availableTools: Object.keys(tools).map((name) => {
+ const tool = tools[name];
+ return {
+ name,
+ description: tool.description || "No description available",
+ parameters: tool.parameters || {},
+ server: tool.serverId || "direct",
+ };
+ }),
+ };
+ }
+ /**
+ * Analyze AI response structure and log detailed debugging information
+ * Extracted from generate method to reduce complexity
+ */
+ analyzeAIResponse(result) {
+ // 🔧 NEUROLINK RAW AI RESPONSE TRACE: Log everything about the raw AI response before parameter extraction
+ logger.debug("NeuroLink Raw AI Response Analysis", {
+ provider: this.providerName,
+ model: this.modelName,
+ responseTextLength: result.text?.length || 0,
+ responsePreview: result.text?.substring(0, 500) + "...",
+ finishReason: result.finishReason,
+ usage: result.usage,
+ });
+ // 🔧 NEUROLINK TOOL CALLS ANALYSIS: Analyze raw tool calls structure
+ const toolCallsAnalysis = {
+ hasToolCalls: !!result.toolCalls,
+ toolCallsLength: result.toolCalls?.length || 0,
+ toolCalls: result.toolCalls?.map((toolCall, index) => {
+ const tcRecord = toolCall;
+ const toolName = tcRecord.toolName || tcRecord.name || "unknown";
+ const isTargetTool = toolName.toString().includes("SuccessRateSRByTime") ||
+ toolName.toString().includes("juspay-analytics");
+ return {
+ index: index + 1,
+ toolName,
+ toolId: tcRecord.toolCallId || tcRecord.id || "none",
+ hasArgs: !!tcRecord.args,
+ argsKeys: tcRecord.args && typeof tcRecord.args === "object"
+ ? Object.keys(tcRecord.args)
+ : [],
+ isTargetTool,
+ ...(isTargetTool && {
+ targetToolDetails: {
+ argsType: typeof tcRecord.args,
+ startTime: tcRecord.args?.startTime ||
+ "MISSING",
+ endTime: tcRecord.args?.endTime ||
+ "MISSING",
+ },
+ }),
+ };
+ }) || [],
+ };
+ logger.debug("Tool Calls Analysis", toolCallsAnalysis);
+ // 🔧 NEUROLINK STEPS ANALYSIS: Analyze steps structure (AI SDK multi-step format)
+ const steps = result.steps;
+ const stepsAnalysis = {
+ hasSteps: !!steps,
+ stepsLength: Array.isArray(steps) ? steps.length : 0,
+ steps: Array.isArray(steps)
+ ? steps.map((step, stepIndex) => ({
+ stepIndex: stepIndex + 1,
+ hasToolCalls: !!step.toolCalls,
+ toolCallsLength: step.toolCalls?.length || 0,
+ hasToolResults: !!step.toolResults,
+ toolResultsLength: step.toolResults?.length || 0,
+ targetToolsInStep: step.toolCalls
+ ?.filter((tc) => {
+ const toolName = tc.toolName || tc.name || "unknown";
+ return (toolName.toString().includes("SuccessRateSRByTime") ||
+ toolName.toString().includes("juspay-analytics"));
+ })
+ .map((tc) => ({
+ toolName: tc.toolName || tc.name,
+ hasArgs: !!tc.args,
+ argsKeys: tc.args && typeof tc.args === "object"
+ ? Object.keys(tc.args)
+ : [],
+ startTime: tc.args?.startTime,
+ endTime: tc.args?.endTime,
+ })) || [],
+ }))
+ : [],
+ };
+ logger.debug("[BaseProvider] Steps Analysis", stepsAnalysis);
+ // 🔧 NEUROLINK TOOL RESULTS ANALYSIS: Analyze top-level tool results
+ const toolResultsAnalysis = {
+ hasToolResults: !!result.toolResults,
+ toolResultsLength: result.toolResults?.length || 0,
+ toolResults: result.toolResults?.map((toolResult, index) => ({
+ index: index + 1,
+ toolName: toolResult.toolName || "unknown",
+ hasResult: !!toolResult.result,
+ hasError: !!toolResult.error,
+ })) || [],
+ };
+ logger.debug("[BaseProvider] Tool Results Analysis", toolResultsAnalysis);
+ logger.debug("[BaseProvider] NeuroLink Raw AI Response Analysis Complete");
+ }
+ /**
+ * Text generation method - implements AIProvider interface
+ * Tools are always available unless explicitly disabled
+ * IMPLEMENTATION NOTE: Uses streamText() under the hood and accumulates results
+ * for consistency and better performance
+ */
+ async generate(optionsOrPrompt, _analysisSchema) {
+ const options = this.normalizeTextOptions(optionsOrPrompt);
+ this.validateOptions(options);
+ const startTime = Date.now();
+ try {
+ const { tools, model } = await this.prepareGenerationContext(options);
+ const messages = await this.buildMessages(options);
+ const generateResult = await this.executeGeneration(model, messages, tools, options);
+ this.analyzeAIResponse(generateResult);
+ this.logGenerationComplete(generateResult);
+ const responseTime = Date.now() - startTime;
+ await this.recordPerformanceMetrics(generateResult.usage, responseTime);
+ const { toolsUsed, toolExecutions } = this.extractToolInformation(generateResult);
+ const enhancedResult = this.formatEnhancedResult(generateResult, tools, toolsUsed, toolExecutions);
  return await this.enhanceResult(enhancedResult, options, startTime);
  }
  catch (error) {
@@ -451,11 +576,12 @@ export class BaseProvider {
  * Ensures existing scripts using createAIProvider().generateText() continue to work
  */
  async generateText(options) {
- // Validate required parameters for backward compatibility
- if (!options.prompt ||
- typeof options.prompt !== "string" ||
- options.prompt.trim() === "") {
- throw new Error("GenerateText options must include prompt as a non-empty string");
+ // Validate required parameters for backward compatibility - support both prompt and input.text
+ const promptText = options.prompt || options.input?.text;
+ if (!promptText ||
+ typeof promptText !== "string" ||
+ promptText.trim() === "") {
+ throw new Error("GenerateText options must include prompt or input.text as a non-empty string");
  }
  // Call the main generate method
  const result = await this.generate(options);
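Note: generateText() now accepts either the legacy `prompt` field or the newer `input.text` field. A hedged sketch of the two call styles (provider construction is assumed):

    // Both forms now pass validation:
    await provider.generateText({ prompt: "Hello" });
    await provider.generateText({ input: { text: "Hello" } });
    // Missing or empty text in both places still throws:
    // "GenerateText options must include prompt or input.text as a non-empty string"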
@@ -567,6 +693,7 @@ export class BaseProvider {
  }
  /**
  * Convert tool execution result from MCP format to standard format
+ * Handles tool failures gracefully to prevent stream termination
  */
  async convertToolResult(result) {
  // Handle MCP-style results
@@ -576,10 +703,24 @@ export class BaseProvider {
  return mcpResult.data;
  }
  else {
+ // Instead of throwing, return a structured error result
+ // This prevents tool failures from terminating streams
  const errorMsg = typeof mcpResult.error === "string"
  ? mcpResult.error
  : "Tool execution failed";
- throw new Error(errorMsg);
+ // Log the error for debugging but don't throw
+ logger.warn(`Tool execution failed: ${errorMsg}`);
+ // Return error as structured data that can be processed by the AI
+ return {
+ isError: true,
+ error: errorMsg,
+ content: [
+ {
+ type: "text",
+ text: `Tool execution failed: ${errorMsg}`,
+ },
+ ],
+ };
  }
  }
  return result;
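Note: a failing MCP-style result no longer throws out of convertToolResult(); it resolves to a structured payload the model can read. Based solely on the shape above, an MCP result carrying error: "rate limited" would convert to:

    // Illustration of the structured error shape returned above (not a new API):
    {
      isError: true,
      error: "rate limited",
      content: [{ type: "text", text: "Tool execution failed: rate limited" }],
    }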
@@ -593,14 +734,106 @@ export class BaseProvider {
  // Convert to AI SDK tool format
  const { tool: createAISDKTool } = await import("ai");
  const { z } = await import("zod");
+ let finalSchema;
+ const schemaSource = toolInfo.parameters || toolInfo.inputSchema;
+ if (this.isZodSchema(schemaSource)) {
+ finalSchema = schemaSource;
+ logger.debug(`[BaseProvider] ${toolName}: Using existing Zod schema from ${toolInfo.parameters ? "parameters" : "inputSchema"} field`);
+ }
+ else if (schemaSource && typeof schemaSource === "object") {
+ logger.debug(`[BaseProvider] ${toolName}: Converting JSON Schema to Zod from ${toolInfo.parameters ? "parameters" : "inputSchema"} field`);
+ finalSchema = convertJsonSchemaToZod(schemaSource);
+ }
+ else {
+ finalSchema = z.object({});
+ logger.debug(`[BaseProvider] ${toolName}: No schema found, using empty object`);
+ }
  return createAISDKTool({
  description: toolInfo.description || `Tool ${toolName}`,
- parameters: this.isZodSchema(toolInfo.parameters)
- ? toolInfo.parameters
- : z.object({}),
+ parameters: finalSchema,
  execute: async (params) => {
- const result = await toolInfo.execute(params);
- return await this.convertToolResult(result);
+ const startTime = Date.now();
+ let executionId;
+ if (this.neurolink?.emitToolStart) {
+ executionId = this.neurolink.emitToolStart(toolName, params, startTime);
+ logger.debug(`Custom tool:start emitted via NeuroLink for ${toolName}`, {
+ toolName,
+ executionId,
+ input: params,
+ hasNativeEmission: true,
+ });
+ }
+ try {
+ // 🔧 PARAMETER FLOW TRACING - Before NeuroLink executeTool call
+ logger.debug(`About to call NeuroLink executeTool for ${toolName}`, {
+ toolName,
+ paramsBeforeExecution: {
+ type: typeof params,
+ isNull: params === null,
+ isUndefined: params === undefined,
+ isEmpty: params &&
+ typeof params === "object" &&
+ Object.keys(params).length === 0,
+ keys: params && typeof params === "object"
+ ? Object.keys(params)
+ : "NOT_OBJECT",
+ keysLength: params && typeof params === "object"
+ ? Object.keys(params).length
+ : 0,
+ },
+ executorInfo: {
+ hasExecutor: typeof toolInfo.execute === "function",
+ executorType: typeof toolInfo.execute,
+ },
+ timestamp: Date.now(),
+ phase: "BEFORE_NEUROLINK_EXECUTE",
+ });
+ const result = await toolInfo.execute(params);
+ // 🔧 PARAMETER FLOW TRACING - After NeuroLink executeTool call
+ logger.debug(`NeuroLink executeTool completed for ${toolName}`, {
+ toolName,
+ resultInfo: {
+ type: typeof result,
+ isNull: result === null,
+ isUndefined: result === undefined,
+ hasError: result && typeof result === "object" && "error" in result,
+ },
+ timestamp: Date.now(),
+ phase: "AFTER_NEUROLINK_EXECUTE",
+ });
+ const convertedResult = await this.convertToolResult(result);
+ const endTime = Date.now();
+ // 🔧 NATIVE NEUROLINK EVENT EMISSION - Tool End (Success)
+ if (this.neurolink?.emitToolEnd) {
+ this.neurolink.emitToolEnd(toolName, convertedResult, undefined, // no error
+ startTime, endTime, executionId);
+ logger.debug(`Custom tool:end emitted via NeuroLink for ${toolName}`, {
+ toolName,
+ executionId,
+ duration: endTime - startTime,
+ hasResult: convertedResult !== undefined,
+ hasNativeEmission: true,
+ });
+ }
+ return convertedResult;
+ }
+ catch (error) {
+ const endTime = Date.now();
+ const errorMsg = error instanceof Error ? error.message : String(error);
+ // 🔧 NATIVE NEUROLINK EVENT EMISSION - Tool End (Error)
+ if (this.neurolink?.emitToolEnd) {
+ this.neurolink.emitToolEnd(toolName, undefined, // no result
+ errorMsg, startTime, endTime, executionId);
+ logger.info(`Custom tool:end error emitted via NeuroLink for ${toolName}`, {
+ toolName,
+ executionId,
+ duration: endTime - startTime,
+ error: errorMsg,
+ hasNativeEmission: true,
+ });
+ }
+ throw error;
+ }
  },
  });
  }
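Note: the schema path now prefers an existing Zod schema, then falls back to convertJsonSchemaToZod() from the new utils/schemaConversion.js (added in this release, +140 lines), and only then to an empty z.object({}). A hedged illustration of the conversion this implies; the actual implementation lives in schemaConversion.js and may differ:

    // Hypothetical input/output pair for convertJsonSchemaToZod (names illustrative):
    const jsonSchema = {
      type: "object",
      properties: {
        startTime: { type: "string", description: "ISO start of range" },
        limit: { type: "number" },
      },
      required: ["startTime"],
    };
    const zodSchema = convertJsonSchemaToZod(jsonSchema);
    // Expected to behave roughly like:
    // z.object({ startTime: z.string().describe("ISO start of range"), limit: z.number().optional() })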
@@ -609,6 +842,85 @@ export class BaseProvider {
  return null;
  }
  }
+ /**
+ * Process direct tools with event emission wrapping
+ */
+ async processDirectTools(tools) {
+ if (!this.directTools || Object.keys(this.directTools).length === 0) {
+ return;
+ }
+ logger.debug(`Loading ${Object.keys(this.directTools).length} direct tools with event emission`);
+ for (const [toolName, directTool] of Object.entries(this.directTools)) {
+ logger.debug(`Processing direct tool: ${toolName}`, {
+ toolName,
+ hasExecute: directTool &&
+ typeof directTool === "object" &&
+ "execute" in directTool,
+ hasDescription: directTool &&
+ typeof directTool === "object" &&
+ "description" in directTool,
+ });
+ // Wrap the direct tool's execute function with event emission
+ if (directTool &&
+ typeof directTool === "object" &&
+ "execute" in directTool) {
+ const originalExecute = directTool.execute;
+ // Create a new tool with wrapped execute function
+ tools[toolName] = {
+ ...directTool,
+ execute: async (params) => {
+ // 🔧 EMIT TOOL START EVENT - Bedrock-compatible format
+ if (this.neurolink?.getEventEmitter) {
+ const emitter = this.neurolink.getEventEmitter();
+ emitter.emit("tool:start", { tool: toolName, input: params });
+ logger.debug(`Direct tool:start event emitted for ${toolName}`, {
+ toolName,
+ input: params,
+ hasEmitter: !!emitter,
+ });
+ }
+ try {
+ const result = await originalExecute(params);
+ // 🔧 EMIT TOOL END EVENT - Bedrock-compatible format
+ if (this.neurolink?.getEventEmitter) {
+ const emitter = this.neurolink.getEventEmitter();
+ emitter.emit("tool:end", { tool: toolName, result });
+ logger.debug(`Direct tool:end event emitted for ${toolName}`, {
+ toolName,
+ result: typeof result === "string"
+ ? result.substring(0, 100)
+ : JSON.stringify(result).substring(0, 100),
+ hasEmitter: !!emitter,
+ });
+ }
+ return result;
+ }
+ catch (error) {
+ // 🔧 EMIT TOOL END EVENT FOR ERROR - Bedrock-compatible format
+ if (this.neurolink?.getEventEmitter) {
+ const emitter = this.neurolink.getEventEmitter();
+ const errorMsg = error instanceof Error ? error.message : String(error);
+ emitter.emit("tool:end", { tool: toolName, error: errorMsg });
+ logger.debug(`Direct tool:end error event emitted for ${toolName}`, {
+ toolName,
+ error: errorMsg,
+ hasEmitter: !!emitter,
+ });
+ }
+ throw error;
+ }
+ },
+ };
+ }
+ else {
+ // Fallback: include tool as-is if it doesn't have execute function
+ tools[toolName] = directTool;
+ }
+ }
+ logger.debug(`Direct tools processing complete`, {
+ directToolsProcessed: Object.keys(this.directTools).length,
+ });
+ }
  /**
  * Process custom tools from setupToolExecutor
  */
@@ -618,7 +930,7 @@ export class BaseProvider {
  }
  logger.debug(`[BaseProvider] Loading ${this.customTools.size} custom tools from setupToolExecutor`);
  for (const [toolName, toolDef] of this.customTools.entries()) {
- logger.debug(`[BaseProvider] Processing custom tool: ${toolName}`, {
+ logger.debug(`Processing custom tool: ${toolName}`, {
  toolDef: typeof toolDef,
  hasExecute: toolDef && typeof toolDef === "object" && "execute" in toolDef,
  hasName: toolDef && typeof toolDef === "object" && "name" in toolDef,
@@ -647,16 +959,101 @@ export class BaseProvider {
  const { tool: createAISDKTool } = await import("ai");
  return createAISDKTool({
  description: tool.description || `External MCP tool ${tool.name}`,
- parameters: await this.convertMCPSchemaToZod(tool.inputSchema),
+ parameters: this.createPermissiveZodSchema(),
  execute: async (params) => {
- logger.debug(`[BaseProvider] Executing external MCP tool: ${tool.name}`, { params });
+ logger.debug(`Executing external MCP tool: ${tool.name}`, {
+ toolName: tool.name,
+ serverId: tool.serverId,
+ params: JSON.stringify(params),
+ paramsType: typeof params,
+ hasNeurolink: !!this.neurolink,
+ hasExecuteFunction: this.neurolink &&
+ typeof this.neurolink.executeExternalMCPTool === "function",
+ timestamp: Date.now(),
+ });
+ // 🔧 EMIT TOOL START EVENT - Bedrock-compatible format
+ if (this.neurolink?.getEventEmitter) {
+ const emitter = this.neurolink.getEventEmitter();
+ emitter.emit("tool:start", { tool: tool.name, input: params });
+ logger.debug(`tool:start event emitted for ${tool.name}`, {
+ toolName: tool.name,
+ input: params,
+ hasEmitter: !!emitter,
+ });
+ }
  // Execute via NeuroLink's direct tool execution
  if (this.neurolink &&
  typeof this.neurolink.executeExternalMCPTool === "function") {
- return await this.neurolink.executeExternalMCPTool(tool.serverId || "unknown", tool.name, params);
+ try {
+ const result = await this.neurolink.executeExternalMCPTool(tool.serverId || "unknown", tool.name, params);
+ // 🔧 EMIT TOOL END EVENT - Bedrock-compatible format
+ if (this.neurolink?.getEventEmitter) {
+ const emitter = this.neurolink.getEventEmitter();
+ emitter.emit("tool:end", { tool: tool.name, result });
+ logger.debug(`tool:end event emitted for ${tool.name}`, {
+ toolName: tool.name,
+ result: typeof result === "string"
+ ? result.substring(0, 100)
+ : JSON.stringify(result).substring(0, 100),
+ hasEmitter: !!emitter,
+ });
+ }
+ logger.debug(`External MCP tool executed: ${tool.name}`, {
+ toolName: tool.name,
+ result: typeof result === "string"
+ ? result.substring(0, 200)
+ : JSON.stringify(result).substring(0, 200),
+ resultType: typeof result,
+ timestamp: Date.now(),
+ });
+ return result;
+ }
+ catch (mcpError) {
+ // 🔧 EMIT TOOL END EVENT FOR ERROR - Bedrock-compatible format
+ if (this.neurolink?.getEventEmitter) {
+ const emitter = this.neurolink.getEventEmitter();
+ const errorMsg = mcpError instanceof Error
+ ? mcpError.message
+ : String(mcpError);
+ emitter.emit("tool:end", { tool: tool.name, error: errorMsg });
+ logger.debug(`tool:end error event emitted for ${tool.name}`, {
+ toolName: tool.name,
+ error: errorMsg,
+ hasEmitter: !!emitter,
+ });
+ }
+ logger.error(`External MCP tool failed: ${tool.name}`, {
+ toolName: tool.name,
+ serverId: tool.serverId,
+ error: mcpError instanceof Error
+ ? mcpError.message
+ : String(mcpError),
+ errorStack: mcpError instanceof Error ? mcpError.stack : undefined,
+ params: JSON.stringify(params),
+ timestamp: Date.now(),
+ });
+ throw mcpError;
+ }
  }
  else {
- throw new Error(`Cannot execute external MCP tool: NeuroLink executeExternalMCPTool not available`);
+ const error = `Cannot execute external MCP tool: NeuroLink executeExternalMCPTool not available`;
+ // 🔧 EMIT TOOL END EVENT FOR ERROR - Bedrock-compatible format
+ if (this.neurolink?.getEventEmitter) {
+ const emitter = this.neurolink.getEventEmitter();
+ emitter.emit("tool:end", { tool: tool.name, error });
+ logger.debug(`tool:end error event emitted for ${tool.name}`, {
+ toolName: tool.name,
+ error,
+ hasEmitter: !!emitter,
+ });
+ }
+ logger.error(`${error}`, {
+ toolName: tool.name,
+ hasNeurolink: !!this.neurolink,
+ neurolinkType: typeof this.neurolink,
+ timestamp: Date.now(),
+ });
+ throw new Error(error);
  }
  },
  });
@@ -718,9 +1115,10 @@ export class BaseProvider {
  * MCP tools are added when available (without blocking)
  */
  async getAllTools() {
- const tools = {
- ...this.directTools, // Always include direct tools
- };
+ // Start with wrapped direct tools that emit events
+ const tools = {};
+ // Wrap direct tools with event emission
+ await this.processDirectTools(tools);
  logger.debug(`[BaseProvider] getAllTools called for ${this.providerName}`, {
  neurolinkAvailable: !!this.neurolink,
  neurolinkType: typeof this.neurolink,
@@ -756,76 +1154,15 @@ export class BaseProvider {
  }
  }
  /**
- * Convert MCP JSON Schema to Zod schema for AI SDK tools
- * Handles common MCP schema patterns safely
+ * Create a permissive Zod schema that accepts all parameters as-is
  */
- async convertMCPSchemaToZod(inputSchema) {
- const { z } = await import("zod");
- if (!inputSchema || typeof inputSchema !== "object") {
- return z.object({});
- }
- try {
- const schema = inputSchema;
- const zodFields = {};
- // Handle JSON Schema properties
- if (schema.properties && typeof schema.properties === "object") {
- const required = new Set(Array.isArray(schema.required) ? schema.required : []);
- for (const [propName, propDef] of Object.entries(schema.properties)) {
- const prop = propDef;
- let zodType;
- // Convert based on JSON Schema type
- switch (prop.type) {
- case "string":
- zodType = z.string();
- if (prop.description && typeof prop.description === "string") {
- zodType = zodType.describe(prop.description);
- }
- break;
- case "number":
- case "integer":
- zodType = z.number();
- if (prop.description && typeof prop.description === "string") {
- zodType = zodType.describe(prop.description);
- }
- break;
- case "boolean":
- zodType = z.boolean();
- if (prop.description && typeof prop.description === "string") {
- zodType = zodType.describe(prop.description);
- }
- break;
- case "array":
- zodType = z.array(z.unknown());
- if (prop.description && typeof prop.description === "string") {
- zodType = zodType.describe(prop.description);
- }
- break;
- case "object":
- zodType = z.object({});
- if (prop.description && typeof prop.description === "string") {
- zodType = zodType.describe(prop.description);
- }
- break;
- default:
- // Unknown type, use string as fallback
- zodType = z.string();
- if (prop.description && typeof prop.description === "string") {
- zodType = zodType.describe(prop.description);
- }
- }
- // Make optional if not required
- if (!required.has(propName)) {
- zodType = zodType.optional();
- }
- zodFields[propName] = zodType;
- }
- }
- return getKeyCount(zodFields) > 0 ? z.object(zodFields) : z.object({});
- }
- catch (error) {
- logger.warn(`Failed to convert MCP schema to Zod, using empty schema:`, error);
- return z.object({});
- }
+ createPermissiveZodSchema() {
+ // Create a permissive record that accepts any object structure
+ // This allows all parameters to pass through without validation issues
+ return z.record(z.unknown()).transform((data) => {
+ // Return the data as-is to preserve all parameter information
+ return data;
+ });
  }
  /**
  * Set session context for MCP tools
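Note: createPermissiveZodSchema() replaces the typed convertMCPSchemaToZod() for external MCP tools: rather than validating each property, every object now passes through unchanged. A small self-contained sketch of the behavior:

    // z.record(z.unknown()) accepts any plain object and preserves all keys;
    // the identity transform returns the parsed data untouched.
    import { z } from "zod";
    const permissive = z.record(z.unknown()).transform((data) => data);
    permissive.parse({ startTime: "2024-01-01", nested: { a: 1 } });
    // => { startTime: "2024-01-01", nested: { a: 1 } } (no keys stripped, no type errors)

The trade-off is that malformed arguments are no longer rejected before execution; validation responsibility moves to the MCP server itself.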