@juspay/neurolink 7.37.0 → 7.38.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/cli/commands/config.d.ts +18 -18
  3. package/dist/cli/factories/commandFactory.d.ts +24 -0
  4. package/dist/cli/factories/commandFactory.js +297 -245
  5. package/dist/core/baseProvider.d.ts +44 -3
  6. package/dist/core/baseProvider.js +729 -352
  7. package/dist/core/constants.d.ts +2 -30
  8. package/dist/core/constants.js +15 -43
  9. package/dist/core/redisConversationMemoryManager.d.ts +98 -15
  10. package/dist/core/redisConversationMemoryManager.js +665 -203
  11. package/dist/factories/providerFactory.js +23 -6
  12. package/dist/index.d.ts +3 -2
  13. package/dist/index.js +4 -3
  14. package/dist/lib/core/baseProvider.d.ts +44 -3
  15. package/dist/lib/core/baseProvider.js +729 -352
  16. package/dist/lib/core/constants.d.ts +2 -30
  17. package/dist/lib/core/constants.js +15 -43
  18. package/dist/lib/core/redisConversationMemoryManager.d.ts +98 -15
  19. package/dist/lib/core/redisConversationMemoryManager.js +665 -203
  20. package/dist/lib/factories/providerFactory.js +23 -6
  21. package/dist/lib/index.d.ts +3 -2
  22. package/dist/lib/index.js +4 -3
  23. package/dist/lib/mcp/externalServerManager.js +2 -2
  24. package/dist/lib/mcp/registry.js +2 -2
  25. package/dist/lib/mcp/servers/agent/directToolsServer.js +19 -10
  26. package/dist/lib/mcp/toolRegistry.js +4 -8
  27. package/dist/lib/neurolink.d.ts +95 -28
  28. package/dist/lib/neurolink.js +479 -719
  29. package/dist/lib/providers/amazonBedrock.js +2 -2
  30. package/dist/lib/providers/anthropic.js +8 -0
  31. package/dist/lib/providers/anthropicBaseProvider.js +8 -0
  32. package/dist/lib/providers/azureOpenai.js +8 -0
  33. package/dist/lib/providers/googleAiStudio.js +8 -0
  34. package/dist/lib/providers/googleVertex.d.ts +3 -23
  35. package/dist/lib/providers/googleVertex.js +24 -342
  36. package/dist/lib/providers/huggingFace.js +8 -0
  37. package/dist/lib/providers/litellm.js +8 -0
  38. package/dist/lib/providers/mistral.js +8 -0
  39. package/dist/lib/providers/openAI.d.ts +23 -0
  40. package/dist/lib/providers/openAI.js +323 -6
  41. package/dist/lib/providers/openaiCompatible.js +8 -0
  42. package/dist/lib/providers/sagemaker/language-model.d.ts +2 -2
  43. package/dist/lib/sdk/toolRegistration.js +18 -1
  44. package/dist/lib/types/common.d.ts +98 -0
  45. package/dist/lib/types/conversation.d.ts +52 -2
  46. package/dist/lib/types/streamTypes.d.ts +13 -6
  47. package/dist/lib/types/typeAliases.d.ts +3 -2
  48. package/dist/lib/utils/conversationMemory.js +3 -1
  49. package/dist/lib/utils/messageBuilder.d.ts +10 -2
  50. package/dist/lib/utils/messageBuilder.js +22 -1
  51. package/dist/lib/utils/parameterValidation.js +6 -25
  52. package/dist/lib/utils/promptRedaction.js +4 -4
  53. package/dist/lib/utils/redis.d.ts +10 -6
  54. package/dist/lib/utils/redis.js +71 -70
  55. package/dist/lib/utils/schemaConversion.d.ts +14 -0
  56. package/dist/lib/utils/schemaConversion.js +140 -0
  57. package/dist/lib/utils/transformationUtils.js +143 -5
  58. package/dist/mcp/externalServerManager.js +2 -2
  59. package/dist/mcp/registry.js +2 -2
  60. package/dist/mcp/servers/agent/directToolsServer.js +19 -10
  61. package/dist/mcp/toolRegistry.js +4 -8
  62. package/dist/neurolink.d.ts +95 -28
  63. package/dist/neurolink.js +479 -719
  64. package/dist/providers/amazonBedrock.js +2 -2
  65. package/dist/providers/anthropic.js +8 -0
  66. package/dist/providers/anthropicBaseProvider.js +8 -0
  67. package/dist/providers/azureOpenai.js +8 -0
  68. package/dist/providers/googleAiStudio.js +8 -0
  69. package/dist/providers/googleVertex.d.ts +3 -23
  70. package/dist/providers/googleVertex.js +24 -342
  71. package/dist/providers/huggingFace.js +8 -0
  72. package/dist/providers/litellm.js +8 -0
  73. package/dist/providers/mistral.js +8 -0
  74. package/dist/providers/openAI.d.ts +23 -0
  75. package/dist/providers/openAI.js +323 -6
  76. package/dist/providers/openaiCompatible.js +8 -0
  77. package/dist/providers/sagemaker/language-model.d.ts +2 -2
  78. package/dist/sdk/toolRegistration.js +18 -1
  79. package/dist/types/common.d.ts +98 -0
  80. package/dist/types/conversation.d.ts +52 -2
  81. package/dist/types/streamTypes.d.ts +13 -6
  82. package/dist/types/typeAliases.d.ts +3 -2
  83. package/dist/utils/conversationMemory.js +3 -1
  84. package/dist/utils/messageBuilder.d.ts +10 -2
  85. package/dist/utils/messageBuilder.js +22 -1
  86. package/dist/utils/parameterValidation.js +6 -25
  87. package/dist/utils/promptRedaction.js +4 -4
  88. package/dist/utils/redis.d.ts +10 -6
  89. package/dist/utils/redis.js +71 -70
  90. package/dist/utils/schemaConversion.d.ts +14 -0
  91. package/dist/utils/schemaConversion.js +140 -0
  92. package/dist/utils/transformationUtils.js +143 -5
  93. package/package.json +3 -2
@@ -2,7 +2,7 @@ import { BedrockRuntimeClient, ConverseCommand, ConverseStreamCommand, } from "@
  import { BedrockClient, ListFoundationModelsCommand, } from "@aws-sdk/client-bedrock";
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
- import { zodToJsonSchema } from "zod-to-json-schema";
+ import { convertZodToJsonSchema } from "../utils/schemaConversion.js";
  export class AmazonBedrockProvider extends BaseProvider {
  bedrockClient;
  conversationHistory = [];
@@ -481,7 +481,7 @@ export class AmazonBedrockProvider extends BaseProvider {
  // Check if it's a Zod schema
  if ("_def" in tool.parameters) {
  // It's a Zod schema, convert to JSON schema
- schema = zodToJsonSchema(tool.parameters);
+ schema = convertZodToJsonSchema(tool.parameters);
  }
  else {
  // It's already a plain object schema
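
Note: both Bedrock hunks above replace the direct zod-to-json-schema call with the package's new convertZodToJsonSchema helper from dist/utils/schemaConversion.js (a new file in this release, not shown in the hunks here). A minimal sketch of what such a wrapper could look like, assuming it still delegates to zod-to-json-schema and only normalizes the output, is:

import { zodToJsonSchema } from "zod-to-json-schema";
import type { ZodTypeAny } from "zod";

// Hypothetical sketch only; the published schemaConversion.js is not shown in this diff.
export function convertZodToJsonSchema(schema: ZodTypeAny): Record<string, unknown> {
  // Delegate to zod-to-json-schema, then drop wrapper keys so providers
  // receive a plain JSON Schema object for their tool parameters.
  const { $schema, definitions, ...rest } = zodToJsonSchema(schema) as Record<string, unknown>;
  return rest;
}

A provider would then call convertZodToJsonSchema(tool.parameters) exactly as the second hunk shows, keeping the Zod-to-JSON-Schema conversion behind a single shared utility.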
@@ -103,6 +103,14 @@ export class AnthropicProvider extends BaseProvider {
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  toolChoice: shouldUseTools ? "auto" : "none",
  abortSignal: timeoutController?.controller.signal,
+ onStepFinish: ({ toolCalls, toolResults }) => {
+ this.handleToolExecutionStorage(toolCalls, toolResults, options).catch((error) => {
+ logger.warn("[AnthropicProvider] Failed to store tool executions", {
+ provider: this.providerName,
+ error: error instanceof Error ? error.message : String(error),
+ });
+ });
+ },
  });
  timeoutController?.cleanup();
  const transformedStream = this.createTextStream(result);
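
The onStepFinish block added here is repeated almost verbatim in the Azure OpenAI, Google AI Studio, Google Vertex, Hugging Face, LiteLLM, and Mistral hunks below: after each step of a multi-step tool run, the provider persists that step's tool calls and results asynchronously, logging storage failures instead of rethrowing them so the stream is never interrupted. The callback shape matches the Vercel AI SDK's per-step hook. A rough standalone sketch, with persistToolExecutions standing in for NeuroLink's internal handleToolExecutionStorage and an OpenAI model used purely as an example (assuming the v4-style "ai" API), might look like:

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

// Stand-in for the provider's handleToolExecutionStorage (e.g. conversation memory / Redis).
async function persistToolExecutions(toolCalls: unknown[], toolResults: unknown[]): Promise<void> {
  console.log(`stored ${toolCalls.length} tool calls, ${toolResults.length} results`);
}

async function main() {
  const result = streamText({
    model: openai("gpt-4o-mini"), // example model only, not what NeuroLink providers use internally
    prompt: "What is the weather in Paris?",
    onStepFinish: ({ toolCalls, toolResults }) => {
      // Fire-and-forget: a storage failure is logged but never propagated into the stream.
      persistToolExecutions(toolCalls, toolResults).catch((error) => {
        console.warn("Failed to store tool executions", error);
      });
    },
  });
  // Draining the stream drives the steps (and therefore onStepFinish).
  for await (const text of result.textStream) {
    process.stdout.write(text);
  }
}

main().catch(console.error);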
@@ -73,6 +73,14 @@ export class AnthropicProviderV2 extends BaseProvider {
  tools: options.tools,
  toolChoice: "auto",
  abortSignal: timeoutController?.controller.signal,
+ onStepFinish: ({ toolCalls, toolResults }) => {
+ this.handleToolExecutionStorage(toolCalls, toolResults, options).catch((error) => {
+ logger.warn("[AnthropicBaseProvider] Failed to store tool executions", {
+ provider: this.providerName,
+ error: error instanceof Error ? error.message : String(error),
+ });
+ });
+ },
  });
  timeoutController?.cleanup();
  // Transform string stream to content object stream (match Google AI pattern)
@@ -122,6 +122,14 @@ export class AzureOpenAIProvider extends BaseProvider {
  : {}),
  tools,
  toolChoice: shouldUseTools ? "auto" : "none",
+ onStepFinish: ({ toolCalls, toolResults }) => {
+ this.handleToolExecutionStorage(toolCalls, toolResults, options).catch((error) => {
+ logger.warn("[AzureOpenaiProvider] Failed to store tool executions", {
+ provider: this.providerName,
+ error: error instanceof Error ? error.message : String(error),
+ });
+ });
+ },
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  });
  // Transform string stream to content object stream using BaseProvider method
@@ -101,6 +101,14 @@ export class GoogleAIStudioProvider extends BaseProvider {
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  toolChoice: shouldUseTools ? "auto" : "none",
  abortSignal: timeoutController?.controller.signal,
+ onStepFinish: ({ toolCalls, toolResults }) => {
+ this.handleToolExecutionStorage(toolCalls, toolResults, options).catch((error) => {
+ logger.warn("[GoogleAiStudioProvider] Failed to store tool executions", {
+ provider: this.providerName,
+ error: error instanceof Error ? error.message : String(error),
+ });
+ });
+ },
  });
  timeoutController?.cleanup();
  // Transform string stream to content object stream using BaseProvider method
@@ -34,7 +34,7 @@ export declare class GoogleVertexProvider extends BaseProvider {
  */
  protected getAISDKModel(): Promise<LanguageModel>;
  /**
- * Initialize model creation logging and tracking
+ * Initialize model creation tracking
  */
  private initializeModelCreationLogging;
  /**
@@ -56,29 +56,9 @@ export declare class GoogleVertexProvider extends BaseProvider {
  */
  private getModel;
  /**
- * Log stream execution start with comprehensive analysis
+ * Validate stream options
  */
- private logStreamExecutionStart;
- /**
- * Log timeout setup process
- */
- private logTimeoutSetup;
- /**
- * Log successful timeout setup
- */
- private logTimeoutSetupSuccess;
- /**
- * Log and perform stream options validation
- */
- private logAndValidateStreamOptions;
- /**
- * Log start of message building process
- */
- private logMessageBuildStart;
- /**
- * Log successful message building
- */
- private logMessageBuildSuccess;
+ private validateStreamOptionsOnly;
  protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
  protected handleProviderError(error: unknown): Error;
  /**
@@ -272,41 +272,13 @@ export class GoogleVertexProvider extends BaseProvider {
  return model;
  }
  /**
- * Initialize model creation logging and tracking
+ * Initialize model creation tracking
  */
  initializeModelCreationLogging() {
  const modelCreationId = `vertex-model-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;
  const modelCreationStartTime = Date.now();
  const modelCreationHrTimeStart = process.hrtime.bigint();
  const modelName = this.modelName || getDefaultVertexModel();
- logger.debug(`[GoogleVertexProvider] 🏭 LOG_POINT_V001_MODEL_CREATION_START`, {
- logPoint: "V001_MODEL_CREATION_START",
- modelCreationId,
- timestamp: new Date().toISOString(),
- modelCreationStartTime,
- modelCreationHrTimeStart: modelCreationHrTimeStart.toString(),
- requestedModel: this.modelName,
- resolvedModel: modelName,
- defaultModel: getDefaultVertexModel(),
- projectId: this.projectId,
- location: this.location,
- // Environment analysis for network issues
- environmentAnalysis: {
- httpProxy: process.env.HTTP_PROXY || process.env.http_proxy || "NOT_SET",
- httpsProxy: process.env.HTTPS_PROXY || process.env.https_proxy || "NOT_SET",
- googleAppCreds: process.env.GOOGLE_APPLICATION_CREDENTIALS || "NOT_SET",
- googleServiceKey: process.env.GOOGLE_SERVICE_ACCOUNT_KEY
- ? "SET"
- : "NOT_SET",
- nodeVersion: process.version,
- platform: process.platform,
- arch: process.arch,
- },
- // Memory and performance baseline
- memoryUsage: process.memoryUsage(),
- cpuUsage: process.cpuUsage(),
- message: "Starting model creation with comprehensive environment analysis",
- });
  return {
  modelCreationId,
  modelCreationStartTime,
@@ -318,37 +290,10 @@ export class GoogleVertexProvider extends BaseProvider {
  * Check if model is Anthropic-based and attempt creation
  */
  async attemptAnthropicModelCreation(modelName, modelCreationId, modelCreationStartTime, modelCreationHrTimeStart) {
- const anthropicCheckStartTime = process.hrtime.bigint();
  const isAnthropic = isAnthropicModel(modelName);
- logger.debug(`[GoogleVertexProvider] 🤖 LOG_POINT_V002_ANTHROPIC_CHECK`, {
- logPoint: "V002_ANTHROPIC_CHECK",
- modelCreationId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - modelCreationStartTime,
- elapsedNs: (process.hrtime.bigint() - modelCreationHrTimeStart).toString(),
- anthropicCheckStartTimeNs: anthropicCheckStartTime.toString(),
- modelName,
- isAnthropicModel: isAnthropic,
- modelNameLowerCase: modelName.toLowerCase(),
- containsClaude: modelName.toLowerCase().includes("claude"),
- anthropicModelPatterns: ["claude"],
- message: "Checking if model is Anthropic-based",
- });
  if (!isAnthropic) {
  return null;
  }
- const anthropicModelStartTime = process.hrtime.bigint();
- logger.debug(`[GoogleVertexProvider] 🧠 LOG_POINT_V003_ANTHROPIC_MODEL_START`, {
- logPoint: "V003_ANTHROPIC_MODEL_START",
- modelCreationId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - modelCreationStartTime,
- elapsedNs: (process.hrtime.bigint() - modelCreationHrTimeStart).toString(),
- anthropicModelStartTimeNs: anthropicModelStartTime.toString(),
- modelName,
- hasAnthropicSupport: hasAnthropicSupport(),
- message: "Creating Anthropic model using vertexAnthropic provider",
- });
  logger.debug("Creating Anthropic model using vertexAnthropic provider", {
  modelName,
  });
@@ -359,52 +304,17 @@ export class GoogleVertexProvider extends BaseProvider {
  try {
  const anthropicModel = await this.createAnthropicModel(modelName);
  if (anthropicModel) {
- const anthropicModelSuccessTime = process.hrtime.bigint();
- const anthropicModelDurationNs = anthropicModelSuccessTime - anthropicModelStartTime;
- logger.debug(`[GoogleVertexProvider] ✅ LOG_POINT_V004_ANTHROPIC_MODEL_SUCCESS`, {
- logPoint: "V004_ANTHROPIC_MODEL_SUCCESS",
- modelCreationId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - modelCreationStartTime,
- elapsedNs: (process.hrtime.bigint() - modelCreationHrTimeStart).toString(),
- anthropicModelDurationNs: anthropicModelDurationNs.toString(),
- anthropicModelDurationMs: Number(anthropicModelDurationNs) / 1000000,
- modelName,
- hasAnthropicModel: !!anthropicModel,
- anthropicModelType: typeof anthropicModel,
- memoryUsageAfterAnthropicCreation: process.memoryUsage(),
- message: "Anthropic model created successfully via vertexAnthropic",
- });
  return anthropicModel;
  }
- // Anthropic model creation returned null
- const anthropicModelNullTime = process.hrtime.bigint();
- const anthropicModelDurationNs = anthropicModelNullTime - anthropicModelStartTime;
- logger.warn(`[GoogleVertexProvider] ⚠️ LOG_POINT_V005_ANTHROPIC_MODEL_NULL`, {
- logPoint: "V005_ANTHROPIC_MODEL_NULL",
- modelCreationId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - modelCreationStartTime,
- elapsedNs: (process.hrtime.bigint() - modelCreationHrTimeStart).toString(),
- anthropicModelDurationNs: anthropicModelDurationNs.toString(),
- anthropicModelDurationMs: Number(anthropicModelDurationNs) / 1000000,
- modelName,
- hasAnthropicModel: false,
- fallbackToGoogle: true,
- message: "Anthropic model creation returned null - falling back to Google model",
- });
+ // Anthropic model creation returned null, falling back to Google model
  }
  catch (error) {
- const anthropicModelErrorTime = process.hrtime.bigint();
- const anthropicModelDurationNs = anthropicModelErrorTime - anthropicModelStartTime;
  logger.error(`[GoogleVertexProvider] ❌ LOG_POINT_V006_ANTHROPIC_MODEL_ERROR`, {
  logPoint: "V006_ANTHROPIC_MODEL_ERROR",
  modelCreationId,
  timestamp: new Date().toISOString(),
  elapsedMs: Date.now() - modelCreationStartTime,
  elapsedNs: (process.hrtime.bigint() - modelCreationHrTimeStart).toString(),
- anthropicModelDurationNs: anthropicModelDurationNs.toString(),
- anthropicModelDurationMs: Number(anthropicModelDurationNs) / 1000000,
  modelName,
  error: error instanceof Error ? error.message : String(error),
  errorName: error instanceof Error ? error.name : "UnknownError",
@@ -420,21 +330,7 @@ export class GoogleVertexProvider extends BaseProvider {
  /**
  * Create Google Vertex model with comprehensive logging and error handling
  */
- async createGoogleVertexModel(modelName, modelCreationId, modelCreationStartTime, modelCreationHrTimeStart, isAnthropic) {
- const googleModelStartTime = process.hrtime.bigint();
- logger.debug(`[GoogleVertexProvider] 🌐 LOG_POINT_V007_GOOGLE_MODEL_START`, {
- logPoint: "V007_GOOGLE_MODEL_START",
- modelCreationId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - modelCreationStartTime,
- elapsedNs: (process.hrtime.bigint() - modelCreationHrTimeStart).toString(),
- googleModelStartTimeNs: googleModelStartTime.toString(),
- modelName,
- projectId: this.projectId,
- location: this.location,
- reason: isAnthropic ? "ANTHROPIC_FALLBACK" : "DIRECT_GOOGLE_MODEL",
- message: "Creating fresh Google Vertex model with current settings",
- });
+ async createGoogleVertexModel(modelName, modelCreationId, modelCreationStartTime, modelCreationHrTimeStart) {
  logger.debug("Creating Google Vertex model", {
  modelName,
  project: this.projectId,
@@ -680,260 +576,31 @@ export class GoogleVertexProvider extends BaseProvider {
  return anthropicModel;
  }
  // Fall back to Google Vertex model creation
- return await this.createGoogleVertexModel(modelName, modelCreationId, modelCreationStartTime, modelCreationHrTimeStart, isAnthropicModel(modelName));
+ return await this.createGoogleVertexModel(modelName, modelCreationId, modelCreationStartTime, modelCreationHrTimeStart);
  }
  // executeGenerate removed - BaseProvider handles all generation with tools
  /**
- * Log stream execution start with comprehensive analysis
- */
- logStreamExecutionStart(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart, functionTag, options, analysisSchema) {
- logger.info(`[GoogleVertexProvider] 🎬 LOG_POINT_S001_STREAM_EXECUTION_START`, {
- logPoint: "S001_STREAM_EXECUTION_START",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- streamExecutionStartTime,
- streamExecutionHrTimeStart: streamExecutionHrTimeStart.toString(),
- functionTag,
- // Input analysis
- inputAnalysis: {
- hasOptions: !!options,
- optionsType: typeof options,
- optionsKeys: options ? Object.keys(options) : [],
- hasInputText: !!options?.input?.text,
- inputTextLength: options?.input?.text?.length || 0,
- inputTextPreview: options?.input?.text?.substring(0, 200) || "NO_TEXT",
- hasAnalysisSchema: !!analysisSchema,
- schemaType: analysisSchema ? typeof analysisSchema : "NO_SCHEMA",
- disableTools: options?.disableTools || false,
- temperature: options?.temperature,
- maxTokens: options?.maxTokens,
- },
- // Provider context
- providerContext: {
- modelName: this.modelName,
- providerName: this.providerName,
- projectId: this.projectId,
- location: this.location,
- defaultTimeout: this.defaultTimeout,
- },
- // Network environment
- networkEnvironment: {
- httpProxy: process.env.HTTP_PROXY || process.env.http_proxy || "NOT_SET",
- httpsProxy: process.env.HTTPS_PROXY || process.env.https_proxy || "NOT_SET",
- googleAppCreds: process.env.GOOGLE_APPLICATION_CREDENTIALS || "NOT_SET",
- hasGoogleServiceKey: !!process.env.GOOGLE_SERVICE_ACCOUNT_KEY,
- expectedEndpoint: `https://${this.location}-aiplatform.googleapis.com`,
- proxyConfigured: !!(process.env.HTTP_PROXY ||
- process.env.HTTPS_PROXY ||
- process.env.http_proxy ||
- process.env.https_proxy),
- },
- // Performance baseline
- memoryUsage: process.memoryUsage(),
- cpuUsage: process.cpuUsage(),
- message: "Stream execution starting with comprehensive analysis",
- });
- }
- /**
- * Log timeout setup process
- */
- logTimeoutSetup(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart, timeoutSetupStartTime, timeout) {
- logger.debug(`[GoogleVertexProvider] ⏰ LOG_POINT_S002_TIMEOUT_SETUP`, {
- logPoint: "S002_TIMEOUT_SETUP",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - streamExecutionStartTime,
- elapsedNs: (process.hrtime.bigint() - streamExecutionHrTimeStart).toString(),
- timeoutSetupStartTimeNs: timeoutSetupStartTime.toString(),
- timeout,
- providerName: this.providerName,
- streamType: "stream",
- message: "Setting up timeout controller for stream execution",
- });
- }
- /**
- * Log successful timeout setup
- */
- logTimeoutSetupSuccess(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart, timeoutSetupStartTime, timeoutController, timeout) {
- const timeoutSetupEndTime = process.hrtime.bigint();
- const timeoutSetupDurationNs = timeoutSetupEndTime - timeoutSetupStartTime;
- logger.debug(`[GoogleVertexProvider] ✅ LOG_POINT_S003_TIMEOUT_SETUP_SUCCESS`, {
- logPoint: "S003_TIMEOUT_SETUP_SUCCESS",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - streamExecutionStartTime,
- elapsedNs: (process.hrtime.bigint() - streamExecutionHrTimeStart).toString(),
- timeoutSetupDurationNs: timeoutSetupDurationNs.toString(),
- timeoutSetupDurationMs: Number(timeoutSetupDurationNs) / 1000000,
- hasTimeoutController: !!timeoutController,
- timeoutValue: timeout,
- message: "Timeout controller setup completed",
- });
- }
- /**
- * Log and perform stream options validation
+ * Validate stream options
  */
- logAndValidateStreamOptions(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart, options) {
- const validationStartTime = process.hrtime.bigint();
- logger.debug(`[GoogleVertexProvider] ✔️ LOG_POINT_S004_VALIDATION_START`, {
- logPoint: "S004_VALIDATION_START",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - streamExecutionStartTime,
- elapsedNs: (process.hrtime.bigint() - streamExecutionHrTimeStart).toString(),
- validationStartTimeNs: validationStartTime.toString(),
- message: "Starting stream options validation",
- });
+ validateStreamOptionsOnly(options) {
  this.validateStreamOptions(options);
- const validationEndTime = process.hrtime.bigint();
- const validationDurationNs = validationEndTime - validationStartTime;
- logger.debug(`[GoogleVertexProvider] ✅ LOG_POINT_S005_VALIDATION_SUCCESS`, {
- logPoint: "S005_VALIDATION_SUCCESS",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - streamExecutionStartTime,
- elapsedNs: (process.hrtime.bigint() - streamExecutionHrTimeStart).toString(),
- validationDurationNs: validationDurationNs.toString(),
- validationDurationMs: Number(validationDurationNs) / 1000000,
- message: "Stream options validation successful",
- });
- }
- /**
- * Log start of message building process
- */
- logMessageBuildStart(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart) {
- const messagesBuildStartTime = process.hrtime.bigint();
- logger.debug(`[GoogleVertexProvider] 📝 LOG_POINT_S006_MESSAGES_BUILD_START`, {
- logPoint: "S006_MESSAGES_BUILD_START",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - streamExecutionStartTime,
- elapsedNs: (process.hrtime.bigint() - streamExecutionHrTimeStart).toString(),
- messagesBuildStartTimeNs: messagesBuildStartTime.toString(),
- message: "Starting message array building",
- });
- return messagesBuildStartTime;
- }
- /**
- * Log successful message building
- */
- logMessageBuildSuccess(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart, messagesBuildStartTime, messages) {
- const messagesBuildEndTime = process.hrtime.bigint();
- const messagesBuildDurationNs = messagesBuildEndTime - messagesBuildStartTime;
- logger.debug(`[GoogleVertexProvider] ✅ LOG_POINT_S007_MESSAGES_BUILD_SUCCESS`, {
- logPoint: "S007_MESSAGES_BUILD_SUCCESS",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - streamExecutionStartTime,
- elapsedNs: (process.hrtime.bigint() - streamExecutionHrTimeStart).toString(),
- messagesBuildDurationNs: messagesBuildDurationNs.toString(),
- messagesBuildDurationMs: Number(messagesBuildDurationNs) / 1000000,
- messagesCount: Array.isArray(messages) ? messages.length : 0,
- messagesType: typeof messages,
- hasMessages: !!messages,
- message: "Message array built successfully",
- });
  }
- /* eslint-disable-next-line max-lines-per-function */
  async executeStream(options, analysisSchema) {
  // Initialize stream execution tracking
- const streamExecutionId = `vertex-stream-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;
- const streamExecutionStartTime = Date.now();
- const streamExecutionHrTimeStart = process.hrtime.bigint();
  const functionTag = "GoogleVertexProvider.executeStream";
  let chunkCount = 0;
- // Log stream execution start
- this.logStreamExecutionStart(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart, functionTag, options, analysisSchema);
  // Setup timeout controller
- const timeoutSetupStartTime = process.hrtime.bigint();
  const timeout = this.getTimeout(options);
- this.logTimeoutSetup(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart, timeoutSetupStartTime, timeout);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
- this.logTimeoutSetupSuccess(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart, timeoutSetupStartTime, timeoutController, timeout);
  try {
- // Validate stream options with logging
- this.logAndValidateStreamOptions(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart, options);
- // Build messages with logging
- const messagesBuildStartTime = this.logMessageBuildStart(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart);
+ // Validate stream options
+ this.validateStreamOptionsOnly(options);
  // Build message array from options
  const messages = buildMessagesArray(options);
- this.logMessageBuildSuccess(streamExecutionId, streamExecutionStartTime, streamExecutionHrTimeStart, messagesBuildStartTime, messages);
- // Log stream request details
- logger.debug(`[GoogleVertexProvider] 🚀 LOG_POINT_S008_STREAM_REQUEST_DETAILS`, {
- logPoint: "S008_STREAM_REQUEST_DETAILS",
- streamExecutionId,
- streamRequestDetails: {
- modelName: this.modelName,
- promptLength: typeof options.input?.text === "string"
- ? options.input.text.length
- : 0,
- hasSchema: !!analysisSchema,
- messagesCount: Array.isArray(messages) ? messages.length : 0,
- temperature: options?.temperature,
- maxTokens: options?.maxTokens,
- disableTools: options?.disableTools || false,
- },
- message: "Starting comprehensive stream request processing",
- });
- // 🚀 EXHAUSTIVE LOGGING POINT S009: MODEL CREATION FOR STREAM
- const modelCreationStartTime = process.hrtime.bigint();
- logger.debug(`[GoogleVertexProvider] 🏭 LOG_POINT_S009_MODEL_CREATION_FOR_STREAM`, {
- logPoint: "S009_MODEL_CREATION_FOR_STREAM",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - streamExecutionStartTime,
- elapsedNs: (process.hrtime.bigint() - streamExecutionHrTimeStart).toString(),
- modelCreationStartTimeNs: modelCreationStartTime.toString(),
- requestedModel: this.modelName,
- message: "Starting model creation for stream execution (this will include network setup)",
- });
  const model = await this.getModel(); // This is where network connection happens!
- const modelCreationEndTime = process.hrtime.bigint();
- const modelCreationDurationNs = modelCreationEndTime - modelCreationStartTime;
- logger.info(`[GoogleVertexProvider] ✅ LOG_POINT_S010_MODEL_CREATION_SUCCESS`, {
- logPoint: "S010_MODEL_CREATION_SUCCESS",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - streamExecutionStartTime,
- elapsedNs: (process.hrtime.bigint() - streamExecutionHrTimeStart).toString(),
- modelCreationDurationNs: modelCreationDurationNs.toString(),
- modelCreationDurationMs: Number(modelCreationDurationNs) / 1000000,
- hasModel: !!model,
- modelType: typeof model,
- message: "Model creation completed successfully - network connection established",
- });
- // 🚀 EXHAUSTIVE LOGGING POINT S011: TOOLS SETUP FOR STREAMING
- const toolsSetupStartTime = process.hrtime.bigint();
- logger.debug(`[GoogleVertexProvider] 🛠️ LOG_POINT_S011_TOOLS_SETUP_START`, {
- logPoint: "S011_TOOLS_SETUP_START",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - streamExecutionStartTime,
- elapsedNs: (process.hrtime.bigint() - streamExecutionHrTimeStart).toString(),
- toolsSetupStartTimeNs: toolsSetupStartTime.toString(),
- disableTools: options?.disableTools || false,
- supportsTools: this.supportsTools(),
- message: "Setting up tools for streaming",
- });
  // Get all available tools (direct + MCP + external) for streaming
  const shouldUseTools = !options.disableTools && this.supportsTools();
  const tools = shouldUseTools ? await this.getAllTools() : {};
- const toolsSetupEndTime = process.hrtime.bigint();
- const toolsSetupDurationNs = toolsSetupEndTime - toolsSetupStartTime;
- logger.debug(`[GoogleVertexProvider] ✅ LOG_POINT_S012_TOOLS_SETUP_SUCCESS`, {
- logPoint: "S012_TOOLS_SETUP_SUCCESS",
- streamExecutionId,
- timestamp: new Date().toISOString(),
- elapsedMs: Date.now() - streamExecutionStartTime,
- elapsedNs: (process.hrtime.bigint() - streamExecutionHrTimeStart).toString(),
- toolsSetupDurationNs: toolsSetupDurationNs.toString(),
- toolsSetupDurationMs: Number(toolsSetupDurationNs) / 1000000,
- shouldUseTools,
- toolCount: Object.keys(tools).length,
- toolNames: Object.keys(tools),
- hasTools: Object.keys(tools).length > 0,
- message: "Tools setup completed for streaming",
- });
  logger.debug(`${functionTag}: Tools for streaming`, {
  shouldUseTools,
  toolCount: Object.keys(tools).length,
@@ -953,7 +620,6 @@ export class GoogleVertexProvider extends BaseProvider {
  messages: messages,
  temperature: options.temperature,
  ...(maxTokens && { maxTokens }),
- // Add tools support for streaming
  ...(shouldUseTools &&
  Object.keys(tools).length > 0 && {
  tools,
@@ -980,6 +646,16 @@ export class GoogleVertexProvider extends BaseProvider {
  onChunk: () => {
  chunkCount++;
  },
+ onStepFinish: ({ toolCalls, toolResults }) => {
+ logger.info("Tool execution completed", { toolResults, toolCalls });
+ // Handle tool execution storage
+ this.handleToolExecutionStorage(toolCalls, toolResults, options).catch((error) => {
+ logger.warn("[GoogleVertexProvider] Failed to store tool executions", {
+ provider: this.providerName,
+ error: error instanceof Error ? error.message : String(error),
+ });
+ });
+ },
  };
  if (analysisSchema) {
  try {
@@ -1208,8 +884,13 @@ export class GoogleVertexProvider extends BaseProvider {
  supportedRegions: [
  "us-central1",
  "us-east4",
+ "us-east5",
+ "us-west1",
+ "us-west4",
  "europe-west1",
+ "europe-west4",
  "asia-southeast1",
+ "asia-northeast1",
  ],
  solution: "Set GOOGLE_CLOUD_LOCATION to a supported region",
  });
@@ -1425,6 +1106,7 @@ export class GoogleVertexProvider extends BaseProvider {
  const supportedRegions = [
  "us-central1",
  "us-east4",
+ "us-east5",
  "us-west1",
  "us-west4",
  "europe-west1",
@@ -123,6 +123,14 @@ export class HuggingFaceProvider extends BaseProvider {
  tools: streamOptions.tools, // Tools format conversion handled by prepareStreamOptions
  toolChoice: streamOptions.toolChoice, // Tool choice handled by prepareStreamOptions
  abortSignal: timeoutController?.controller.signal,
+ onStepFinish: ({ toolCalls, toolResults }) => {
+ this.handleToolExecutionStorage(toolCalls, toolResults, options).catch((error) => {
+ logger.warn("[HuggingFaceProvider] Failed to store tool executions", {
+ provider: this.providerName,
+ error: error instanceof Error ? error.message : String(error),
+ });
+ });
+ },
  });
  timeoutController?.cleanup();
  // Transform stream to match StreamResult interface with enhanced tool call parsing
@@ -130,6 +130,14 @@ export class LiteLLMProvider extends BaseProvider {
  tools: options.tools,
  toolChoice: "auto",
  abortSignal: timeoutController?.controller.signal,
+ onStepFinish: ({ toolCalls, toolResults }) => {
+ this.handleToolExecutionStorage(toolCalls, toolResults, options).catch((error) => {
+ logger.warn("LiteLLMProvider] Failed to store tool executions", {
+ provider: this.providerName,
+ error: error instanceof Error ? error.message : String(error),
+ });
+ });
+ },
  });
  timeoutController?.cleanup();
  // Transform stream to match StreamResult interface
@@ -59,6 +59,14 @@ export class MistralProvider extends BaseProvider {
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  toolChoice: shouldUseTools ? "auto" : "none",
  abortSignal: timeoutController?.controller.signal,
+ onStepFinish: ({ toolCalls, toolResults }) => {
+ this.handleToolExecutionStorage(toolCalls, toolResults, options).catch((error) => {
+ logger.warn("[MistralProvider] Failed to store tool executions", {
+ provider: this.providerName,
+ error: error instanceof Error ? error.message : String(error),
+ });
+ });
+ },
  });
  timeoutController?.cleanup();
  // Transform string stream to content object stream using BaseProvider method