@juspay/neurolink 7.48.1 → 7.50.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153)
  1. package/CHANGELOG.md +19 -0
  2. package/README.md +215 -16
  3. package/dist/agent/directTools.d.ts +55 -0
  4. package/dist/agent/directTools.js +266 -0
  5. package/dist/cli/factories/commandFactory.d.ts +6 -0
  6. package/dist/cli/factories/commandFactory.js +149 -16
  7. package/dist/cli/index.js +13 -2
  8. package/dist/cli/loop/conversationSelector.d.ts +45 -0
  9. package/dist/cli/loop/conversationSelector.js +222 -0
  10. package/dist/cli/loop/optionsSchema.d.ts +1 -1
  11. package/dist/cli/loop/session.d.ts +36 -8
  12. package/dist/cli/loop/session.js +257 -61
  13. package/dist/core/baseProvider.d.ts +9 -0
  14. package/dist/core/baseProvider.js +45 -5
  15. package/dist/core/evaluation.js +5 -2
  16. package/dist/factories/providerRegistry.js +2 -2
  17. package/dist/index.d.ts +8 -2
  18. package/dist/index.js +11 -10
  19. package/dist/lib/agent/directTools.d.ts +55 -0
  20. package/dist/lib/agent/directTools.js +266 -0
  21. package/dist/lib/core/baseProvider.d.ts +9 -0
  22. package/dist/lib/core/baseProvider.js +45 -5
  23. package/dist/lib/core/evaluation.js +5 -2
  24. package/dist/lib/factories/providerRegistry.js +2 -2
  25. package/dist/lib/index.d.ts +8 -2
  26. package/dist/lib/index.js +11 -10
  27. package/dist/lib/mcp/factory.d.ts +2 -157
  28. package/dist/lib/mcp/flexibleToolValidator.d.ts +1 -5
  29. package/dist/lib/mcp/index.d.ts +3 -2
  30. package/dist/lib/mcp/mcpCircuitBreaker.d.ts +1 -75
  31. package/dist/lib/mcp/mcpClientFactory.d.ts +1 -20
  32. package/dist/lib/mcp/mcpClientFactory.js +1 -0
  33. package/dist/lib/mcp/registry.d.ts +3 -10
  34. package/dist/lib/mcp/servers/agent/directToolsServer.d.ts +1 -1
  35. package/dist/lib/mcp/servers/aiProviders/aiCoreServer.d.ts +1 -1
  36. package/dist/lib/mcp/servers/utilities/utilityServer.d.ts +1 -1
  37. package/dist/lib/mcp/toolDiscoveryService.d.ts +3 -84
  38. package/dist/lib/mcp/toolRegistry.d.ts +2 -24
  39. package/dist/lib/middleware/builtin/guardrails.d.ts +5 -16
  40. package/dist/lib/middleware/builtin/guardrails.js +44 -39
  41. package/dist/lib/middleware/utils/guardrailsUtils.d.ts +64 -0
  42. package/dist/lib/middleware/utils/guardrailsUtils.js +387 -0
  43. package/dist/lib/neurolink.d.ts +36 -7
  44. package/dist/lib/neurolink.js +141 -0
  45. package/dist/lib/providers/anthropic.js +47 -3
  46. package/dist/lib/providers/azureOpenai.js +9 -2
  47. package/dist/lib/providers/googleAiStudio.js +9 -2
  48. package/dist/lib/providers/googleVertex.js +12 -2
  49. package/dist/lib/providers/huggingFace.js +1 -1
  50. package/dist/lib/providers/litellm.js +1 -1
  51. package/dist/lib/providers/mistral.js +1 -1
  52. package/dist/lib/providers/openAI.js +47 -3
  53. package/dist/lib/services/server/ai/observability/instrumentation.d.ts +57 -0
  54. package/dist/lib/services/server/ai/observability/instrumentation.js +170 -0
  55. package/dist/lib/session/globalSessionState.d.ts +26 -0
  56. package/dist/lib/session/globalSessionState.js +86 -1
  57. package/dist/lib/telemetry/index.d.ts +1 -0
  58. package/dist/lib/telemetry/telemetryService.d.ts +2 -0
  59. package/dist/lib/telemetry/telemetryService.js +7 -7
  60. package/dist/lib/types/cli.d.ts +28 -0
  61. package/dist/lib/types/content.d.ts +18 -5
  62. package/dist/lib/types/contextTypes.d.ts +1 -1
  63. package/dist/lib/types/conversation.d.ts +57 -4
  64. package/dist/lib/types/fileTypes.d.ts +65 -0
  65. package/dist/lib/types/fileTypes.js +4 -0
  66. package/dist/lib/types/generateTypes.d.ts +12 -0
  67. package/dist/lib/types/guardrails.d.ts +103 -0
  68. package/dist/lib/types/guardrails.js +1 -0
  69. package/dist/lib/types/index.d.ts +4 -2
  70. package/dist/lib/types/index.js +4 -0
  71. package/dist/lib/types/mcpTypes.d.ts +407 -14
  72. package/dist/lib/types/modelTypes.d.ts +6 -6
  73. package/dist/lib/types/observability.d.ts +49 -0
  74. package/dist/lib/types/observability.js +6 -0
  75. package/dist/lib/types/streamTypes.d.ts +7 -0
  76. package/dist/lib/types/tools.d.ts +132 -35
  77. package/dist/lib/utils/csvProcessor.d.ts +68 -0
  78. package/dist/lib/utils/csvProcessor.js +277 -0
  79. package/dist/lib/utils/fileDetector.d.ts +57 -0
  80. package/dist/lib/utils/fileDetector.js +457 -0
  81. package/dist/lib/utils/imageProcessor.d.ts +10 -0
  82. package/dist/lib/utils/imageProcessor.js +22 -0
  83. package/dist/lib/utils/loopUtils.d.ts +71 -0
  84. package/dist/lib/utils/loopUtils.js +262 -0
  85. package/dist/lib/utils/messageBuilder.d.ts +2 -1
  86. package/dist/lib/utils/messageBuilder.js +197 -2
  87. package/dist/lib/utils/optionsUtils.d.ts +1 -1
  88. package/dist/mcp/factory.d.ts +2 -157
  89. package/dist/mcp/flexibleToolValidator.d.ts +1 -5
  90. package/dist/mcp/index.d.ts +3 -2
  91. package/dist/mcp/mcpCircuitBreaker.d.ts +1 -75
  92. package/dist/mcp/mcpClientFactory.d.ts +1 -20
  93. package/dist/mcp/mcpClientFactory.js +1 -0
  94. package/dist/mcp/registry.d.ts +3 -10
  95. package/dist/mcp/servers/agent/directToolsServer.d.ts +1 -1
  96. package/dist/mcp/servers/aiProviders/aiCoreServer.d.ts +1 -1
  97. package/dist/mcp/servers/utilities/utilityServer.d.ts +1 -1
  98. package/dist/mcp/toolDiscoveryService.d.ts +3 -84
  99. package/dist/mcp/toolRegistry.d.ts +2 -24
  100. package/dist/middleware/builtin/guardrails.d.ts +5 -16
  101. package/dist/middleware/builtin/guardrails.js +44 -39
  102. package/dist/middleware/utils/guardrailsUtils.d.ts +64 -0
  103. package/dist/middleware/utils/guardrailsUtils.js +387 -0
  104. package/dist/neurolink.d.ts +36 -7
  105. package/dist/neurolink.js +141 -0
  106. package/dist/providers/anthropic.js +47 -3
  107. package/dist/providers/azureOpenai.js +9 -2
  108. package/dist/providers/googleAiStudio.js +9 -2
  109. package/dist/providers/googleVertex.js +12 -2
  110. package/dist/providers/huggingFace.js +1 -1
  111. package/dist/providers/litellm.js +1 -1
  112. package/dist/providers/mistral.js +1 -1
  113. package/dist/providers/openAI.js +47 -3
  114. package/dist/services/server/ai/observability/instrumentation.d.ts +57 -0
  115. package/dist/services/server/ai/observability/instrumentation.js +170 -0
  116. package/dist/session/globalSessionState.d.ts +26 -0
  117. package/dist/session/globalSessionState.js +86 -1
  118. package/dist/telemetry/index.d.ts +1 -0
  119. package/dist/telemetry/telemetryService.d.ts +2 -0
  120. package/dist/telemetry/telemetryService.js +7 -7
  121. package/dist/types/cli.d.ts +28 -0
  122. package/dist/types/content.d.ts +18 -5
  123. package/dist/types/contextTypes.d.ts +1 -1
  124. package/dist/types/conversation.d.ts +57 -4
  125. package/dist/types/fileTypes.d.ts +65 -0
  126. package/dist/types/fileTypes.js +4 -0
  127. package/dist/types/generateTypes.d.ts +12 -0
  128. package/dist/types/guardrails.d.ts +103 -0
  129. package/dist/types/guardrails.js +1 -0
  130. package/dist/types/index.d.ts +4 -2
  131. package/dist/types/index.js +4 -0
  132. package/dist/types/mcpTypes.d.ts +407 -14
  133. package/dist/types/modelTypes.d.ts +6 -6
  134. package/dist/types/observability.d.ts +49 -0
  135. package/dist/types/observability.js +6 -0
  136. package/dist/types/streamTypes.d.ts +7 -0
  137. package/dist/types/tools.d.ts +132 -35
  138. package/dist/utils/csvProcessor.d.ts +68 -0
  139. package/dist/utils/csvProcessor.js +277 -0
  140. package/dist/utils/fileDetector.d.ts +57 -0
  141. package/dist/utils/fileDetector.js +457 -0
  142. package/dist/utils/imageProcessor.d.ts +10 -0
  143. package/dist/utils/imageProcessor.js +22 -0
  144. package/dist/utils/loopUtils.d.ts +71 -0
  145. package/dist/utils/loopUtils.js +262 -0
  146. package/dist/utils/messageBuilder.d.ts +2 -1
  147. package/dist/utils/messageBuilder.js +197 -2
  148. package/dist/utils/optionsUtils.d.ts +1 -1
  149. package/package.json +18 -16
  150. package/dist/lib/mcp/contracts/mcpContract.d.ts +0 -106
  151. package/dist/lib/mcp/contracts/mcpContract.js +0 -5
  152. package/dist/mcp/contracts/mcpContract.d.ts +0 -106
  153. package/dist/mcp/contracts/mcpContract.js +0 -5
package/dist/neurolink.js CHANGED
@@ -39,6 +39,7 @@ import { directToolsServer } from "./mcp/servers/agent/directToolsServer.js";
  // Import orchestration components
  import { ModelRouter } from "./utils/modelRouter.js";
  import { BinaryTaskClassifier } from "./utils/taskClassifier.js";
+ import { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus, } from "./services/server/ai/observability/instrumentation.js";
  import { isNonNullObject } from "./utils/typeUtils.js";
  import { isZodSchema } from "./utils/schemaConversion.js";
  // Core types imported from "./types/index.js"
@@ -178,8 +179,10 @@ export class NeuroLink {
       * @throws {Error} When external server manager initialization fails
       * @throws {Error} When HITL configuration is invalid (if enabled)
       */
+     observabilityConfig;
      constructor(config) {
          this.toolRegistry = config?.toolRegistry || new MCPToolRegistry();
+         this.observabilityConfig = config?.observability;
          // Initialize orchestration setting
          this.enableOrchestration = config?.enableOrchestration ?? false;
          // Read tool cache duration from environment variables, with a default
@@ -194,6 +197,7 @@ export class NeuroLink {
          this.initializeConversationMemory(config, constructorId, constructorStartTime, constructorHrTimeStart);
          this.initializeExternalServerManager(constructorId, constructorStartTime, constructorHrTimeStart);
          this.initializeHITL(config, constructorId, constructorStartTime, constructorHrTimeStart);
+         this.initializeLangfuse(constructorId, constructorStartTime, constructorHrTimeStart);
          this.logConstructorComplete(constructorId, constructorStartTime, constructorHrTimeStart);
      }
      /**
@@ -494,6 +498,81 @@ export class NeuroLink {
              this.unregisterExternalMCPToolFromRegistry(event.toolName);
          });
      }
+     /**
+      * Initialize Langfuse observability for AI operations tracking
+      */
+     initializeLangfuse(constructorId, constructorStartTime, constructorHrTimeStart) {
+         const langfuseInitStartTime = process.hrtime.bigint();
+         try {
+             const langfuseConfig = this.observabilityConfig?.langfuse;
+             if (langfuseConfig?.enabled) {
+                 logger.debug(`[NeuroLink] 📊 LOG_POINT_C019_LANGFUSE_INIT_START`, {
+                     logPoint: "C019_LANGFUSE_INIT_START",
+                     constructorId,
+                     timestamp: new Date().toISOString(),
+                     elapsedMs: Date.now() - constructorStartTime,
+                     elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
+                     langfuseInitStartTimeNs: langfuseInitStartTime.toString(),
+                     message: "Starting Langfuse observability initialization",
+                 });
+                 // Initialize OpenTelemetry FIRST (required for Langfuse v4)
+                 initializeOpenTelemetry(langfuseConfig);
+                 const healthStatus = getLangfuseHealthStatus();
+                 const langfuseInitDurationNs = process.hrtime.bigint() - langfuseInitStartTime;
+                 if (healthStatus.initialized &&
+                     healthStatus.hasProcessor &&
+                     healthStatus.isHealthy) {
+                     logger.debug(`[NeuroLink] ✅ LOG_POINT_C020_LANGFUSE_INIT_SUCCESS`, {
+                         logPoint: "C020_LANGFUSE_INIT_SUCCESS",
+                         constructorId,
+                         timestamp: new Date().toISOString(),
+                         elapsedMs: Date.now() - constructorStartTime,
+                         elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
+                         langfuseInitDurationNs: langfuseInitDurationNs.toString(),
+                         langfuseInitDurationMs: Number(langfuseInitDurationNs) / 1_000_000,
+                         healthStatus,
+                         message: "Langfuse observability initialized successfully",
+                     });
+                 }
+                 else {
+                     logger.warn(`[NeuroLink] ⚠️ LOG_POINT_C021_LANGFUSE_INIT_WARNING`, {
+                         logPoint: "C021_LANGFUSE_INIT_WARNING",
+                         constructorId,
+                         timestamp: new Date().toISOString(),
+                         elapsedMs: Date.now() - constructorStartTime,
+                         elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
+                         langfuseInitDurationNs: langfuseInitDurationNs.toString(),
+                         healthStatus,
+                         message: "Langfuse initialized but not healthy",
+                     });
+                 }
+             }
+             else {
+                 logger.debug(`[NeuroLink] 🚫 LOG_POINT_C022_LANGFUSE_DISABLED`, {
+                     logPoint: "C022_LANGFUSE_DISABLED",
+                     constructorId,
+                     timestamp: new Date().toISOString(),
+                     elapsedMs: Date.now() - constructorStartTime,
+                     elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
+                     message: "Langfuse observability not enabled - skipping initialization",
+                 });
+             }
+         }
+         catch (error) {
+             const langfuseInitErrorDurationNs = process.hrtime.bigint() - langfuseInitStartTime;
+             logger.error(`[NeuroLink] ❌ LOG_POINT_C023_LANGFUSE_INIT_ERROR`, {
+                 logPoint: "C023_LANGFUSE_INIT_ERROR",
+                 constructorId,
+                 timestamp: new Date().toISOString(),
+                 elapsedMs: Date.now() - constructorStartTime,
+                 elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
+                 langfuseInitDurationNs: langfuseInitErrorDurationNs.toString(),
+                 errorMessage: error instanceof Error ? error.message : String(error),
+                 errorStack: error instanceof Error ? error.stack : undefined,
+                 message: "Langfuse observability initialization failed",
+             });
+         }
+     }
      /**
       * Log constructor completion with final state summary
       */
@@ -992,6 +1071,68 @@ export class NeuroLink {
       * @throws {Error} When all providers fail to generate content
       * @throws {Error} When conversation memory operations fail (if enabled)
       */
+     /**
+      * Get observability configuration
+      */
+     getObservabilityConfig() {
+         return this.observabilityConfig;
+     }
+     /**
+      * Check if Langfuse telemetry is enabled
+      * Centralized utility to avoid duplication across providers
+      */
+     isTelemetryEnabled() {
+         return this.observabilityConfig?.langfuse?.enabled || false;
+     }
+     /**
+      * Public method to initialize Langfuse observability
+      * This method can be called externally to ensure Langfuse is properly initialized
+      */
+     async initializeLangfuseObservability() {
+         try {
+             const langfuseConfig = this.observabilityConfig?.langfuse;
+             if (langfuseConfig?.enabled) {
+                 initializeOpenTelemetry(langfuseConfig);
+                 logger.debug("[NeuroLink] Langfuse observability initialized via public method");
+             }
+             else {
+                 logger.debug("[NeuroLink] Langfuse not enabled, skipping initialization");
+             }
+         }
+         catch (error) {
+             logger.warn("[NeuroLink] Failed to initialize Langfuse observability:", error);
+         }
+     }
+     /**
+      * Gracefully shutdown NeuroLink and all MCP connections
+      */
+     async shutdown() {
+         try {
+             logger.debug("[NeuroLink] Starting graceful shutdown");
+             try {
+                 await flushOpenTelemetry();
+                 await shutdownOpenTelemetry();
+                 logger.debug("[NeuroLink] OpenTelemetry shutdown completed");
+             }
+             catch (error) {
+                 logger.warn("[NeuroLink] OpenTelemetry shutdown failed:", error);
+             }
+             if (this.externalServerManager) {
+                 try {
+                     await this.externalServerManager.shutdown();
+                     logger.debug("[NeuroLink] MCP servers shutdown completed");
+                 }
+                 catch (error) {
+                     logger.warn("[NeuroLink] MCP servers shutdown failed:", error);
+                 }
+             }
+             logger.debug("[NeuroLink] Graceful shutdown completed");
+         }
+         catch (error) {
+             logger.error("[NeuroLink] Shutdown failed:", error);
+             throw error;
+         }
+     }
      async generate(optionsOrPrompt) {
          const originalPrompt = this._extractOriginalPrompt(optionsOrPrompt);
          // Convert string prompt to full options
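Taken together, the neurolink.js changes add an `observability` option to the constructor, wire Langfuse/OpenTelemetry initialization into startup, and expose a public `shutdown()`. A minimal consumer sketch follows; `enabled` is confirmed by the diff, and `baseUrl`/`environment`/`release` appear in the health-status type shown later, but any credential key names are assumptions not visible in this diff:

```typescript
import { NeuroLink } from "@juspay/neurolink";

async function main() {
  const neurolink = new NeuroLink({
    observability: {
      langfuse: {
        enabled: true,
        // Assumed fields: baseUrl/environment/release appear in the
        // getLangfuseHealthStatus() return type; credential fields are
        // not shown in this diff and may be named differently.
        baseUrl: "https://cloud.langfuse.com",
        environment: "production",
        release: "7.50.0",
      },
    },
  });

  const result = await neurolink.generate({ input: { text: "Hello" } });
  console.log(result);

  // New in 7.50.0: flushes pending OpenTelemetry spans, then shuts down
  // the span processor and any external MCP servers.
  await neurolink.shutdown();
}

main().catch(console.error);
```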
package/dist/providers/anthropic.js CHANGED
@@ -7,7 +7,7 @@ import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
  import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
  import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
- import { buildMessagesArray } from "../utils/messageBuilder.js";
+ import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
  // Configuration helpers - now using consolidated utility
  const getAnthropicApiKey = () => {
@@ -92,8 +92,51 @@ export class AnthropicProvider extends BaseProvider {
          // ✅ Get tools for streaming (same as generate method)
          const shouldUseTools = !options.disableTools && this.supportsTools();
          const tools = shouldUseTools ? await this.getAllTools() : {};
-         // Build message array from options
-         const messages = buildMessagesArray(options);
+         // Build message array from options with multimodal support
+         const hasMultimodalInput = !!(options.input?.images?.length ||
+             options.input?.content?.length ||
+             options.input?.files?.length ||
+             options.input?.csvFiles?.length);
+         let messages;
+         if (hasMultimodalInput) {
+             logger.debug(`Anthropic: Detected multimodal input, using multimodal message builder`, {
+                 hasImages: !!options.input?.images?.length,
+                 imageCount: options.input?.images?.length || 0,
+                 hasContent: !!options.input?.content?.length,
+                 contentCount: options.input?.content?.length || 0,
+                 hasFiles: !!options.input?.files?.length,
+                 fileCount: options.input?.files?.length || 0,
+                 hasCSVFiles: !!options.input?.csvFiles?.length,
+                 csvFileCount: options.input?.csvFiles?.length || 0,
+             });
+             // Create multimodal options for buildMultimodalMessagesArray
+             const multimodalOptions = {
+                 input: {
+                     text: options.input?.text || "",
+                     images: options.input?.images,
+                     content: options.input?.content,
+                     files: options.input?.files,
+                     csvFiles: options.input?.csvFiles,
+                 },
+                 csvOptions: options.csvOptions,
+                 systemPrompt: options.systemPrompt,
+                 conversationHistory: options.conversationMessages,
+                 provider: this.providerName,
+                 model: this.modelName,
+                 temperature: options.temperature,
+                 maxTokens: options.maxTokens,
+                 enableAnalytics: options.enableAnalytics,
+                 enableEvaluation: options.enableEvaluation,
+                 context: options.context,
+             };
+             const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+             // Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
+             messages = convertToCoreMessages(mm);
+         }
+         else {
+             logger.debug(`Anthropic: Text-only input, using standard message builder`);
+             messages = await buildMessagesArray(options);
+         }
          const model = await this.getAISDKModelWithMiddleware(options);
          const result = await streamText({
              model: model,
@@ -104,6 +147,7 @@ export class AnthropicProvider extends BaseProvider {
              maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
              toolChoice: shouldUseTools ? "auto" : "none",
              abortSignal: timeoutController?.controller.signal,
+             experimental_telemetry: this.getStreamTelemetryConfig(options),
              onStepFinish: ({ toolCalls, toolResults }) => {
                  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
                      logger.warn("[AnthropicProvider] Failed to store tool executions", {
package/dist/providers/azureOpenai.js CHANGED
@@ -110,7 +110,10 @@ export class AzureOpenAIProvider extends BaseProvider {
              });
          }
          // Build message array from options with multimodal support
-         const hasMultimodalInput = !!(options.input?.images?.length || options.input?.content?.length);
+         const hasMultimodalInput = !!(options.input?.images?.length ||
+             options.input?.content?.length ||
+             options.input?.files?.length ||
+             options.input?.csvFiles?.length);
          let messages;
          if (hasMultimodalInput) {
              logger.debug(`Azure OpenAI: Detected multimodal input, using multimodal message builder`, {
@@ -125,7 +128,10 @@ export class AzureOpenAIProvider extends BaseProvider {
                      text: options.input?.text || "",
                      images: options.input?.images,
                      content: options.input?.content,
+                     files: options.input?.files,
+                     csvFiles: options.input?.csvFiles,
                  },
+                 csvOptions: options.csvOptions,
                  systemPrompt: options.systemPrompt,
                  conversationHistory: options.conversationMessages,
                  provider: this.providerName,
@@ -142,7 +148,7 @@ export class AzureOpenAIProvider extends BaseProvider {
          }
          else {
              logger.debug(`Azure OpenAI: Text-only input, using standard message builder`);
-             messages = buildMessagesArray(options);
+             messages = await buildMessagesArray(options);
          }
          const model = await this.getAISDKModelWithMiddleware(options);
          const stream = await streamText({
@@ -156,6 +162,7 @@ export class AzureOpenAIProvider extends BaseProvider {
                  : {}),
              tools,
              toolChoice: shouldUseTools ? "auto" : "none",
+             experimental_telemetry: this.getStreamTelemetryConfig(options),
              onStepFinish: ({ toolCalls, toolResults }) => {
                  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
                      logger.warn("[AzureOpenaiProvider] Failed to store tool executions", {
package/dist/providers/googleAiStudio.js CHANGED
@@ -92,7 +92,10 @@ export class GoogleAIStudioProvider extends BaseProvider {
          const shouldUseTools = !options.disableTools && this.supportsTools();
          const tools = shouldUseTools ? await this.getAllTools() : {};
          // Build message array from options with multimodal support
-         const hasMultimodalInput = !!(options.input?.images?.length || options.input?.content?.length);
+         const hasMultimodalInput = !!(options.input?.images?.length ||
+             options.input?.content?.length ||
+             options.input?.files?.length ||
+             options.input?.csvFiles?.length);
          let messages;
          if (hasMultimodalInput) {
              logger.debug(`Google AI Studio: Detected multimodal input, using multimodal message builder`, {
@@ -107,7 +110,10 @@ export class GoogleAIStudioProvider extends BaseProvider {
                      text: options.input?.text || "",
                      images: options.input?.images,
                      content: options.input?.content,
+                     files: options.input?.files,
+                     csvFiles: options.input?.csvFiles,
                  },
+                 csvOptions: options.csvOptions,
                  systemPrompt: options.systemPrompt,
                  conversationHistory: options.conversationMessages,
                  provider: this.providerName,
@@ -124,7 +130,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
          }
          else {
              logger.debug(`Google AI Studio: Text-only input, using standard message builder`);
-             messages = buildMessagesArray(options);
+             messages = await buildMessagesArray(options);
          }
          const result = await streamText({
              model,
@@ -135,6 +141,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
              maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
              toolChoice: shouldUseTools ? "auto" : "none",
              abortSignal: timeoutController?.controller.signal,
+             experimental_telemetry: this.getStreamTelemetryConfig(options),
              onStepFinish: ({ toolCalls, toolResults }) => {
                  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
                      logger.warn("[GoogleAiStudioProvider] Failed to store tool executions", {
package/dist/providers/googleVertex.js CHANGED
@@ -596,7 +596,10 @@ export class GoogleVertexProvider extends BaseProvider {
          // Validate stream options
          this.validateStreamOptionsOnly(options);
          // Build message array from options with multimodal support
-         const hasMultimodalInput = !!(options.input?.images?.length || options.input?.content?.length);
+         const hasMultimodalInput = !!(options.input?.images?.length ||
+             options.input?.content?.length ||
+             options.input?.files?.length ||
+             options.input?.csvFiles?.length);
          let messages;
          if (hasMultimodalInput) {
              logger.debug(`${functionTag}: Detected multimodal input, using multimodal message builder`, {
@@ -611,7 +614,10 @@ export class GoogleVertexProvider extends BaseProvider {
                      text: options.input?.text || "",
                      images: options.input?.images,
                      content: options.input?.content,
+                     files: options.input?.files,
+                     csvFiles: options.input?.csvFiles,
                  },
+                 csvOptions: options.csvOptions,
                  systemPrompt: options.systemPrompt,
                  conversationHistory: options.conversationMessages,
                  provider: this.providerName,
@@ -628,7 +634,7 @@ export class GoogleVertexProvider extends BaseProvider {
          }
          else {
              logger.debug(`${functionTag}: Text-only input, using standard message builder`);
-             messages = buildMessagesArray(options);
+             messages = await buildMessagesArray(options);
          }
          const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
          // Get all available tools (direct + MCP + external) for streaming
@@ -660,6 +666,7 @@ export class GoogleVertexProvider extends BaseProvider {
                  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
              }),
              abortSignal: timeoutController?.controller.signal,
+             experimental_telemetry: this.getStreamTelemetryConfig(options),
              onError: (event) => {
                  const error = event.error;
                  const errorMessage = error instanceof Error ? error.message : String(error);
@@ -937,6 +944,7 @@ export class GoogleVertexProvider extends BaseProvider {
                  modelName,
                  issue: modelValidation.issue,
                  recommendedModels: [
+                     "claude-sonnet-4-5@20250929",
                      "claude-sonnet-4@20250514",
                      "claude-opus-4@20250514",
                      "claude-3-5-sonnet-20241022",
@@ -1169,6 +1177,7 @@ export class GoogleVertexProvider extends BaseProvider {
          // Validate against known Claude model patterns
          const validPatterns = [
              /^claude-sonnet-4@\d{8}$/,
+             /^claude-sonnet-4-5@\d{8}$/,
              /^claude-opus-4@\d{8}$/,
              /^claude-3-5-sonnet-\d{8}$/,
              /^claude-3-5-haiku-\d{8}$/,
@@ -1390,6 +1399,7 @@ export class GoogleVertexProvider extends BaseProvider {
              "gemini-1.5-flash",
          ],
          claude: [
+             "claude-sonnet-4-5@20250929",
              "claude-sonnet-4@20250514",
              "claude-opus-4@20250514",
              "claude-3-5-sonnet-20241022",
package/dist/providers/huggingFace.js CHANGED
@@ -114,7 +114,7 @@ export class HuggingFaceProvider extends BaseProvider {
          // Enhanced tool handling for HuggingFace models
          const streamOptions = this.prepareStreamOptions(options, analysisSchema);
          // Build message array from options
-         const messages = buildMessagesArray(options);
+         const messages = await buildMessagesArray(options);
          const result = await streamText({
              model: this.model,
              messages: messages,
package/dist/providers/litellm.js CHANGED
@@ -121,7 +121,7 @@ export class LiteLLMProvider extends BaseProvider {
          const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
          try {
              // Build message array from options
-             const messages = buildMessagesArray(options);
+             const messages = await buildMessagesArray(options);
              const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
              const result = streamText({
                  model: model,
package/dist/providers/mistral.js CHANGED
@@ -49,7 +49,7 @@ export class MistralProvider extends BaseProvider {
          // Get tools consistently with generate method
          const shouldUseTools = !options.disableTools && this.supportsTools();
          const tools = shouldUseTools ? await this.getAllTools() : {};
-         const messages = buildMessagesArray(options);
+         const messages = await buildMessagesArray(options);
          const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
          const result = await streamText({
              model,
package/dist/providers/openAI.js CHANGED
@@ -8,7 +8,7 @@ import { AuthenticationError, InvalidModelError, NetworkError, ProviderError, Ra
  import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
- import { buildMessagesArray } from "../utils/messageBuilder.js";
+ import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
  import { isZodSchema } from "../utils/schemaConversion.js";
  // Configuration helpers - now using consolidated utility
@@ -244,8 +244,51 @@ export class OpenAIProvider extends BaseProvider {
              toolNames: Object.keys(tools),
              filteredOutTools: Object.keys(allTools).filter((name) => !tools[name]),
          });
-         // Build message array from options
-         const messages = buildMessagesArray(options);
+         // Build message array from options with multimodal support
+         const hasMultimodalInput = !!(options.input?.images?.length ||
+             options.input?.content?.length ||
+             options.input?.files?.length ||
+             options.input?.csvFiles?.length);
+         let messages;
+         if (hasMultimodalInput) {
+             logger.debug(`OpenAI: Detected multimodal input, using multimodal message builder`, {
+                 hasImages: !!options.input?.images?.length,
+                 imageCount: options.input?.images?.length || 0,
+                 hasContent: !!options.input?.content?.length,
+                 contentCount: options.input?.content?.length || 0,
+                 hasFiles: !!options.input?.files?.length,
+                 fileCount: options.input?.files?.length || 0,
+                 hasCSVFiles: !!options.input?.csvFiles?.length,
+                 csvFileCount: options.input?.csvFiles?.length || 0,
+             });
+             // Create multimodal options for buildMultimodalMessagesArray
+             const multimodalOptions = {
+                 input: {
+                     text: options.input?.text || "",
+                     images: options.input?.images,
+                     content: options.input?.content,
+                     files: options.input?.files,
+                     csvFiles: options.input?.csvFiles,
+                 },
+                 csvOptions: options.csvOptions,
+                 systemPrompt: options.systemPrompt,
+                 conversationHistory: options.conversationMessages,
+                 provider: this.providerName,
+                 model: this.modelName,
+                 temperature: options.temperature,
+                 maxTokens: options.maxTokens,
+                 enableAnalytics: options.enableAnalytics,
+                 enableEvaluation: options.enableEvaluation,
+                 context: options.context,
+             };
+             const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+             // Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
+             messages = convertToCoreMessages(mm);
+         }
+         else {
+             logger.debug(`OpenAI: Text-only input, using standard message builder`);
+             messages = await buildMessagesArray(options);
+         }
          // Debug the actual request being sent to OpenAI
          logger.debug(`OpenAI: streamText request parameters:`, {
              modelName: this.modelName,
@@ -273,6 +316,7 @@ export class OpenAIProvider extends BaseProvider {
              maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
              toolChoice: shouldUseTools && Object.keys(tools).length > 0 ? "auto" : "none",
              abortSignal: timeoutController?.controller.signal,
+             experimental_telemetry: this.getStreamTelemetryConfig(options),
              onStepFinish: ({ toolCalls, toolResults }) => {
                  logger.info("Tool execution completed", { toolResults, toolCalls });
                  // Handle tool execution storage
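Each provider's streamText call now passes `experimental_telemetry` from `getStreamTelemetryConfig`, which lives in baseProvider.js (+45 -5 in this diff, body not shown). A plausible sketch of such a config, modeled on the Vercel AI SDK's telemetry settings; the exact fields NeuroLink emits are an assumption:

```typescript
// The AI SDK's experimental_telemetry accepts at least isEnabled, functionId,
// and metadata. How NeuroLink populates them is assumed here.
function getStreamTelemetryConfig(
  telemetryEnabled: boolean, // gated by observability.langfuse.enabled in the real class
  context?: Record<string, unknown>,
) {
  return {
    isEnabled: telemetryEnabled,
    functionId: "neurolink.stream",   // hypothetical span/function identifier
    metadata: { ...(context ?? {}) }, // caller context propagated onto spans
  };
}

const telemetry = getStreamTelemetryConfig(true, { sessionId: "abc" });
// Passed as `experimental_telemetry: telemetry` to streamText(...); with
// OpenTelemetry initialized (see the instrumentation module below), these
// spans are picked up by LangfuseSpanProcessor and exported to Langfuse.
```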
package/dist/services/server/ai/observability/instrumentation.d.ts ADDED
@@ -0,0 +1,57 @@
+ /**
+  * OpenTelemetry Instrumentation for Langfuse v4
+  *
+  * Configures OpenTelemetry TracerProvider with LangfuseSpanProcessor to capture
+  * traces from Vercel AI SDK's experimental_telemetry feature.
+  *
+  * Flow: Vercel AI SDK → OpenTelemetry Spans → LangfuseSpanProcessor → Langfuse Platform
+  */
+ import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
+ import { LangfuseSpanProcessor } from "@langfuse/otel";
+ import type { LangfuseConfig } from "../../../../types/observability.js";
+ /**
+  * Initialize OpenTelemetry with Langfuse span processor
+  *
+  * This connects Vercel AI SDK's experimental_telemetry to Langfuse by:
+  * 1. Creating LangfuseSpanProcessor with Langfuse credentials
+  * 2. Creating a NodeTracerProvider with service metadata and span processor
+  * 3. Registering the provider globally for AI SDK to use
+  *
+  * @param config - Langfuse configuration passed from parent application
+  */
+ export declare function initializeOpenTelemetry(config: LangfuseConfig): void;
+ /**
+  * Flush all pending spans to Langfuse
+  */
+ export declare function flushOpenTelemetry(): Promise<void>;
+ /**
+  * Shutdown OpenTelemetry and Langfuse span processor
+  */
+ export declare function shutdownOpenTelemetry(): Promise<void>;
+ /**
+  * Get the Langfuse span processor
+  */
+ export declare function getLangfuseSpanProcessor(): LangfuseSpanProcessor | null;
+ /**
+  * Get the tracer provider
+  */
+ export declare function getTracerProvider(): NodeTracerProvider | null;
+ /**
+  * Check if OpenTelemetry is initialized
+  */
+ export declare function isOpenTelemetryInitialized(): boolean;
+ /**
+  * Get health status for Langfuse observability
+  */
+ export declare function getLangfuseHealthStatus(): {
+     isHealthy: boolean | undefined;
+     initialized: boolean;
+     credentialsValid: boolean;
+     enabled: boolean;
+     hasProcessor: boolean;
+     config: {
+         baseUrl: string;
+         environment: string;
+         release: string;
+     } | undefined;
+ };
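The declarations above cover the whole lifecycle: initialize, inspect, flush, shut down. A usage sketch against this API; the deep dist import path is an assumption (the functions may also be re-exported from the package root), and the LangfuseConfig shape is only partially visible in this diff:

```typescript
import {
  initializeOpenTelemetry,
  isOpenTelemetryInitialized,
  getLangfuseHealthStatus,
  flushOpenTelemetry,
  shutdownOpenTelemetry,
} from "@juspay/neurolink/dist/services/server/ai/observability/instrumentation.js";

// Assumed config shape: `enabled` is confirmed by neurolink.js; baseUrl/
// environment/release appear in the health-status return type above.
const config = {
  enabled: true,
  baseUrl: "https://cloud.langfuse.com",
  environment: "development",
  release: "7.50.0",
} as Parameters<typeof initializeOpenTelemetry>[0];

initializeOpenTelemetry(config);

if (isOpenTelemetryInitialized()) {
  const health = getLangfuseHealthStatus();
  console.log(health.initialized, health.hasProcessor, health.isHealthy);
}

// Flush before shutdown so buffered spans reach Langfuse.
await flushOpenTelemetry();
await shutdownOpenTelemetry();
```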