@juspay/neurolink 7.49.0 → 7.50.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/cli/factories/commandFactory.d.ts +4 -0
  3. package/dist/cli/factories/commandFactory.js +19 -0
  4. package/dist/cli/index.js +13 -2
  5. package/dist/core/baseProvider.d.ts +9 -0
  6. package/dist/core/baseProvider.js +36 -3
  7. package/dist/index.d.ts +8 -2
  8. package/dist/index.js +11 -10
  9. package/dist/lib/core/baseProvider.d.ts +9 -0
  10. package/dist/lib/core/baseProvider.js +36 -3
  11. package/dist/lib/index.d.ts +8 -2
  12. package/dist/lib/index.js +11 -10
  13. package/dist/lib/neurolink.d.ts +35 -6
  14. package/dist/lib/neurolink.js +141 -0
  15. package/dist/lib/providers/anthropic.js +1 -0
  16. package/dist/lib/providers/azureOpenai.js +1 -0
  17. package/dist/lib/providers/googleAiStudio.js +1 -0
  18. package/dist/lib/providers/googleVertex.js +1 -0
  19. package/dist/lib/providers/openAI.js +1 -0
  20. package/dist/lib/services/server/ai/observability/instrumentation.d.ts +57 -0
  21. package/dist/lib/services/server/ai/observability/instrumentation.js +170 -0
  22. package/dist/lib/session/globalSessionState.js +37 -1
  23. package/dist/lib/telemetry/index.d.ts +1 -0
  24. package/dist/lib/telemetry/telemetryService.d.ts +2 -0
  25. package/dist/lib/telemetry/telemetryService.js +7 -7
  26. package/dist/lib/types/conversation.d.ts +2 -0
  27. package/dist/lib/types/modelTypes.d.ts +6 -6
  28. package/dist/lib/types/observability.d.ts +49 -0
  29. package/dist/lib/types/observability.js +6 -0
  30. package/dist/neurolink.d.ts +35 -6
  31. package/dist/neurolink.js +141 -0
  32. package/dist/providers/anthropic.js +1 -0
  33. package/dist/providers/azureOpenai.js +1 -0
  34. package/dist/providers/googleAiStudio.js +1 -0
  35. package/dist/providers/googleVertex.js +1 -0
  36. package/dist/providers/openAI.js +1 -0
  37. package/dist/services/server/ai/observability/instrumentation.d.ts +57 -0
  38. package/dist/services/server/ai/observability/instrumentation.js +170 -0
  39. package/dist/session/globalSessionState.js +37 -1
  40. package/dist/telemetry/index.d.ts +1 -0
  41. package/dist/telemetry/telemetryService.d.ts +2 -0
  42. package/dist/telemetry/telemetryService.js +7 -7
  43. package/dist/types/conversation.d.ts +2 -0
  44. package/dist/types/observability.d.ts +49 -0
  45. package/dist/types/observability.js +6 -0
  46. package/package.json +10 -14
package/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
+ ## [7.50.0](https://github.com/juspay/neurolink/compare/v7.49.0...v7.50.0) (2025-10-08)
+
+ ### Features
+
+ - **(observability):** add langfuse and telemetry support ([4172d28](https://github.com/juspay/neurolink/commit/4172d283ebce0c6dddae356d278eeceb42aa8464))
+
  ## [7.49.0](https://github.com/juspay/neurolink/compare/v7.48.1...v7.49.0) (2025-10-07)

  ### Features
package/dist/cli/factories/commandFactory.d.ts CHANGED
@@ -156,4 +156,8 @@ export declare class CLICommandFactory {
  * Execute completion command
  */
  private static executeCompletion;
+ /**
+ * Flush Langfuse traces before exit
+ */
+ private static flushLangfuseTraces;
  }
package/dist/cli/factories/commandFactory.js CHANGED
@@ -1044,6 +1044,7 @@ export class CLICommandFactory {
  logger.debug("Mode: DRY-RUN (no actual API calls made)");
  }
  if (!globalSession.getCurrentSessionId()) {
+ await this.flushLangfuseTraces();
  process.exit(0);
  }
  }
@@ -1120,6 +1121,7 @@ export class CLICommandFactory {
  }
  }
  if (!globalSession.getCurrentSessionId()) {
+ await this.flushLangfuseTraces();
  process.exit(0);
  }
  }
@@ -1228,6 +1230,7 @@ export class CLICommandFactory {
  logger.debug("Mode: DRY-RUN (no actual API calls made)");
  }
  if (!globalSession.getCurrentSessionId()) {
+ await this.flushLangfuseTraces();
  process.exit(0);
  }
  }
@@ -1489,6 +1492,7 @@ export class CLICommandFactory {
  const fullContent = await this.executeRealStream(argv, options, inputText, contextMetadata);
  await this.handleStreamOutput(options, fullContent);
  if (!globalSession.getCurrentSessionId()) {
+ await this.flushLangfuseTraces();
  process.exit(0);
  }
  }
@@ -1610,6 +1614,7 @@ export class CLICommandFactory {
  // Handle output with universal formatting
  this.handleOutput(results, options);
  if (!globalSession.getCurrentSessionId()) {
+ await this.flushLangfuseTraces();
  process.exit(0);
  }
  }
@@ -2005,4 +2010,18 @@ export class CLICommandFactory {
  handleError(error, "Completion generation");
  }
  }
+ /**
+ * Flush Langfuse traces before exit
+ */
+ static async flushLangfuseTraces() {
+ try {
+ logger.debug("[CLI] Flushing Langfuse traces before exit...");
+ const { flushOpenTelemetry } = await import("../../lib/services/server/ai/observability/instrumentation.js");
+ await flushOpenTelemetry();
+ logger.debug("[CLI] Langfuse traces flushed successfully");
+ }
+ catch (error) {
+ logger.error("[CLI] Error flushing Langfuse traces", { error });
+ }
+ }
  }
package/dist/cli/index.js CHANGED
@@ -32,17 +32,28 @@ const cli = initializeCliParser();
  try {
  // Parse and execute commands
  await cli.parse();
+ await cleanup();
  }
  catch (error) {
  // Global error handler - should not reach here due to fail() handler
  process.stderr.write(chalk.red(`Unexpected CLI _error: ${error.message}\n`));
+ await cleanup();
  process.exit(1);
  }
  })();
  // Cleanup on exit
- process.on("SIGINT", () => {
+ process.on("SIGINT", async () => {
+ await cleanup();
  process.exit(0);
  });
- process.on("SIGTERM", () => {
+ process.on("SIGTERM", async () => {
+ await cleanup();
  process.exit(0);
  });
+ process.on("beforeExit", async () => {
+ await cleanup();
+ });
+ async function cleanup() {
+ const { flushOpenTelemetry } = await import("../lib/services/server/ai/observability/instrumentation.js");
+ await flushOpenTelemetry();
+ }
package/dist/core/baseProvider.d.ts CHANGED
@@ -236,4 +236,13 @@ export declare abstract class BaseProvider implements AIProvider {
  * @returns Array of prompt chunks
  */
  static chunkPrompt(prompt: string, maxChunkSize?: number, overlap?: number): string[];
+ /**
+ * Create telemetry configuration for Vercel AI SDK experimental_telemetry
+ * This enables automatic OpenTelemetry tracing when telemetry is enabled
+ */
+ protected getStreamTelemetryConfig(options: StreamOptions | TextGenerationOptions, operationType?: "stream" | "generate"): {
+ isEnabled: boolean;
+ functionId?: string;
+ metadata?: Record<string, string | number | boolean>;
+ } | undefined;
  }
package/dist/core/baseProvider.js CHANGED
@@ -6,6 +6,8 @@ import { DEFAULT_MAX_STEPS, STEP_LIMITS } from "../core/constants.js";
  import { directAgentTools } from "../agent/directTools.js";
  import { getSafeMaxTokens } from "../utils/tokenLimits.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
+ import { nanoid } from "nanoid";
+ import { createAnalytics } from "./analytics.js";
  import { shouldDisableBuiltinTools } from "../utils/toolUtils.js";
  import { buildMessagesArray, buildMultimodalMessagesArray, } from "../utils/messageBuilder.js";
  import { getKeysAsString, getKeyCount } from "../utils/transformationUtils.js";
@@ -304,6 +306,7 @@ export class BaseProvider {
  toolChoice: shouldUseTools ? "auto" : "none",
  temperature: options.temperature,
  maxTokens: options.maxTokens,
+ experimental_telemetry: this.getStreamTelemetryConfig(options, "generate"),
  onStepFinish: ({ toolCalls, toolResults }) => {
  logger.info("Tool execution completed", { toolResults, toolCalls });
  // Handle tool execution storage
@@ -1283,9 +1286,8 @@ export class BaseProvider {
  */
  async createStreamAnalytics(result, startTime, options) {
  try {
- const { createAnalytics } = await import("./analytics.js");
  const analytics = createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
- requestId: `${this.providerName}-stream-${Date.now()}`,
+ requestId: `${this.providerName}-stream-${nanoid()}`,
  streamingMode: true,
  ...options.context,
  });
@@ -1518,7 +1520,7 @@ export class BaseProvider {
  }
  const sessionId = options.context?.sessionId ||
  options.sessionId ||
- `session-${Date.now()}`;
+ `session-${nanoid()}`;
  const userId = options.context?.userId ||
  options.userId;
  try {
@@ -1565,4 +1567,35 @@ export class BaseProvider {
  }
  return chunks;
  }
+ /**
+ * Create telemetry configuration for Vercel AI SDK experimental_telemetry
+ * This enables automatic OpenTelemetry tracing when telemetry is enabled
+ */
+ getStreamTelemetryConfig(options, operationType = "stream") {
+ // Check if telemetry is enabled via NeuroLink observability config
+ if (!this.neurolink?.isTelemetryEnabled()) {
+ return undefined;
+ }
+ const functionId = `${this.providerName}-${operationType}-${nanoid()}`;
+ const metadata = {
+ provider: this.providerName,
+ model: this.modelName,
+ toolsEnabled: !options.disableTools,
+ neurolink: true,
+ };
+ // Add sessionId if available
+ if ("sessionId" in options && options.sessionId) {
+ const sessionId = options.sessionId;
+ if (typeof sessionId === "string" ||
+ typeof sessionId === "number" ||
+ typeof sessionId === "boolean") {
+ metadata.sessionId = sessionId;
+ }
+ }
+ return {
+ isEnabled: true,
+ functionId,
+ metadata,
+ };
+ }
  }
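getStreamTelemetryConfig() is what BaseProvider now feeds into the Vercel AI SDK's experimental_telemetry option. A hedged sketch of the shape it produces and where that shape lands; the generateText/openai setup is illustrative of what BaseProvider does internally, and all literal values below are made up for the example:

```ts
// Sketch of the config shape getStreamTelemetryConfig() returns and where it
// lands. The model setup is illustrative of BaseProvider's internals; all
// literal values are invented for the example.
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const telemetry = {
  isEnabled: true,
  // `${providerName}-${operationType}-${nanoid()}` per the hunk above
  functionId: "openai-generate-V1StGXR8_Z5jdHi6",
  metadata: {
    provider: "openai",
    model: "gpt-4o",
    toolsEnabled: true,
    neurolink: true,
    sessionId: "session-abc123", // attached only when present in options
  },
};

const { text } = await generateText({
  model: openai("gpt-4o"),
  prompt: "Hello",
  // When isTelemetryEnabled() is false, NeuroLink passes undefined here
  // and the AI SDK records no spans for the call.
  experimental_telemetry: telemetry,
});
```

Switching from `Date.now()` to `nanoid()` in the requestId and sessionId fallbacks also removes the collision risk when two streams start in the same millisecond.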
package/dist/index.d.ts CHANGED
@@ -21,6 +21,9 @@ import { NeuroLink } from "./neurolink.js";
  export { NeuroLink };
  export type { ProviderStatus, MCPStatus } from "./neurolink.js";
  export type { MCPServerInfo } from "./types/mcpTypes.js";
+ export type { ObservabilityConfig, LangfuseConfig, OpenTelemetryConfig, } from "./types/observability.js";
+ import { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus } from "./services/server/ai/observability/instrumentation.js";
+ export { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus, };
  export type { NeuroLinkMiddleware, MiddlewareContext, MiddlewareFactoryOptions, MiddlewarePreset, MiddlewareConfig, } from "./types/middlewareTypes.js";
  export { MiddlewareFactory } from "./middleware/factory.js";
  export declare const VERSION = "1.0.0";
@@ -82,10 +85,13 @@ export declare function createBestAIProvider(requestedProvider?: string, modelNa
  export { initializeMCPEcosystem, listMCPs, executeMCP, getMCPStats, mcpLogger, } from "./mcp/index.js";
  export type { McpMetadata, ExecutionContext, DiscoveredMcp, ToolInfo, ToolExecutionResult, LogLevel, } from "./mcp/index.js";
  export declare function initializeTelemetry(): Promise<boolean>;
- export declare function getTelemetryStatus(): {
+ export declare function getTelemetryStatus(): Promise<{
  enabled: boolean;
  initialized: boolean;
- };
+ endpoint?: string;
+ service?: string;
+ version?: string;
+ }>;
  export type { TextGenerationOptions, TextGenerationResult, AnalyticsData, EvaluationData, } from "./types/index.js";
  /**
  * BACKWARD COMPATIBILITY: Legacy generateText function
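The package root now re-exports the observability lifecycle helpers, and getTelemetryStatus() has become async with optional endpoint/service/version fields. A minimal sketch under the signatures declared above; the health-status fields are the ones the neurolink.js diff further down reads (initialized, hasProcessor, isHealthy), and anything else on that object is not visible in this diff:

```ts
// Sketch of the new root-level observability exports, assuming only the
// signatures shown in this diff.
import {
  flushOpenTelemetry,
  shutdownOpenTelemetry,
  getLangfuseHealthStatus,
  getTelemetryStatus,
} from "@juspay/neurolink";

const health = getLangfuseHealthStatus();
if (!health.isHealthy) {
  console.warn("Langfuse exporter is not healthy", health);
}

// getTelemetryStatus() is now async and may carry endpoint/service/version.
const status = await getTelemetryStatus();
console.log(status.enabled, status.initialized, status.endpoint);

// Flush pending spans, then tear the tracer down before exiting.
await flushOpenTelemetry();
await shutdownOpenTelemetry();
```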
package/dist/index.js CHANGED
@@ -19,6 +19,9 @@ export { dynamicModelProvider } from "./core/dynamicModels.js";
  // Main NeuroLink wrapper class and diagnostic types
  import { NeuroLink } from "./neurolink.js";
  export { NeuroLink };
+ import { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus, } from "./services/server/ai/observability/instrumentation.js";
+ import { initializeTelemetry as init, getTelemetryStatus as getStatus, } from "./telemetry/index.js";
+ export { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus, };
  export { MiddlewareFactory } from "./middleware/factory.js";
  // Version
  export const VERSION = "1.0.0";
@@ -96,20 +99,18 @@ initializeMCPEcosystem, listMCPs, executeMCP, getMCPStats, mcpLogger, } from "./
  // Real-time Services (Phase 1) - Basic SSE functionality only
  // export { createEnhancedChatService } from './chat/index.js';
  // export type * from './services/types.js';
- // Optional Telemetry (Phase 2) - Conditional export based on feature flag
+ // Optional Telemetry (Phase 2) - Telemetry service initialization
  export async function initializeTelemetry() {
- if (process.env.NEUROLINK_TELEMETRY_ENABLED === "true") {
- const { initializeTelemetry: init } = await import("./telemetry/index.js");
+ try {
  const result = await init();
- return !!result; // Convert TelemetryService to boolean
+ return !!result;
  }
- return Promise.resolve(false);
- }
- export function getTelemetryStatus() {
- if (process.env.NEUROLINK_TELEMETRY_ENABLED === "true") {
- return { enabled: true, initialized: false };
+ catch {
+ return false;
  }
- return { enabled: false, initialized: false };
+ }
+ export async function getTelemetryStatus() {
+ return getStatus();
  }
  /**
  * BACKWARD COMPATIBILITY: Legacy generateText function
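Note the behavioral changes here: initializeTelemetry() no longer gates on NEUROLINK_TELEMETRY_ENABLED, and getTelemetryStatus() went from synchronous to async, which is breaking for callers that read its result directly. A short migration sketch using only the two functions shown above:

```ts
// Migration sketch for the 7.50.0 signature change.
import { getTelemetryStatus, initializeTelemetry } from "@juspay/neurolink";

// 7.49.0: const status = getTelemetryStatus();        // synchronous
// 7.50.0:
const status = await getTelemetryStatus();
if (status.enabled && !status.initialized) {
  // initializeTelemetry() now always delegates to the telemetry service and
  // resolves false on failure instead of short-circuiting on the
  // NEUROLINK_TELEMETRY_ENABLED environment flag.
  const ok = await initializeTelemetry();
  console.log("telemetry initialized:", ok);
}
```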
package/dist/lib/core/baseProvider.d.ts CHANGED
@@ -236,4 +236,13 @@ export declare abstract class BaseProvider implements AIProvider {
  * @returns Array of prompt chunks
  */
  static chunkPrompt(prompt: string, maxChunkSize?: number, overlap?: number): string[];
+ /**
+ * Create telemetry configuration for Vercel AI SDK experimental_telemetry
+ * This enables automatic OpenTelemetry tracing when telemetry is enabled
+ */
+ protected getStreamTelemetryConfig(options: StreamOptions | TextGenerationOptions, operationType?: "stream" | "generate"): {
+ isEnabled: boolean;
+ functionId?: string;
+ metadata?: Record<string, string | number | boolean>;
+ } | undefined;
  }
package/dist/lib/core/baseProvider.js CHANGED
@@ -6,6 +6,8 @@ import { DEFAULT_MAX_STEPS, STEP_LIMITS } from "../core/constants.js";
  import { directAgentTools } from "../agent/directTools.js";
  import { getSafeMaxTokens } from "../utils/tokenLimits.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
+ import { nanoid } from "nanoid";
+ import { createAnalytics } from "./analytics.js";
  import { shouldDisableBuiltinTools } from "../utils/toolUtils.js";
  import { buildMessagesArray, buildMultimodalMessagesArray, } from "../utils/messageBuilder.js";
  import { getKeysAsString, getKeyCount } from "../utils/transformationUtils.js";
@@ -304,6 +306,7 @@ export class BaseProvider {
  toolChoice: shouldUseTools ? "auto" : "none",
  temperature: options.temperature,
  maxTokens: options.maxTokens,
+ experimental_telemetry: this.getStreamTelemetryConfig(options, "generate"),
  onStepFinish: ({ toolCalls, toolResults }) => {
  logger.info("Tool execution completed", { toolResults, toolCalls });
  // Handle tool execution storage
@@ -1283,9 +1286,8 @@ export class BaseProvider {
  */
  async createStreamAnalytics(result, startTime, options) {
  try {
- const { createAnalytics } = await import("./analytics.js");
  const analytics = createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
- requestId: `${this.providerName}-stream-${Date.now()}`,
+ requestId: `${this.providerName}-stream-${nanoid()}`,
  streamingMode: true,
  ...options.context,
  });
@@ -1518,7 +1520,7 @@ export class BaseProvider {
  }
  const sessionId = options.context?.sessionId ||
  options.sessionId ||
- `session-${Date.now()}`;
+ `session-${nanoid()}`;
  const userId = options.context?.userId ||
  options.userId;
  try {
@@ -1565,4 +1567,35 @@ export class BaseProvider {
  }
  return chunks;
  }
+ /**
+ * Create telemetry configuration for Vercel AI SDK experimental_telemetry
+ * This enables automatic OpenTelemetry tracing when telemetry is enabled
+ */
+ getStreamTelemetryConfig(options, operationType = "stream") {
+ // Check if telemetry is enabled via NeuroLink observability config
+ if (!this.neurolink?.isTelemetryEnabled()) {
+ return undefined;
+ }
+ const functionId = `${this.providerName}-${operationType}-${nanoid()}`;
+ const metadata = {
+ provider: this.providerName,
+ model: this.modelName,
+ toolsEnabled: !options.disableTools,
+ neurolink: true,
+ };
+ // Add sessionId if available
+ if ("sessionId" in options && options.sessionId) {
+ const sessionId = options.sessionId;
+ if (typeof sessionId === "string" ||
+ typeof sessionId === "number" ||
+ typeof sessionId === "boolean") {
+ metadata.sessionId = sessionId;
+ }
+ }
+ return {
+ isEnabled: true,
+ functionId,
+ metadata,
+ };
+ }
  }
package/dist/lib/index.d.ts CHANGED
@@ -21,6 +21,9 @@ import { NeuroLink } from "./neurolink.js";
  export { NeuroLink };
  export type { ProviderStatus, MCPStatus } from "./neurolink.js";
  export type { MCPServerInfo } from "./types/mcpTypes.js";
+ export type { ObservabilityConfig, LangfuseConfig, OpenTelemetryConfig, } from "./types/observability.js";
+ import { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus } from "./services/server/ai/observability/instrumentation.js";
+ export { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus, };
  export type { NeuroLinkMiddleware, MiddlewareContext, MiddlewareFactoryOptions, MiddlewarePreset, MiddlewareConfig, } from "./types/middlewareTypes.js";
  export { MiddlewareFactory } from "./middleware/factory.js";
  export declare const VERSION = "1.0.0";
@@ -82,10 +85,13 @@ export declare function createBestAIProvider(requestedProvider?: string, modelNa
  export { initializeMCPEcosystem, listMCPs, executeMCP, getMCPStats, mcpLogger, } from "./mcp/index.js";
  export type { McpMetadata, ExecutionContext, DiscoveredMcp, ToolInfo, ToolExecutionResult, LogLevel, } from "./mcp/index.js";
  export declare function initializeTelemetry(): Promise<boolean>;
- export declare function getTelemetryStatus(): {
+ export declare function getTelemetryStatus(): Promise<{
  enabled: boolean;
  initialized: boolean;
- };
+ endpoint?: string;
+ service?: string;
+ version?: string;
+ }>;
  export type { TextGenerationOptions, TextGenerationResult, AnalyticsData, EvaluationData, } from "./types/index.js";
  /**
  * BACKWARD COMPATIBILITY: Legacy generateText function
package/dist/lib/index.js CHANGED
@@ -19,6 +19,9 @@ export { dynamicModelProvider } from "./core/dynamicModels.js";
  // Main NeuroLink wrapper class and diagnostic types
  import { NeuroLink } from "./neurolink.js";
  export { NeuroLink };
+ import { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus, } from "./services/server/ai/observability/instrumentation.js";
+ import { initializeTelemetry as init, getTelemetryStatus as getStatus, } from "./telemetry/index.js";
+ export { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus, };
  export { MiddlewareFactory } from "./middleware/factory.js";
  // Version
  export const VERSION = "1.0.0";
@@ -96,20 +99,18 @@ initializeMCPEcosystem, listMCPs, executeMCP, getMCPStats, mcpLogger, } from "./
  // Real-time Services (Phase 1) - Basic SSE functionality only
  // export { createEnhancedChatService } from './chat/index.js';
  // export type * from './services/types.js';
- // Optional Telemetry (Phase 2) - Conditional export based on feature flag
+ // Optional Telemetry (Phase 2) - Telemetry service initialization
  export async function initializeTelemetry() {
- if (process.env.NEUROLINK_TELEMETRY_ENABLED === "true") {
- const { initializeTelemetry: init } = await import("./telemetry/index.js");
+ try {
  const result = await init();
- return !!result; // Convert TelemetryService to boolean
+ return !!result;
  }
- return Promise.resolve(false);
- }
- export function getTelemetryStatus() {
- if (process.env.NEUROLINK_TELEMETRY_ENABLED === "true") {
- return { enabled: true, initialized: false };
+ catch {
+ return false;
  }
- return { enabled: false, initialized: false };
+ }
+ export async function getTelemetryStatus() {
+ return getStatus();
  }
  /**
  * BACKWARD COMPATIBILITY: Legacy generateText function
package/dist/lib/neurolink.d.ts CHANGED
@@ -19,6 +19,17 @@ import { ConversationMemoryManager } from "./core/conversationMemoryManager.js";
  import { RedisConversationMemoryManager } from "./core/redisConversationMemoryManager.js";
  import type { HITLConfig } from "./hitl/types.js";
  import type { ExternalMCPServerInstance, ExternalMCPOperationResult, ExternalMCPToolInfo } from "./types/externalMcp.js";
+ import type { ObservabilityConfig } from "./types/observability.js";
+ /**
+ * Configuration object for NeuroLink constructor.
+ */
+ export interface NeurolinkConstructorConfig {
+ conversationMemory?: Partial<ConversationMemoryConfig>;
+ enableOrchestration?: boolean;
+ hitl?: HITLConfig;
+ toolRegistry?: MCPToolRegistry;
+ observability?: ObservabilityConfig;
+ }
  export interface ProviderStatus {
  provider: string;
  status: "working" | "failed" | "not-configured";
@@ -138,12 +149,8 @@ export declare class NeuroLink {
  * @throws {Error} When external server manager initialization fails
  * @throws {Error} When HITL configuration is invalid (if enabled)
  */
- constructor(config?: {
- conversationMemory?: Partial<ConversationMemoryConfig>;
- enableOrchestration?: boolean;
- hitl?: HITLConfig;
- toolRegistry?: MCPToolRegistry;
- });
+ private observabilityConfig?;
+ constructor(config?: NeurolinkConstructorConfig);
  /**
  * Initialize provider registry with security settings
  */
@@ -170,6 +177,10 @@ export declare class NeuroLink {
  * Setup event handlers for external server manager
  */
  private setupExternalServerEventHandlers;
+ /**
+ * Initialize Langfuse observability for AI operations tracking
+ */
+ private initializeLangfuse;
  /**
  * Log constructor completion with final state summary
  */
@@ -279,6 +290,24 @@ export declare class NeuroLink {
  * @throws {Error} When all providers fail to generate content
  * @throws {Error} When conversation memory operations fail (if enabled)
  */
+ /**
+ * Get observability configuration
+ */
+ getObservabilityConfig(): ObservabilityConfig | undefined;
+ /**
+ * Check if Langfuse telemetry is enabled
+ * Centralized utility to avoid duplication across providers
+ */
+ isTelemetryEnabled(): boolean;
+ /**
+ * Public method to initialize Langfuse observability
+ * This method can be called externally to ensure Langfuse is properly initialized
+ */
+ initializeLangfuseObservability(): Promise<void>;
+ /**
+ * Gracefully shutdown NeuroLink and all MCP connections
+ */
+ shutdown(): Promise<void>;
  generate(optionsOrPrompt: GenerateOptions | string): Promise<GenerateResult>;
  /**
  * BACKWARD COMPATIBILITY: Legacy generateText method
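The constructor options are now the named NeurolinkConstructorConfig interface with an optional observability field. A hedged construction sketch; only the enabled flag of LangfuseConfig is visible in this diff, so any credential or host fields are left as a placeholder comment rather than guessed:

```ts
// Sketch: enabling Langfuse observability via the new constructor config.
// Only `enabled` is confirmed by this diff; LangfuseConfig's remaining fields
// live in types/observability.d.ts (not shown here) and are omitted.
import { NeuroLink, type ObservabilityConfig } from "@juspay/neurolink";

const observability: ObservabilityConfig = {
  langfuse: {
    enabled: true,
    // ...credentials/host per LangfuseConfig, not visible in this diff
  },
};

const neurolink = new NeuroLink({ observability });

// Providers consult this flag (via getStreamTelemetryConfig) before
// attaching experimental_telemetry to AI SDK calls.
console.log(neurolink.isTelemetryEnabled()); // true
```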
package/dist/lib/neurolink.js CHANGED
@@ -39,6 +39,7 @@ import { directToolsServer } from "./mcp/servers/agent/directToolsServer.js";
  // Import orchestration components
  import { ModelRouter } from "./utils/modelRouter.js";
  import { BinaryTaskClassifier } from "./utils/taskClassifier.js";
+ import { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus, } from "./services/server/ai/observability/instrumentation.js";
  import { isNonNullObject } from "./utils/typeUtils.js";
  import { isZodSchema } from "./utils/schemaConversion.js";
  // Core types imported from "./types/index.js"
@@ -178,8 +179,10 @@ export class NeuroLink {
  * @throws {Error} When external server manager initialization fails
  * @throws {Error} When HITL configuration is invalid (if enabled)
  */
+ observabilityConfig;
  constructor(config) {
  this.toolRegistry = config?.toolRegistry || new MCPToolRegistry();
+ this.observabilityConfig = config?.observability;
  // Initialize orchestration setting
  this.enableOrchestration = config?.enableOrchestration ?? false;
  // Read tool cache duration from environment variables, with a default
@@ -194,6 +197,7 @@
  this.initializeConversationMemory(config, constructorId, constructorStartTime, constructorHrTimeStart);
  this.initializeExternalServerManager(constructorId, constructorStartTime, constructorHrTimeStart);
  this.initializeHITL(config, constructorId, constructorStartTime, constructorHrTimeStart);
+ this.initializeLangfuse(constructorId, constructorStartTime, constructorHrTimeStart);
  this.logConstructorComplete(constructorId, constructorStartTime, constructorHrTimeStart);
  }
  /**
@@ -494,6 +498,81 @@ export class NeuroLink {
  this.unregisterExternalMCPToolFromRegistry(event.toolName);
  });
  }
+ /**
+ * Initialize Langfuse observability for AI operations tracking
+ */
+ initializeLangfuse(constructorId, constructorStartTime, constructorHrTimeStart) {
+ const langfuseInitStartTime = process.hrtime.bigint();
+ try {
+ const langfuseConfig = this.observabilityConfig?.langfuse;
+ if (langfuseConfig?.enabled) {
+ logger.debug(`[NeuroLink] 📊 LOG_POINT_C019_LANGFUSE_INIT_START`, {
+ logPoint: "C019_LANGFUSE_INIT_START",
+ constructorId,
+ timestamp: new Date().toISOString(),
+ elapsedMs: Date.now() - constructorStartTime,
+ elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
+ langfuseInitStartTimeNs: langfuseInitStartTime.toString(),
+ message: "Starting Langfuse observability initialization",
+ });
+ // Initialize OpenTelemetry FIRST (required for Langfuse v4)
+ initializeOpenTelemetry(langfuseConfig);
+ const healthStatus = getLangfuseHealthStatus();
+ const langfuseInitDurationNs = process.hrtime.bigint() - langfuseInitStartTime;
+ if (healthStatus.initialized &&
+ healthStatus.hasProcessor &&
+ healthStatus.isHealthy) {
+ logger.debug(`[NeuroLink] ✅ LOG_POINT_C020_LANGFUSE_INIT_SUCCESS`, {
+ logPoint: "C020_LANGFUSE_INIT_SUCCESS",
+ constructorId,
+ timestamp: new Date().toISOString(),
+ elapsedMs: Date.now() - constructorStartTime,
+ elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
+ langfuseInitDurationNs: langfuseInitDurationNs.toString(),
+ langfuseInitDurationMs: Number(langfuseInitDurationNs) / 1_000_000,
+ healthStatus,
+ message: "Langfuse observability initialized successfully",
+ });
+ }
+ else {
+ logger.warn(`[NeuroLink] ⚠️ LOG_POINT_C021_LANGFUSE_INIT_WARNING`, {
+ logPoint: "C021_LANGFUSE_INIT_WARNING",
+ constructorId,
+ timestamp: new Date().toISOString(),
+ elapsedMs: Date.now() - constructorStartTime,
+ elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
+ langfuseInitDurationNs: langfuseInitDurationNs.toString(),
+ healthStatus,
+ message: "Langfuse initialized but not healthy",
+ });
+ }
+ }
+ else {
+ logger.debug(`[NeuroLink] 🚫 LOG_POINT_C022_LANGFUSE_DISABLED`, {
+ logPoint: "C022_LANGFUSE_DISABLED",
+ constructorId,
+ timestamp: new Date().toISOString(),
+ elapsedMs: Date.now() - constructorStartTime,
+ elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
+ message: "Langfuse observability not enabled - skipping initialization",
+ });
+ }
+ }
+ catch (error) {
+ const langfuseInitErrorDurationNs = process.hrtime.bigint() - langfuseInitStartTime;
+ logger.error(`[NeuroLink] ❌ LOG_POINT_C023_LANGFUSE_INIT_ERROR`, {
+ logPoint: "C023_LANGFUSE_INIT_ERROR",
+ constructorId,
+ timestamp: new Date().toISOString(),
+ elapsedMs: Date.now() - constructorStartTime,
+ elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
+ langfuseInitDurationNs: langfuseInitErrorDurationNs.toString(),
+ errorMessage: error instanceof Error ? error.message : String(error),
+ errorStack: error instanceof Error ? error.stack : undefined,
+ message: "Langfuse observability initialization failed",
+ });
+ }
+ }
  /**
  * Log constructor completion with final state summary
  */
@@ -992,6 +1071,68 @@ export class NeuroLink {
  * @throws {Error} When all providers fail to generate content
  * @throws {Error} When conversation memory operations fail (if enabled)
  */
+ /**
+ * Get observability configuration
+ */
+ getObservabilityConfig() {
+ return this.observabilityConfig;
+ }
+ /**
+ * Check if Langfuse telemetry is enabled
+ * Centralized utility to avoid duplication across providers
+ */
+ isTelemetryEnabled() {
+ return this.observabilityConfig?.langfuse?.enabled || false;
+ }
+ /**
+ * Public method to initialize Langfuse observability
+ * This method can be called externally to ensure Langfuse is properly initialized
+ */
+ async initializeLangfuseObservability() {
+ try {
+ const langfuseConfig = this.observabilityConfig?.langfuse;
+ if (langfuseConfig?.enabled) {
+ initializeOpenTelemetry(langfuseConfig);
+ logger.debug("[NeuroLink] Langfuse observability initialized via public method");
+ }
+ else {
+ logger.debug("[NeuroLink] Langfuse not enabled, skipping initialization");
+ }
+ }
+ catch (error) {
+ logger.warn("[NeuroLink] Failed to initialize Langfuse observability:", error);
+ }
+ }
+ /**
+ * Gracefully shutdown NeuroLink and all MCP connections
+ */
+ async shutdown() {
+ try {
+ logger.debug("[NeuroLink] Starting graceful shutdown");
+ try {
+ await flushOpenTelemetry();
+ await shutdownOpenTelemetry();
+ logger.debug("[NeuroLink] OpenTelemetry shutdown completed");
+ }
+ catch (error) {
+ logger.warn("[NeuroLink] OpenTelemetry shutdown failed:", error);
+ }
+ if (this.externalServerManager) {
+ try {
+ await this.externalServerManager.shutdown();
+ logger.debug("[NeuroLink] MCP servers shutdown completed");
+ }
+ catch (error) {
+ logger.warn("[NeuroLink] MCP servers shutdown failed:", error);
+ }
+ }
+ logger.debug("[NeuroLink] Graceful shutdown completed");
+ }
+ catch (error) {
+ logger.error("[NeuroLink] Shutdown failed:", error);
+ throw error;
+ }
+ }
  async generate(optionsOrPrompt) {
  const originalPrompt = this._extractOriginalPrompt(optionsOrPrompt);
  // Convert string prompt to full options
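Taken together with shutdown(), these additions give NeuroLink a full observability lifecycle: configure at construction, trace during generate(), flush and tear down on exit. An end-to-end sketch using only methods shown in this diff; the prompt and log line are illustrative:

```ts
// End-to-end lifecycle sketch based on the APIs added in 7.50.0.
import { NeuroLink } from "@juspay/neurolink";

const neurolink = new NeuroLink({
  observability: { langfuse: { enabled: true } },
});

try {
  const result = await neurolink.generate("Summarize OpenTelemetry in one sentence.");
  console.log(result);
} finally {
  // shutdown() flushes and stops OpenTelemetry first, then shuts down any
  // external MCP servers; each step logs and continues on failure, but the
  // method rethrows if the overall shutdown itself fails.
  await neurolink.shutdown();
}
```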