@juspay/neurolink 7.49.0 → 7.50.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/cli/factories/commandFactory.d.ts +4 -0
  3. package/dist/cli/factories/commandFactory.js +19 -0
  4. package/dist/cli/index.js +13 -2
  5. package/dist/core/baseProvider.d.ts +9 -0
  6. package/dist/core/baseProvider.js +36 -3
  7. package/dist/index.d.ts +8 -2
  8. package/dist/index.js +11 -10
  9. package/dist/lib/core/baseProvider.d.ts +9 -0
  10. package/dist/lib/core/baseProvider.js +36 -3
  11. package/dist/lib/index.d.ts +8 -2
  12. package/dist/lib/index.js +11 -10
  13. package/dist/lib/neurolink.d.ts +35 -6
  14. package/dist/lib/neurolink.js +141 -0
  15. package/dist/lib/providers/anthropic.js +1 -0
  16. package/dist/lib/providers/azureOpenai.js +1 -0
  17. package/dist/lib/providers/googleAiStudio.js +1 -0
  18. package/dist/lib/providers/googleVertex.js +1 -0
  19. package/dist/lib/providers/openAI.js +1 -0
  20. package/dist/lib/services/server/ai/observability/instrumentation.d.ts +57 -0
  21. package/dist/lib/services/server/ai/observability/instrumentation.js +170 -0
  22. package/dist/lib/session/globalSessionState.js +37 -1
  23. package/dist/lib/telemetry/index.d.ts +1 -0
  24. package/dist/lib/telemetry/telemetryService.d.ts +2 -0
  25. package/dist/lib/telemetry/telemetryService.js +7 -7
  26. package/dist/lib/types/conversation.d.ts +2 -0
  27. package/dist/lib/types/modelTypes.d.ts +6 -6
  28. package/dist/lib/types/observability.d.ts +49 -0
  29. package/dist/lib/types/observability.js +6 -0
  30. package/dist/neurolink.d.ts +35 -6
  31. package/dist/neurolink.js +141 -0
  32. package/dist/providers/anthropic.js +1 -0
  33. package/dist/providers/azureOpenai.js +1 -0
  34. package/dist/providers/googleAiStudio.js +1 -0
  35. package/dist/providers/googleVertex.js +1 -0
  36. package/dist/providers/openAI.js +1 -0
  37. package/dist/services/server/ai/observability/instrumentation.d.ts +57 -0
  38. package/dist/services/server/ai/observability/instrumentation.js +170 -0
  39. package/dist/session/globalSessionState.js +37 -1
  40. package/dist/telemetry/index.d.ts +1 -0
  41. package/dist/telemetry/telemetryService.d.ts +2 -0
  42. package/dist/telemetry/telemetryService.js +7 -7
  43. package/dist/types/conversation.d.ts +2 -0
  44. package/dist/types/observability.d.ts +49 -0
  45. package/dist/types/observability.js +6 -0
  46. package/package.json +10 -14
package/dist/lib/providers/anthropic.js (and package/dist/providers/anthropic.js)
@@ -147,6 +147,7 @@ export class AnthropicProvider extends BaseProvider {
     maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
     toolChoice: shouldUseTools ? "auto" : "none",
     abortSignal: timeoutController?.controller.signal,
+    experimental_telemetry: this.getStreamTelemetryConfig(options),
     onStepFinish: ({ toolCalls, toolResults }) => {
         this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
             logger.warn("[AnthropicProvider] Failed to store tool executions", {
package/dist/lib/providers/azureOpenai.js (and package/dist/providers/azureOpenai.js)
@@ -162,6 +162,7 @@ export class AzureOpenAIProvider extends BaseProvider {
         : {}),
     tools,
     toolChoice: shouldUseTools ? "auto" : "none",
+    experimental_telemetry: this.getStreamTelemetryConfig(options),
     onStepFinish: ({ toolCalls, toolResults }) => {
         this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
             logger.warn("[AzureOpenaiProvider] Failed to store tool executions", {
package/dist/lib/providers/googleAiStudio.js (and package/dist/providers/googleAiStudio.js)
@@ -141,6 +141,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
     maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
     toolChoice: shouldUseTools ? "auto" : "none",
     abortSignal: timeoutController?.controller.signal,
+    experimental_telemetry: this.getStreamTelemetryConfig(options),
     onStepFinish: ({ toolCalls, toolResults }) => {
         this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
             logger.warn("[GoogleAiStudioProvider] Failed to store tool executions", {
package/dist/lib/providers/googleVertex.js (and package/dist/providers/googleVertex.js)
@@ -666,6 +666,7 @@ export class GoogleVertexProvider extends BaseProvider {
         maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
     }),
     abortSignal: timeoutController?.controller.signal,
+    experimental_telemetry: this.getStreamTelemetryConfig(options),
     onError: (event) => {
         const error = event.error;
         const errorMessage = error instanceof Error ? error.message : String(error);
package/dist/lib/providers/openAI.js (and package/dist/providers/openAI.js)
@@ -316,6 +316,7 @@ export class OpenAIProvider extends BaseProvider {
     maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
     toolChoice: shouldUseTools && Object.keys(tools).length > 0 ? "auto" : "none",
     abortSignal: timeoutController?.controller.signal,
+    experimental_telemetry: this.getStreamTelemetryConfig(options),
     onStepFinish: ({ toolCalls, toolResults }) => {
         logger.info("Tool execution completed", { toolResults, toolCalls });
         // Handle tool execution storage
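
All five providers gain the same one-line change: streaming calls now pass the Vercel AI SDK's experimental_telemetry option through a shared getStreamTelemetryConfig helper on BaseProvider (see the baseProvider.js entries in the file list; the helper's body is not part of the hunks shown here). A minimal sketch of what such a helper plausibly returns, written against the AI SDK's documented TelemetrySettings fields; everything below is an assumption for illustration, not the actual implementation:

    // Hedged sketch only; the real helper lives in dist/core/baseProvider.js.
    // TelemetrySettingsLike mirrors the AI SDK's documented TelemetrySettings shape.
    interface TelemetrySettingsLike {
        isEnabled: boolean;
        functionId?: string;
        metadata?: Record<string, unknown>;
    }

    function getStreamTelemetryConfig(
        telemetryEnabled: boolean, // e.g. the NeuroLink instance's isTelemetryEnabled()
        providerName: string,      // hypothetical label
    ): TelemetrySettingsLike | undefined {
        // Returning undefined keeps provider behavior unchanged when no
        // observability config was supplied.
        if (!telemetryEnabled) {
            return undefined;
        }
        return {
            isEnabled: true,
            functionId: `${providerName}.stream`,
            metadata: { provider: providerName },
        };
    }

    const telemetry = getStreamTelemetryConfig(true, "anthropic");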
package/dist/lib/services/server/ai/observability/instrumentation.d.ts (new file; and package/dist/services/server/ai/observability/instrumentation.d.ts)
@@ -0,0 +1,57 @@
+/**
+ * OpenTelemetry Instrumentation for Langfuse v4
+ *
+ * Configures OpenTelemetry TracerProvider with LangfuseSpanProcessor to capture
+ * traces from Vercel AI SDK's experimental_telemetry feature.
+ *
+ * Flow: Vercel AI SDK → OpenTelemetry Spans → LangfuseSpanProcessor → Langfuse Platform
+ */
+import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
+import { LangfuseSpanProcessor } from "@langfuse/otel";
+import type { LangfuseConfig } from "../../../../types/observability.js";
+/**
+ * Initialize OpenTelemetry with Langfuse span processor
+ *
+ * This connects Vercel AI SDK's experimental_telemetry to Langfuse by:
+ * 1. Creating LangfuseSpanProcessor with Langfuse credentials
+ * 2. Creating a NodeTracerProvider with service metadata and span processor
+ * 3. Registering the provider globally for AI SDK to use
+ *
+ * @param config - Langfuse configuration passed from parent application
+ */
+export declare function initializeOpenTelemetry(config: LangfuseConfig): void;
+/**
+ * Flush all pending spans to Langfuse
+ */
+export declare function flushOpenTelemetry(): Promise<void>;
+/**
+ * Shutdown OpenTelemetry and Langfuse span processor
+ */
+export declare function shutdownOpenTelemetry(): Promise<void>;
+/**
+ * Get the Langfuse span processor
+ */
+export declare function getLangfuseSpanProcessor(): LangfuseSpanProcessor | null;
+/**
+ * Get the tracer provider
+ */
+export declare function getTracerProvider(): NodeTracerProvider | null;
+/**
+ * Check if OpenTelemetry is initialized
+ */
+export declare function isOpenTelemetryInitialized(): boolean;
+/**
+ * Get health status for Langfuse observability
+ */
+export declare function getLangfuseHealthStatus(): {
+    isHealthy: boolean | undefined;
+    initialized: boolean;
+    credentialsValid: boolean;
+    enabled: boolean;
+    hasProcessor: boolean;
+    config: {
+        baseUrl: string;
+        environment: string;
+        release: string;
+    } | undefined;
+};
package/dist/lib/services/server/ai/observability/instrumentation.js (new file; and package/dist/services/server/ai/observability/instrumentation.js)
@@ -0,0 +1,170 @@
+/**
+ * OpenTelemetry Instrumentation for Langfuse v4
+ *
+ * Configures OpenTelemetry TracerProvider with LangfuseSpanProcessor to capture
+ * traces from Vercel AI SDK's experimental_telemetry feature.
+ *
+ * Flow: Vercel AI SDK → OpenTelemetry Spans → LangfuseSpanProcessor → Langfuse Platform
+ */
+import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
+import { LangfuseSpanProcessor } from "@langfuse/otel";
+import { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION, } from "@opentelemetry/semantic-conventions";
+import { resourceFromAttributes } from "@opentelemetry/resources";
+import { logger } from "../../../../utils/logger.js";
+const LOG_PREFIX = "[OpenTelemetry]";
+let tracerProvider = null;
+let langfuseProcessor = null;
+let isInitialized = false;
+let isCredentialsValid = false;
+let currentConfig = null;
+/**
+ * Initialize OpenTelemetry with Langfuse span processor
+ *
+ * This connects Vercel AI SDK's experimental_telemetry to Langfuse by:
+ * 1. Creating LangfuseSpanProcessor with Langfuse credentials
+ * 2. Creating a NodeTracerProvider with service metadata and span processor
+ * 3. Registering the provider globally for AI SDK to use
+ *
+ * @param config - Langfuse configuration passed from parent application
+ */
+export function initializeOpenTelemetry(config) {
+    if (isInitialized) {
+        logger.debug(`${LOG_PREFIX} Already initialized`);
+        return;
+    }
+    if (!config.enabled) {
+        logger.debug(`${LOG_PREFIX} Langfuse disabled, skipping initialization`);
+        isInitialized = true;
+        return;
+    }
+    if (!config.publicKey || !config.secretKey) {
+        logger.warn(`${LOG_PREFIX} Langfuse enabled but missing credentials, skipping initialization`);
+        isInitialized = true;
+        isCredentialsValid = false;
+        return;
+    }
+    try {
+        currentConfig = config;
+        isCredentialsValid = true;
+        // Create Langfuse span processor with configuration
+        langfuseProcessor = new LangfuseSpanProcessor({
+            publicKey: config.publicKey,
+            secretKey: config.secretKey,
+            baseUrl: config.baseUrl || "https://cloud.langfuse.com",
+            environment: config.environment || "dev",
+            release: config.release || "v1.0.0",
+        });
+        // Create resource with service metadata (v2.x API)
+        const resource = resourceFromAttributes({
+            [ATTR_SERVICE_NAME]: "neurolink",
+            [ATTR_SERVICE_VERSION]: config.release || "v1.0.0",
+            "deployment.environment": config.environment || "dev",
+        });
+        // Initialize tracer provider with span processor and resource
+        tracerProvider = new NodeTracerProvider({
+            resource,
+            spanProcessors: [langfuseProcessor],
+        });
+        // Register provider globally so Vercel AI SDK can use it
+        tracerProvider.register();
+        isInitialized = true;
+        logger.info(`${LOG_PREFIX} Initialized with Langfuse span processor`, {
+            baseUrl: config.baseUrl || "https://cloud.langfuse.com",
+            environment: config.environment || "dev",
+            release: config.release || "v1.0.0",
+        });
+    }
+    catch (error) {
+        logger.error(`${LOG_PREFIX} Initialization failed`, {
+            error: error instanceof Error ? error.message : String(error),
+            stack: error instanceof Error ? error.stack : undefined,
+        });
+        throw error;
+    }
+}
+/**
+ * Flush all pending spans to Langfuse
+ */
+export async function flushOpenTelemetry() {
+    if (!isInitialized) {
+        logger.debug(`${LOG_PREFIX} Not initialized, skipping flush`);
+        return;
+    }
+    if (!langfuseProcessor) {
+        logger.debug(`${LOG_PREFIX} No processor to flush (Langfuse disabled)`);
+        return;
+    }
+    try {
+        logger.info(`${LOG_PREFIX} Flushing pending spans to Langfuse...`);
+        await langfuseProcessor.forceFlush();
+        logger.info(`${LOG_PREFIX} Successfully flushed spans to Langfuse`);
+    }
+    catch (error) {
+        logger.error(`${LOG_PREFIX} Flush failed`, {
+            error: error instanceof Error ? error.message : String(error),
+            stack: error instanceof Error ? error.stack : undefined,
+        });
+        throw error;
+    }
+}
+/**
+ * Shutdown OpenTelemetry and Langfuse span processor
+ */
+export async function shutdownOpenTelemetry() {
+    if (!isInitialized || !tracerProvider) {
+        return;
+    }
+    try {
+        await tracerProvider.shutdown();
+        tracerProvider = null;
+        langfuseProcessor = null;
+        isInitialized = false;
+        isCredentialsValid = false;
+        logger.debug(`${LOG_PREFIX} Shutdown complete`);
+    }
+    catch (error) {
+        logger.error(`${LOG_PREFIX} Shutdown failed`, {
+            error: error instanceof Error ? error.message : String(error),
+        });
+    }
+}
+/**
+ * Get the Langfuse span processor
+ */
+export function getLangfuseSpanProcessor() {
+    return langfuseProcessor;
+}
+/**
+ * Get the tracer provider
+ */
+export function getTracerProvider() {
+    return tracerProvider;
+}
+/**
+ * Check if OpenTelemetry is initialized
+ */
+export function isOpenTelemetryInitialized() {
+    return isInitialized;
+}
+/**
+ * Get health status for Langfuse observability
+ */
+export function getLangfuseHealthStatus() {
+    return {
+        isHealthy: currentConfig?.enabled &&
+            isInitialized &&
+            isCredentialsValid &&
+            langfuseProcessor !== null,
+        initialized: isInitialized,
+        credentialsValid: isCredentialsValid,
+        enabled: currentConfig?.enabled || false,
+        hasProcessor: langfuseProcessor !== null,
+        config: currentConfig
+            ? {
+                baseUrl: currentConfig.baseUrl || "https://cloud.langfuse.com",
+                environment: currentConfig.environment || "dev",
+                release: currentConfig.release || "v1.0.0",
+            }
+            : undefined,
+    };
+}
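
A hedged usage sketch tying these exports together: initialize once at startup, then flush and shut down on exit. The function names are exactly those exported above; the import path and process wiring are illustrative:

    import {
        initializeOpenTelemetry,
        flushOpenTelemetry,
        shutdownOpenTelemetry,
        getLangfuseHealthStatus,
    } from "./services/server/ai/observability/instrumentation.js"; // illustrative path

    // Register the global tracer provider so the AI SDK's spans reach Langfuse.
    initializeOpenTelemetry({
        enabled: true,
        publicKey: process.env.LANGFUSE_PUBLIC_KEY ?? "",
        secretKey: process.env.LANGFUSE_SECRET_KEY ?? "",
        environment: "dev",
    });
    console.log(getLangfuseHealthStatus()); // isHealthy: true once credentials are set

    // On exit: push buffered spans, then tear down the provider.
    process.on("beforeExit", () => {
        void flushOpenTelemetry().then(shutdownOpenTelemetry);
    });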
package/dist/lib/session/globalSessionState.js (and package/dist/session/globalSessionState.js)
@@ -1,5 +1,31 @@
 import { nanoid } from "nanoid";
 import { NeuroLink } from "../neurolink.js";
+/**
+ * Build observability config from environment variables
+ * Used by CLI to configure NeuroLink instances
+ */
+function buildObservabilityConfigFromEnv() {
+    const langfuseEnabled = process.env.LANGFUSE_ENABLED?.trim().toLowerCase() === "true";
+    const publicKey = process.env.LANGFUSE_PUBLIC_KEY?.trim();
+    const secretKey = process.env.LANGFUSE_SECRET_KEY?.trim();
+    if (!langfuseEnabled || !publicKey || !secretKey) {
+        return undefined;
+    }
+    return {
+        langfuse: {
+            enabled: langfuseEnabled,
+            publicKey,
+            secretKey,
+            baseUrl: process.env.LANGFUSE_BASE_URL?.trim() || "https://cloud.langfuse.com",
+            environment: process.env.LANGFUSE_ENVIRONMENT?.trim() ||
+                process.env.PUBLIC_APP_ENVIRONMENT?.trim() ||
+                "dev",
+            release: process.env.PUBLIC_APP_VERSION?.trim() ||
+                process.env.npm_package_version?.trim() ||
+                "v1.0.0",
+        },
+    };
+}
 export class GlobalSessionManager {
     static instance;
     loopSession = null;
@@ -19,6 +45,11 @@ export class GlobalSessionManager {
                 maxTurnsPerSession: config.maxTurnsPerSession,
             };
         }
+        // Add observability config from environment variables (CLI usage)
+        const observabilityConfig = buildObservabilityConfigFromEnv();
+        if (observabilityConfig) {
+            neurolinkOptions.observability = observabilityConfig;
+        }
         this.loopSession = {
             neurolinkInstance: new NeuroLink(neurolinkOptions),
             sessionId,
@@ -88,7 +119,12 @@ export class GlobalSessionManager {
     }
     getOrCreateNeuroLink() {
        const session = this.getLoopSession();
-        return session ? session.neurolinkInstance : new NeuroLink();
+        if (session) {
+            return session.neurolinkInstance;
+        }
+        // Create new NeuroLink with observability config from environment (CLI usage)
+        const observabilityConfig = buildObservabilityConfigFromEnv();
+        return new NeuroLink(observabilityConfig ? { observability: observabilityConfig } : undefined);
     }
     getCurrentSessionId() {
         return this.getLoopSession()?.sessionId;
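
CLI observability is therefore opt-in through the environment. Illustratively (key values are placeholders), buildObservabilityConfigFromEnv() returns a config only when all three required variables are present:

    // Required; missing any of these yields undefined (observability off):
    process.env.LANGFUSE_ENABLED = "true";
    process.env.LANGFUSE_PUBLIC_KEY = "pk-lf-..."; // placeholder
    process.env.LANGFUSE_SECRET_KEY = "sk-lf-..."; // placeholder
    // Optional; defaults applied as in the code above:
    process.env.LANGFUSE_BASE_URL = "https://cloud.langfuse.com";
    process.env.LANGFUSE_ENVIRONMENT = "staging"; // else PUBLIC_APP_ENVIRONMENT, else "dev"
    process.env.PUBLIC_APP_VERSION = "7.50.0";    // else npm_package_version, else "v1.0.0"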
package/dist/lib/telemetry/index.d.ts (and package/dist/telemetry/index.d.ts)
@@ -9,6 +9,7 @@ export declare function initializeTelemetry(): Promise<import("./telemetryServic
  */
 export declare function getTelemetryStatus(): Promise<{
     enabled: boolean;
+    initialized: boolean;
     endpoint?: string;
     service?: string;
     version?: string;
package/dist/lib/telemetry/telemetryService.d.ts (and package/dist/telemetry/telemetryService.d.ts)
@@ -10,6 +10,7 @@ export declare class TelemetryService {
     private static instance;
     private sdk?;
     private enabled;
+    private initialized;
     private meter?;
     private tracer?;
     private aiRequestCounter?;
@@ -43,6 +44,7 @@ export declare class TelemetryService {
     isEnabled(): boolean;
     getStatus(): {
         enabled: boolean;
+        initialized: boolean;
         endpoint?: string;
         service?: string;
         version?: string;
package/dist/lib/telemetry/telemetryService.js (and package/dist/telemetry/telemetryService.js)
@@ -1,14 +1,14 @@
 import { NodeSDK } from "@opentelemetry/sdk-node";
 import { metrics, trace, } from "@opentelemetry/api";
 import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
-import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
-import { Resource } from "@opentelemetry/resources";
+import { resourceFromAttributes } from "@opentelemetry/resources";
 import { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION, } from "@opentelemetry/semantic-conventions";
 import { logger } from "../utils/logger.js";
 export class TelemetryService {
     static instance;
     sdk;
     enabled = false;
+    initialized = false;
     meter;
     tracer;
     // Optional Metrics (only created when enabled)
@@ -47,16 +47,12 @@ export class TelemetryService {
     }
     initializeTelemetry() {
         try {
-            const resource = new Resource({
+            const resource = resourceFromAttributes({
                 [ATTR_SERVICE_NAME]: process.env.OTEL_SERVICE_NAME || "neurolink-ai",
                 [ATTR_SERVICE_VERSION]: process.env.OTEL_SERVICE_VERSION || "3.0.1",
             });
             this.sdk = new NodeSDK({
                 resource,
-                traceExporter: new OTLPTraceExporter({
-                    url: process.env.OTEL_EXPORTER_OTLP_TRACES_ENDPOINT ||
-                        `${process.env.OTEL_EXPORTER_OTLP_ENDPOINT}/v1/traces`,
-                }),
                 // Note: Metric reader configured separately
                 instrumentations: [getNodeAutoInstrumentations()],
             });
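
The switch from new Resource({...}) to resourceFromAttributes({...}), together with the dropped Resource import above, tracks the OpenTelemetry JS SDK 2.x API, which replaced the Resource class constructor with a factory function. A minimal before/after for reference:

    // @opentelemetry/resources 1.x (removed in 2.x):
    //   const resource = new Resource({ [ATTR_SERVICE_NAME]: "neurolink-ai" });

    // @opentelemetry/resources 2.x:
    import { resourceFromAttributes } from "@opentelemetry/resources";
    import { ATTR_SERVICE_NAME } from "@opentelemetry/semantic-conventions";

    const resource = resourceFromAttributes({
        [ATTR_SERVICE_NAME]: "neurolink-ai",
    });

The removed OTLPTraceExporter block also means the NodeSDK here no longer wires its own OTLP trace exporter; presumably span export now flows through the Langfuse span processor introduced elsewhere in this release.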
@@ -102,11 +98,13 @@ export class TelemetryService {
         }
         try {
             await this.sdk?.start();
+            this.initialized = true;
             logger.debug("[Telemetry] SDK started successfully");
         }
         catch (error) {
             logger.error("[Telemetry] Failed to start SDK:", error);
             this.enabled = false;
+            this.initialized = false;
         }
     }
     // AI Operation Tracing (NO-OP when disabled)
@@ -250,6 +248,7 @@ export class TelemetryService {
     getStatus() {
         return {
             enabled: this.enabled,
+            initialized: this.initialized,
             endpoint: process.env.OTEL_EXPORTER_OTLP_ENDPOINT,
             service: process.env.OTEL_SERVICE_NAME || "neurolink-ai",
             version: process.env.OTEL_SERVICE_VERSION || "3.0.1",
@@ -285,6 +284,7 @@ export class TelemetryService {
         if (this.enabled && this.sdk) {
             try {
                 await this.sdk.shutdown();
+                this.initialized = false;
                 logger.debug("[Telemetry] SDK shutdown completed");
             }
             catch (error) {
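
With the new flag, callers can distinguish "telemetry enabled by config" from "SDK actually started". A hedged usage sketch against the getTelemetryStatus() declaration shown earlier (import path illustrative):

    import { getTelemetryStatus } from "./telemetry/index.js"; // illustrative path

    const status = await getTelemetryStatus();
    if (status.enabled && !status.initialized) {
        // Enabled via env, but NodeSDK.start() failed or has not run yet.
        console.warn("Telemetry enabled but not initialized", status.endpoint);
    }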
package/dist/lib/types/conversation.d.ts (and package/dist/types/conversation.d.ts)
@@ -157,6 +157,8 @@ export interface NeurolinkOptions {
     conversationMemory?: ConversationMemoryConfig;
     /** Session identifier for conversation context */
     sessionId?: string;
+    /** Observability configuration */
+    observability?: import("./observability.js").ObservabilityConfig;
 }
 /**
  * Session identifier for Redis storage operations
package/dist/lib/types/modelTypes.d.ts (generated Zod types; the six hunks below only reorder displayName and capabilities)
@@ -87,8 +87,8 @@ export declare const ModelConfigSchema: z.ZodObject<{
     releaseDate: z.ZodString;
 }, "strip", z.ZodTypeAny, {
     id: string;
-    capabilities: string[];
     displayName: string;
+    capabilities: string[];
     deprecated: boolean;
     pricing: {
         input: number;
@@ -98,8 +98,8 @@ export declare const ModelConfigSchema: z.ZodObject<{
     releaseDate: string;
 }, {
     id: string;
-    capabilities: string[];
     displayName: string;
+    capabilities: string[];
     deprecated: boolean;
     pricing: {
         input: number;
@@ -133,8 +133,8 @@ export declare const ModelRegistrySchema: z.ZodObject<{
     releaseDate: z.ZodString;
 }, "strip", z.ZodTypeAny, {
     id: string;
-    capabilities: string[];
     displayName: string;
+    capabilities: string[];
     deprecated: boolean;
     pricing: {
         input: number;
@@ -144,8 +144,8 @@ export declare const ModelRegistrySchema: z.ZodObject<{
     releaseDate: string;
 }, {
     id: string;
-    capabilities: string[];
     displayName: string;
+    capabilities: string[];
     deprecated: boolean;
     pricing: {
         input: number;
@@ -160,8 +160,8 @@ export declare const ModelRegistrySchema: z.ZodObject<{
     version: string;
     models: Record<string, Record<string, {
         id: string;
-        capabilities: string[];
         displayName: string;
+        capabilities: string[];
         deprecated: boolean;
         pricing: {
             input: number;
@@ -177,8 +177,8 @@ export declare const ModelRegistrySchema: z.ZodObject<{
     version: string;
     models: Record<string, Record<string, {
         id: string;
-        capabilities: string[];
         displayName: string;
+        capabilities: string[];
         deprecated: boolean;
         pricing: {
             input: number;
package/dist/lib/types/observability.d.ts (new file; and package/dist/types/observability.d.ts)
@@ -0,0 +1,49 @@
+/**
+ * Observability Configuration Types
+ * These configs are passed from the parent application (e.g., Lighthouse)
+ * to enable telemetry and observability features in Neurolink SDK
+ */
+/**
+ * Langfuse observability configuration
+ */
+export interface LangfuseConfig {
+    /** Whether Langfuse is enabled */
+    enabled: boolean;
+    /** Langfuse public key */
+    publicKey: string;
+    /**
+     * Langfuse secret key
+     * @sensitive
+     * WARNING: This is a sensitive credential. Handle securely.
+     * Do NOT log, expose, or share this key. Follow best practices for secret management.
+     */
+    secretKey: string;
+    /** Langfuse base URL (default: https://cloud.langfuse.com) */
+    baseUrl?: string;
+    /** Environment name (e.g., dev, staging, prod) */
+    environment?: string;
+    /** Release/version identifier */
+    release?: string;
+}
+/**
+ * OpenTelemetry configuration
+ */
+export interface OpenTelemetryConfig {
+    /** Whether OpenTelemetry is enabled */
+    enabled: boolean;
+    /** OTLP endpoint URL */
+    endpoint?: string;
+    /** Service name for traces */
+    serviceName?: string;
+    /** Service version */
+    serviceVersion?: string;
+}
+/**
+ * Complete observability configuration for Neurolink SDK
+ */
+export interface ObservabilityConfig {
+    /** Langfuse configuration */
+    langfuse?: LangfuseConfig;
+    /** OpenTelemetry configuration */
+    openTelemetry?: OpenTelemetryConfig;
+}
package/dist/lib/types/observability.js (new file; and package/dist/types/observability.js)
@@ -0,0 +1,6 @@
+/**
+ * Observability Configuration Types
+ * These configs are passed from the parent application (e.g., Lighthouse)
+ * to enable telemetry and observability features in Neurolink SDK
+ */
+export {};
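
An illustrative ObservabilityConfig literal matching these interfaces (credential values are placeholders; the OTLP endpoint is hypothetical):

    import type { ObservabilityConfig } from "./types/observability.js"; // illustrative path

    const observability: ObservabilityConfig = {
        langfuse: {
            enabled: true,
            publicKey: "pk-lf-...",               // placeholder
            secretKey: "sk-lf-...",               // sensitive: load from a secret store
            baseUrl: "https://cloud.langfuse.com",
            environment: "prod",
            release: "7.50.0",
        },
        openTelemetry: {
            enabled: false,                        // OTLP export config, independent of Langfuse
            endpoint: "https://otel-collector.example.com", // hypothetical
            serviceName: "my-app",
            serviceVersion: "1.2.3",
        },
    };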
package/dist/lib/neurolink.d.ts (and package/dist/neurolink.d.ts)
@@ -19,6 +19,17 @@ import { ConversationMemoryManager } from "./core/conversationMemoryManager.js";
 import { RedisConversationMemoryManager } from "./core/redisConversationMemoryManager.js";
 import type { HITLConfig } from "./hitl/types.js";
 import type { ExternalMCPServerInstance, ExternalMCPOperationResult, ExternalMCPToolInfo } from "./types/externalMcp.js";
+import type { ObservabilityConfig } from "./types/observability.js";
+/**
+ * Configuration object for NeuroLink constructor.
+ */
+export interface NeurolinkConstructorConfig {
+    conversationMemory?: Partial<ConversationMemoryConfig>;
+    enableOrchestration?: boolean;
+    hitl?: HITLConfig;
+    toolRegistry?: MCPToolRegistry;
+    observability?: ObservabilityConfig;
+}
 export interface ProviderStatus {
     provider: string;
     status: "working" | "failed" | "not-configured";
@@ -138,12 +149,8 @@ export declare class NeuroLink {
      * @throws {Error} When external server manager initialization fails
      * @throws {Error} When HITL configuration is invalid (if enabled)
      */
-    constructor(config?: {
-        conversationMemory?: Partial<ConversationMemoryConfig>;
-        enableOrchestration?: boolean;
-        hitl?: HITLConfig;
-        toolRegistry?: MCPToolRegistry;
-    });
+    private observabilityConfig?;
+    constructor(config?: NeurolinkConstructorConfig);
     /**
      * Initialize provider registry with security settings
      */
@@ -170,6 +177,10 @@ export declare class NeuroLink {
      * Setup event handlers for external server manager
      */
    private setupExternalServerEventHandlers;
+    /**
+     * Initialize Langfuse observability for AI operations tracking
+     */
+    private initializeLangfuse;
     /**
      * Log constructor completion with final state summary
     */
@@ -279,6 +290,24 @@ export declare class NeuroLink {
      * @throws {Error} When all providers fail to generate content
      * @throws {Error} When conversation memory operations fail (if enabled)
      */
+    /**
+     * Get observability configuration
+     */
+    getObservabilityConfig(): ObservabilityConfig | undefined;
+    /**
+     * Check if Langfuse telemetry is enabled
+     * Centralized utility to avoid duplication across providers
+     */
+    isTelemetryEnabled(): boolean;
+    /**
+     * Public method to initialize Langfuse observability
+     * This method can be called externally to ensure Langfuse is properly initialized
+     */
+    initializeLangfuseObservability(): Promise<void>;
+    /**
+     * Gracefully shutdown NeuroLink and all MCP connections
+     */
+    shutdown(): Promise<void>;
     generate(optionsOrPrompt: GenerateOptions | string): Promise<GenerateResult>;
     /**
      * BACKWARD COMPATIBILITY: Legacy generateText method
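
Putting the new public surface together: a hedged end-to-end sketch that uses only members declared above (the observability constructor option, initializeLangfuseObservability, isTelemetryEnabled, generate, shutdown). The import specifier and credential values are illustrative:

    import { NeuroLink } from "@juspay/neurolink"; // illustrative specifier

    const neurolink = new NeuroLink({
        observability: {
            langfuse: {
                enabled: true,
                publicKey: process.env.LANGFUSE_PUBLIC_KEY ?? "",
                secretKey: process.env.LANGFUSE_SECRET_KEY ?? "",
                environment: "prod",
            },
        },
    });

    // Optional: call explicitly to ensure Langfuse is initialized up front.
    await neurolink.initializeLangfuseObservability();
    console.log(neurolink.isTelemetryEnabled()); // true when a Langfuse config is present

    const result = await neurolink.generate("Hello!");
    console.log(result);

    // Graceful teardown of NeuroLink and all MCP connections.
    await neurolink.shutdown();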