@juspay/neurolink 7.29.0 → 7.29.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
1
+ ## [7.29.1](https://github.com/juspay/neurolink/compare/v7.29.0...v7.29.1) (2025-08-28)
2
+
3
+ ### Bug Fixes
4
+
5
+ - **(vertex):** restored support for ADC (Application Default Credentials) ([238666a](https://github.com/juspay/neurolink/commit/238666ab907fc16945d5de6c5f79637be128f4e6))
6
+
1
7
  ## [7.29.0](https://github.com/juspay/neurolink/compare/v7.28.1...v7.29.0) (2025-08-26)
2
8
 
3
9
  ### Features
@@ -57,6 +57,34 @@ export declare class NeuroLink {
57
57
  */
58
58
  private emitToolEndEvent;
59
59
  private conversationMemory?;
60
+ /**
61
+ * Creates a new NeuroLink instance for AI text generation with MCP tool integration.
62
+ *
63
+ * @param config - Optional configuration object
64
+ * @param config.conversationMemory - Configuration for conversation memory features
65
+ * @param config.conversationMemory.enabled - Whether to enable conversation memory (default: false)
66
+ * @param config.conversationMemory.maxSessions - Maximum number of concurrent sessions (default: 100)
67
+ * @param config.conversationMemory.maxTurnsPerSession - Maximum conversation turns per session (default: 50)
68
+ *
69
+ * @example
70
+ * ```typescript
71
+ * // Basic usage
72
+ * const neurolink = new NeuroLink();
73
+ *
74
+ * // With conversation memory
75
+ * const neurolink = new NeuroLink({
76
+ * conversationMemory: {
77
+ * enabled: true,
78
+ * maxSessions: 50,
79
+ * maxTurnsPerSession: 20
80
+ * }
81
+ * });
82
+ * ```
83
+ *
84
+ * @throws {Error} When provider registry setup fails
85
+ * @throws {Error} When conversation memory initialization fails (if enabled)
86
+ * @throws {Error} When external server manager initialization fails
87
+ */
60
88
  constructor(config?: {
61
89
  conversationMemory?: Partial<ConversationMemoryConfig>;
62
90
  });
@@ -84,6 +112,54 @@ export declare class NeuroLink {
84
112
  * @param config Optional configuration to override default summarization settings.
85
113
  */
86
114
  enableContextSummarization(config?: Partial<ContextManagerConfig>): void;
115
+ /**
116
+ * Generate AI content using the best available provider with MCP tool integration.
117
+ * This is the primary method for text generation with full feature support.
118
+ *
119
+ * @param optionsOrPrompt - Either a string prompt or a comprehensive GenerateOptions object
120
+ * @param optionsOrPrompt.input - Input configuration object
121
+ * @param optionsOrPrompt.input.text - The text prompt to send to the AI (required)
122
+ * @param optionsOrPrompt.provider - AI provider to use ('auto', 'openai', 'anthropic', etc.)
123
+ * @param optionsOrPrompt.model - Specific model to use (e.g., 'gpt-4', 'claude-3-opus')
124
+ * @param optionsOrPrompt.temperature - Randomness in response (0.0 = deterministic, 2.0 = very random)
125
+ * @param optionsOrPrompt.maxTokens - Maximum tokens in response
126
+ * @param optionsOrPrompt.systemPrompt - System message to set AI behavior
127
+ * @param optionsOrPrompt.disableTools - Whether to disable MCP tool usage
128
+ * @param optionsOrPrompt.enableAnalytics - Whether to include usage analytics
129
+ * @param optionsOrPrompt.enableEvaluation - Whether to include response quality evaluation
130
+ * @param optionsOrPrompt.context - Additional context for the request
131
+ * @param optionsOrPrompt.evaluationDomain - Domain for specialized evaluation
132
+ * @param optionsOrPrompt.toolUsageContext - Context for tool usage decisions
133
+ *
134
+ * @returns Promise resolving to GenerateResult with content, usage data, and optional analytics
135
+ *
136
+ * @example
137
+ * ```typescript
138
+ * // Simple usage with string prompt
139
+ * const result = await neurolink.generate("What is artificial intelligence?");
140
+ * console.log(result.content);
141
+ *
142
+ * // Advanced usage with options
143
+ * const result = await neurolink.generate({
144
+ * input: { text: "Explain quantum computing" },
145
+ * provider: "openai",
146
+ * model: "gpt-4",
147
+ * temperature: 0.7,
148
+ * maxTokens: 500,
149
+ * enableAnalytics: true,
150
+ * enableEvaluation: true,
151
+ * context: { domain: "science", level: "intermediate" }
152
+ * });
153
+ *
154
+ * // Access analytics and evaluation data
155
+ * console.log(result.analytics?.usage);
156
+ * console.log(result.evaluation?.relevance);
157
+ * ```
158
+ *
159
+ * @throws {Error} When input text is missing or invalid
160
+ * @throws {Error} When all providers fail to generate content
161
+ * @throws {Error} When conversation memory operations fail (if enabled)
162
+ */
87
163
  generate(optionsOrPrompt: GenerateOptions | string): Promise<GenerateResult>;
88
164
  /**
89
165
  * BACKWARD COMPATIBILITY: Legacy generateText method
@@ -128,13 +204,230 @@ export declare class NeuroLink {
128
204
  */
129
205
  streamText(prompt: string, options?: Partial<StreamOptions>): Promise<AsyncIterable<string>>;
130
206
  /**
131
- * PRIMARY METHOD: Stream content using AI (recommended for new code)
132
- * Future-ready for multi-modal capabilities with current text focus
207
+ * Stream AI-generated content in real-time using the best available provider.
208
+ * This method provides real-time streaming of AI responses with full MCP tool integration.
209
+ *
210
+ * @param options - Stream configuration options
211
+ * @param options.input - Input configuration object
212
+ * @param options.input.text - The text prompt to send to the AI (required)
213
+ * @param options.provider - AI provider to use ('auto', 'openai', 'anthropic', etc.)
214
+ * @param options.model - Specific model to use (e.g., 'gpt-4', 'claude-3-opus')
215
+ * @param options.temperature - Randomness in response (0.0 = deterministic, 2.0 = very random)
216
+ * @param options.maxTokens - Maximum tokens in response
217
+ * @param options.systemPrompt - System message to set AI behavior
218
+ * @param options.disableTools - Whether to disable MCP tool usage
219
+ * @param options.enableAnalytics - Whether to include usage analytics
220
+ * @param options.enableEvaluation - Whether to include response quality evaluation
221
+ * @param options.context - Additional context for the request
222
+ * @param options.evaluationDomain - Domain for specialized evaluation
223
+ *
224
+ * @returns Promise resolving to StreamResult with an async iterable stream
225
+ *
226
+ * @example
227
+ * ```typescript
228
+ * // Basic streaming usage
229
+ * const result = await neurolink.stream({
230
+ * input: { text: "Tell me a story about space exploration" }
231
+ * });
232
+ *
233
+ * // Consume the stream
234
+ * for await (const chunk of result.stream) {
235
+ * process.stdout.write(chunk.content);
236
+ * }
237
+ *
238
+ * // Advanced streaming with options
239
+ * const result = await neurolink.stream({
240
+ * input: { text: "Explain machine learning" },
241
+ * provider: "openai",
242
+ * model: "gpt-4",
243
+ * temperature: 0.7,
244
+ * enableAnalytics: true,
245
+ * context: { domain: "education", audience: "beginners" }
246
+ * });
247
+ *
248
+ * // Access metadata and analytics
249
+ * console.log(result.provider);
250
+ * console.log(result.analytics?.usage);
251
+ * ```
252
+ *
253
+ * @throws {Error} When input text is missing or invalid
254
+ * @throws {Error} When all providers fail to generate content
255
+ * @throws {Error} When conversation memory operations fail (if enabled)
133
256
  */
134
257
  stream(options: StreamOptions): Promise<StreamResult>;
135
258
  /**
136
- * Get the EventEmitter to listen to NeuroLink events
137
- * @returns EventEmitter instance
259
+ * Get the EventEmitter instance to listen to NeuroLink events for real-time monitoring and debugging.
260
+ * This method provides access to the internal event system that emits events during AI generation,
261
+ * tool execution, streaming, and other operations for comprehensive observability.
262
+ *
263
+ * @returns EventEmitter instance that emits various NeuroLink operation events
264
+ *
265
+ * @example
266
+ * ```typescript
267
+ * // Basic event listening setup
268
+ * const neurolink = new NeuroLink();
269
+ * const emitter = neurolink.getEventEmitter();
270
+ *
271
+ * // Listen to generation events
272
+ * emitter.on('generation:start', (event) => {
273
+ * console.log(`Generation started with provider: ${event.provider}`);
274
+ * console.log(`Started at: ${new Date(event.timestamp)}`);
275
+ * });
276
+ *
277
+ * emitter.on('generation:end', (event) => {
278
+ * console.log(`Generation completed in ${event.responseTime}ms`);
279
+ * console.log(`Tools used: ${event.toolsUsed?.length || 0}`);
280
+ * });
281
+ *
282
+ * // Listen to streaming events
283
+ * emitter.on('stream:start', (event) => {
284
+ * console.log(`Streaming started with provider: ${event.provider}`);
285
+ * });
286
+ *
287
+ * emitter.on('stream:end', (event) => {
288
+ * console.log(`Streaming completed in ${event.responseTime}ms`);
289
+ * if (event.fallback) console.log('Used fallback streaming');
290
+ * });
291
+ *
292
+ * // Listen to tool execution events
293
+ * emitter.on('tool:start', (event) => {
294
+ * console.log(`Tool execution started: ${event.toolName}`);
295
+ * });
296
+ *
297
+ * emitter.on('tool:end', (event) => {
298
+ * console.log(`Tool ${event.toolName} ${event.success ? 'succeeded' : 'failed'}`);
299
+ * console.log(`Execution time: ${event.responseTime}ms`);
300
+ * });
301
+ *
302
+ * // Listen to tool registration events
303
+ * emitter.on('tools-register:start', (event) => {
304
+ * console.log(`Registering tool: ${event.toolName}`);
305
+ * });
306
+ *
307
+ * emitter.on('tools-register:end', (event) => {
308
+ * console.log(`Tool registration ${event.success ? 'succeeded' : 'failed'}: ${event.toolName}`);
309
+ * });
310
+ *
311
+ * // Listen to external MCP server events
312
+ * emitter.on('externalMCP:serverConnected', (event) => {
313
+ * console.log(`External MCP server connected: ${event.serverId}`);
314
+ * console.log(`Tools available: ${event.toolCount || 0}`);
315
+ * });
316
+ *
317
+ * emitter.on('externalMCP:serverDisconnected', (event) => {
318
+ * console.log(`External MCP server disconnected: ${event.serverId}`);
319
+ * console.log(`Reason: ${event.reason || 'Unknown'}`);
320
+ * });
321
+ *
322
+ * emitter.on('externalMCP:toolDiscovered', (event) => {
323
+ * console.log(`New tool discovered: ${event.toolName} from ${event.serverId}`);
324
+ * });
325
+ *
326
+ * // Advanced usage with error handling
327
+ * emitter.on('error', (error) => {
328
+ * console.error('NeuroLink error:', error);
329
+ * });
330
+ *
331
+ * // Clean up event listeners when done
332
+ * function cleanup() {
333
+ * emitter.removeAllListeners();
334
+ * }
335
+ *
336
+ * process.on('SIGINT', cleanup);
337
+ * process.on('SIGTERM', cleanup);
338
+ * ```
339
+ *
340
+ * @example
341
+ * ```typescript
342
+ * // Advanced monitoring with metrics collection
343
+ * const neurolink = new NeuroLink();
344
+ * const emitter = neurolink.getEventEmitter();
345
+ * const metrics = {
346
+ * generations: 0,
347
+ * totalResponseTime: 0,
348
+ * toolExecutions: 0,
349
+ * failures: 0
350
+ * };
351
+ *
352
+ * // Collect performance metrics
353
+ * emitter.on('generation:end', (event) => {
354
+ * metrics.generations++;
355
+ * metrics.totalResponseTime += event.responseTime;
356
+ * metrics.toolExecutions += event.toolsUsed?.length || 0;
357
+ * });
358
+ *
359
+ * emitter.on('tool:end', (event) => {
360
+ * if (!event.success) {
361
+ * metrics.failures++;
362
+ * }
363
+ * });
364
+ *
365
+ * // Log metrics every 10 seconds
366
+ * setInterval(() => {
367
+ * const avgResponseTime = metrics.generations > 0
368
+ * ? metrics.totalResponseTime / metrics.generations
369
+ * : 0;
370
+ *
371
+ * console.log('NeuroLink Metrics:', {
372
+ * totalGenerations: metrics.generations,
373
+ * averageResponseTime: `${avgResponseTime.toFixed(2)}ms`,
374
+ * totalToolExecutions: metrics.toolExecutions,
375
+ * failureRate: `${((metrics.failures / (metrics.toolExecutions || 1)) * 100).toFixed(2)}%`
376
+ * });
377
+ * }, 10000);
378
+ * ```
379
+ *
380
+ * **Available Events:**
381
+ *
382
+ * **Generation Events:**
383
+ * - `generation:start` - Fired when text generation begins
384
+ * - `{ provider: string, timestamp: number }`
385
+ * - `generation:end` - Fired when text generation completes
386
+ * - `{ provider: string, responseTime: number, toolsUsed?: string[], timestamp: number }`
387
+ *
388
+ * **Streaming Events:**
389
+ * - `stream:start` - Fired when streaming begins
390
+ * - `{ provider: string, timestamp: number }`
391
+ * - `stream:end` - Fired when streaming completes
392
+ * - `{ provider: string, responseTime: number, fallback?: boolean }`
393
+ *
394
+ * **Tool Events:**
395
+ * - `tool:start` - Fired when tool execution begins
396
+ * - `{ toolName: string, timestamp: number }`
397
+ * - `tool:end` - Fired when tool execution completes
398
+ * - `{ toolName: string, responseTime: number, success: boolean, timestamp: number }`
399
+ * - `tools-register:start` - Fired when tool registration begins
400
+ * - `{ toolName: string, timestamp: number }`
401
+ * - `tools-register:end` - Fired when tool registration completes
402
+ * - `{ toolName: string, success: boolean, timestamp: number }`
403
+ *
404
+ * **External MCP Events:**
405
+ * - `externalMCP:serverConnected` - Fired when external MCP server connects
406
+ * - `{ serverId: string, toolCount?: number, timestamp: number }`
407
+ * - `externalMCP:serverDisconnected` - Fired when external MCP server disconnects
408
+ * - `{ serverId: string, reason?: string, timestamp: number }`
409
+ * - `externalMCP:serverFailed` - Fired when external MCP server fails
410
+ * - `{ serverId: string, error: string, timestamp: number }`
411
+ * - `externalMCP:toolDiscovered` - Fired when external MCP tool is discovered
412
+ * - `{ toolName: string, serverId: string, timestamp: number }`
413
+ * - `externalMCP:toolRemoved` - Fired when external MCP tool is removed
414
+ * - `{ toolName: string, serverId: string, timestamp: number }`
415
+ * - `externalMCP:serverAdded` - Fired when external MCP server is added
416
+ * - `{ serverId: string, config: MCPServerInfo, toolCount: number, timestamp: number }`
417
+ * - `externalMCP:serverRemoved` - Fired when external MCP server is removed
418
+ * - `{ serverId: string, timestamp: number }`
419
+ *
420
+ * **Error Events:**
421
+ * - `error` - Fired when an error occurs
422
+ * - `{ error: Error, context?: object }`
423
+ *
424
+ * @remarks This method does not throw; it simply returns the internal EventEmitter instance.
425
+ *
426
+ * @since 1.0.0
427
+ * @see {@link https://nodejs.org/api/events.html} Node.js EventEmitter documentation
428
+ * @see {@link NeuroLink.generate} for events related to text generation
429
+ * @see {@link NeuroLink.stream} for events related to streaming
430
+ * @see {@link NeuroLink.executeTool} for events related to tool execution
138
431
  */
139
432
  getEventEmitter(): EventEmitter<[never]>;
140
433
  /**
@@ -65,6 +65,34 @@ export class NeuroLink {
65
65
  }
66
66
  // Conversation memory support
67
67
  conversationMemory;
68
+ /**
69
+ * Creates a new NeuroLink instance for AI text generation with MCP tool integration.
70
+ *
71
+ * @param config - Optional configuration object
72
+ * @param config.conversationMemory - Configuration for conversation memory features
73
+ * @param config.conversationMemory.enabled - Whether to enable conversation memory (default: false)
74
+ * @param config.conversationMemory.maxSessions - Maximum number of concurrent sessions (default: 100)
75
+ * @param config.conversationMemory.maxTurnsPerSession - Maximum conversation turns per session (default: 50)
76
+ *
77
+ * @example
78
+ * ```typescript
79
+ * // Basic usage
80
+ * const neurolink = new NeuroLink();
81
+ *
82
+ * // With conversation memory
83
+ * const neurolink = new NeuroLink({
84
+ * conversationMemory: {
85
+ * enabled: true,
86
+ * maxSessions: 50,
87
+ * maxTurnsPerSession: 20
88
+ * }
89
+ * });
90
+ * ```
91
+ *
92
+ * @throws {Error} When provider registry setup fails
93
+ * @throws {Error} When conversation memory initialization fails (if enabled)
94
+ * @throws {Error} When external server manager initialization fails
95
+ */
68
96
  constructor(config) {
69
97
  // 🚀 EXHAUSTIVE LOGGING POINT C001: CONSTRUCTOR ENTRY
70
98
  const constructorStartTime = Date.now();
@@ -724,6 +752,54 @@ export class NeuroLink {
724
752
  this.contextManager = new ContextManager(this.generateTextInternal.bind(this), contextConfig);
725
753
  logger.info("[NeuroLink] Automatic context summarization enabled.");
726
754
  }
755
+ /**
756
+ * Generate AI content using the best available provider with MCP tool integration.
757
+ * This is the primary method for text generation with full feature support.
758
+ *
759
+ * @param optionsOrPrompt - Either a string prompt or a comprehensive GenerateOptions object
760
+ * @param optionsOrPrompt.input - Input configuration object
761
+ * @param optionsOrPrompt.input.text - The text prompt to send to the AI (required)
762
+ * @param optionsOrPrompt.provider - AI provider to use ('auto', 'openai', 'anthropic', etc.)
763
+ * @param optionsOrPrompt.model - Specific model to use (e.g., 'gpt-4', 'claude-3-opus')
764
+ * @param optionsOrPrompt.temperature - Randomness in response (0.0 = deterministic, 2.0 = very random)
765
+ * @param optionsOrPrompt.maxTokens - Maximum tokens in response
766
+ * @param optionsOrPrompt.systemPrompt - System message to set AI behavior
767
+ * @param optionsOrPrompt.disableTools - Whether to disable MCP tool usage
768
+ * @param optionsOrPrompt.enableAnalytics - Whether to include usage analytics
769
+ * @param optionsOrPrompt.enableEvaluation - Whether to include response quality evaluation
770
+ * @param optionsOrPrompt.context - Additional context for the request
771
+ * @param optionsOrPrompt.evaluationDomain - Domain for specialized evaluation
772
+ * @param optionsOrPrompt.toolUsageContext - Context for tool usage decisions
773
+ *
774
+ * @returns Promise resolving to GenerateResult with content, usage data, and optional analytics
775
+ *
776
+ * @example
777
+ * ```typescript
778
+ * // Simple usage with string prompt
779
+ * const result = await neurolink.generate("What is artificial intelligence?");
780
+ * console.log(result.content);
781
+ *
782
+ * // Advanced usage with options
783
+ * const result = await neurolink.generate({
784
+ * input: { text: "Explain quantum computing" },
785
+ * provider: "openai",
786
+ * model: "gpt-4",
787
+ * temperature: 0.7,
788
+ * maxTokens: 500,
789
+ * enableAnalytics: true,
790
+ * enableEvaluation: true,
791
+ * context: { domain: "science", level: "intermediate" }
792
+ * });
793
+ *
794
+ * // Access analytics and evaluation data
795
+ * console.log(result.analytics?.usage);
796
+ * console.log(result.evaluation?.relevance);
797
+ * ```
798
+ *
799
+ * @throws {Error} When input text is missing or invalid
800
+ * @throws {Error} When all providers fail to generate content
801
+ * @throws {Error} When conversation memory operations fail (if enabled)
802
+ */
727
803
  async generate(optionsOrPrompt) {
728
804
  const originalPrompt = this._extractOriginalPrompt(optionsOrPrompt);
729
805
  // Convert string prompt to full options
@@ -1439,8 +1515,55 @@ export class NeuroLink {
1439
1515
  return stringStream();
1440
1516
  }
1441
1517
  /**
1442
- * PRIMARY METHOD: Stream content using AI (recommended for new code)
1443
- * Future-ready for multi-modal capabilities with current text focus
1518
+ * Stream AI-generated content in real-time using the best available provider.
1519
+ * This method provides real-time streaming of AI responses with full MCP tool integration.
1520
+ *
1521
+ * @param options - Stream configuration options
1522
+ * @param options.input - Input configuration object
1523
+ * @param options.input.text - The text prompt to send to the AI (required)
1524
+ * @param options.provider - AI provider to use ('auto', 'openai', 'anthropic', etc.)
1525
+ * @param options.model - Specific model to use (e.g., 'gpt-4', 'claude-3-opus')
1526
+ * @param options.temperature - Randomness in response (0.0 = deterministic, 2.0 = very random)
1527
+ * @param options.maxTokens - Maximum tokens in response
1528
+ * @param options.systemPrompt - System message to set AI behavior
1529
+ * @param options.disableTools - Whether to disable MCP tool usage
1530
+ * @param options.enableAnalytics - Whether to include usage analytics
1531
+ * @param options.enableEvaluation - Whether to include response quality evaluation
1532
+ * @param options.context - Additional context for the request
1533
+ * @param options.evaluationDomain - Domain for specialized evaluation
1534
+ *
1535
+ * @returns Promise resolving to StreamResult with an async iterable stream
1536
+ *
1537
+ * @example
1538
+ * ```typescript
1539
+ * // Basic streaming usage
1540
+ * const result = await neurolink.stream({
1541
+ * input: { text: "Tell me a story about space exploration" }
1542
+ * });
1543
+ *
1544
+ * // Consume the stream
1545
+ * for await (const chunk of result.stream) {
1546
+ * process.stdout.write(chunk.content);
1547
+ * }
1548
+ *
1549
+ * // Advanced streaming with options
1550
+ * const result = await neurolink.stream({
1551
+ * input: { text: "Explain machine learning" },
1552
+ * provider: "openai",
1553
+ * model: "gpt-4",
1554
+ * temperature: 0.7,
1555
+ * enableAnalytics: true,
1556
+ * context: { domain: "education", audience: "beginners" }
1557
+ * });
1558
+ *
1559
+ * // Access metadata and analytics
1560
+ * console.log(result.provider);
1561
+ * console.log(result.analytics?.usage);
1562
+ * ```
1563
+ *
1564
+ * @throws {Error} When input text is missing or invalid
1565
+ * @throws {Error} When all providers fail to generate content
1566
+ * @throws {Error} When conversation memory operations fail (if enabled)
1444
1567
  */
1445
1568
  async stream(options) {
1446
1569
  const startTime = Date.now();
@@ -1957,8 +2080,178 @@ export class NeuroLink {
1957
2080
  }
1958
2081
  }
1959
2082
  /**
1960
- * Get the EventEmitter to listen to NeuroLink events
1961
- * @returns EventEmitter instance
2083
+ * Get the EventEmitter instance to listen to NeuroLink events for real-time monitoring and debugging.
2084
+ * This method provides access to the internal event system that emits events during AI generation,
2085
+ * tool execution, streaming, and other operations for comprehensive observability.
2086
+ *
2087
+ * @returns EventEmitter instance that emits various NeuroLink operation events
2088
+ *
2089
+ * @example
2090
+ * ```typescript
2091
+ * // Basic event listening setup
2092
+ * const neurolink = new NeuroLink();
2093
+ * const emitter = neurolink.getEventEmitter();
2094
+ *
2095
+ * // Listen to generation events
2096
+ * emitter.on('generation:start', (event) => {
2097
+ * console.log(`Generation started with provider: ${event.provider}`);
2098
+ * console.log(`Started at: ${new Date(event.timestamp)}`);
2099
+ * });
2100
+ *
2101
+ * emitter.on('generation:end', (event) => {
2102
+ * console.log(`Generation completed in ${event.responseTime}ms`);
2103
+ * console.log(`Tools used: ${event.toolsUsed?.length || 0}`);
2104
+ * });
2105
+ *
2106
+ * // Listen to streaming events
2107
+ * emitter.on('stream:start', (event) => {
2108
+ * console.log(`Streaming started with provider: ${event.provider}`);
2109
+ * });
2110
+ *
2111
+ * emitter.on('stream:end', (event) => {
2112
+ * console.log(`Streaming completed in ${event.responseTime}ms`);
2113
+ * if (event.fallback) console.log('Used fallback streaming');
2114
+ * });
2115
+ *
2116
+ * // Listen to tool execution events
2117
+ * emitter.on('tool:start', (event) => {
2118
+ * console.log(`Tool execution started: ${event.toolName}`);
2119
+ * });
2120
+ *
2121
+ * emitter.on('tool:end', (event) => {
2122
+ * console.log(`Tool ${event.toolName} ${event.success ? 'succeeded' : 'failed'}`);
2123
+ * console.log(`Execution time: ${event.responseTime}ms`);
2124
+ * });
2125
+ *
2126
+ * // Listen to tool registration events
2127
+ * emitter.on('tools-register:start', (event) => {
2128
+ * console.log(`Registering tool: ${event.toolName}`);
2129
+ * });
2130
+ *
2131
+ * emitter.on('tools-register:end', (event) => {
2132
+ * console.log(`Tool registration ${event.success ? 'succeeded' : 'failed'}: ${event.toolName}`);
2133
+ * });
2134
+ *
2135
+ * // Listen to external MCP server events
2136
+ * emitter.on('externalMCP:serverConnected', (event) => {
2137
+ * console.log(`External MCP server connected: ${event.serverId}`);
2138
+ * console.log(`Tools available: ${event.toolCount || 0}`);
2139
+ * });
2140
+ *
2141
+ * emitter.on('externalMCP:serverDisconnected', (event) => {
2142
+ * console.log(`External MCP server disconnected: ${event.serverId}`);
2143
+ * console.log(`Reason: ${event.reason || 'Unknown'}`);
2144
+ * });
2145
+ *
2146
+ * emitter.on('externalMCP:toolDiscovered', (event) => {
2147
+ * console.log(`New tool discovered: ${event.toolName} from ${event.serverId}`);
2148
+ * });
2149
+ *
2150
+ * // Advanced usage with error handling
2151
+ * emitter.on('error', (error) => {
2152
+ * console.error('NeuroLink error:', error);
2153
+ * });
2154
+ *
2155
+ * // Clean up event listeners when done
2156
+ * function cleanup() {
2157
+ * emitter.removeAllListeners();
2158
+ * }
2159
+ *
2160
+ * process.on('SIGINT', cleanup);
2161
+ * process.on('SIGTERM', cleanup);
2162
+ * ```
2163
+ *
2164
+ * @example
2165
+ * ```typescript
2166
+ * // Advanced monitoring with metrics collection
2167
+ * const neurolink = new NeuroLink();
2168
+ * const emitter = neurolink.getEventEmitter();
2169
+ * const metrics = {
2170
+ * generations: 0,
2171
+ * totalResponseTime: 0,
2172
+ * toolExecutions: 0,
2173
+ * failures: 0
2174
+ * };
2175
+ *
2176
+ * // Collect performance metrics
2177
+ * emitter.on('generation:end', (event) => {
2178
+ * metrics.generations++;
2179
+ * metrics.totalResponseTime += event.responseTime;
2180
+ * metrics.toolExecutions += event.toolsUsed?.length || 0;
2181
+ * });
2182
+ *
2183
+ * emitter.on('tool:end', (event) => {
2184
+ * if (!event.success) {
2185
+ * metrics.failures++;
2186
+ * }
2187
+ * });
2188
+ *
2189
+ * // Log metrics every 10 seconds
2190
+ * setInterval(() => {
2191
+ * const avgResponseTime = metrics.generations > 0
2192
+ * ? metrics.totalResponseTime / metrics.generations
2193
+ * : 0;
2194
+ *
2195
+ * console.log('NeuroLink Metrics:', {
2196
+ * totalGenerations: metrics.generations,
2197
+ * averageResponseTime: `${avgResponseTime.toFixed(2)}ms`,
2198
+ * totalToolExecutions: metrics.toolExecutions,
2199
+ * failureRate: `${((metrics.failures / (metrics.toolExecutions || 1)) * 100).toFixed(2)}%`
2200
+ * });
2201
+ * }, 10000);
2202
+ * ```
2203
+ *
2204
+ * **Available Events:**
2205
+ *
2206
+ * **Generation Events:**
2207
+ * - `generation:start` - Fired when text generation begins
2208
+ * - `{ provider: string, timestamp: number }`
2209
+ * - `generation:end` - Fired when text generation completes
2210
+ * - `{ provider: string, responseTime: number, toolsUsed?: string[], timestamp: number }`
2211
+ *
2212
+ * **Streaming Events:**
2213
+ * - `stream:start` - Fired when streaming begins
2214
+ * - `{ provider: string, timestamp: number }`
2215
+ * - `stream:end` - Fired when streaming completes
2216
+ * - `{ provider: string, responseTime: number, fallback?: boolean }`
2217
+ *
2218
+ * **Tool Events:**
2219
+ * - `tool:start` - Fired when tool execution begins
2220
+ * - `{ toolName: string, timestamp: number }`
2221
+ * - `tool:end` - Fired when tool execution completes
2222
+ * - `{ toolName: string, responseTime: number, success: boolean, timestamp: number }`
2223
+ * - `tools-register:start` - Fired when tool registration begins
2224
+ * - `{ toolName: string, timestamp: number }`
2225
+ * - `tools-register:end` - Fired when tool registration completes
2226
+ * - `{ toolName: string, success: boolean, timestamp: number }`
2227
+ *
2228
+ * **External MCP Events:**
2229
+ * - `externalMCP:serverConnected` - Fired when external MCP server connects
2230
+ * - `{ serverId: string, toolCount?: number, timestamp: number }`
2231
+ * - `externalMCP:serverDisconnected` - Fired when external MCP server disconnects
2232
+ * - `{ serverId: string, reason?: string, timestamp: number }`
2233
+ * - `externalMCP:serverFailed` - Fired when external MCP server fails
2234
+ * - `{ serverId: string, error: string, timestamp: number }`
2235
+ * - `externalMCP:toolDiscovered` - Fired when external MCP tool is discovered
2236
+ * - `{ toolName: string, serverId: string, timestamp: number }`
2237
+ * - `externalMCP:toolRemoved` - Fired when external MCP tool is removed
2238
+ * - `{ toolName: string, serverId: string, timestamp: number }`
2239
+ * - `externalMCP:serverAdded` - Fired when external MCP server is added
2240
+ * - `{ serverId: string, config: MCPServerInfo, toolCount: number, timestamp: number }`
2241
+ * - `externalMCP:serverRemoved` - Fired when external MCP server is removed
2242
+ * - `{ serverId: string, timestamp: number }`
2243
+ *
2244
+ * **Error Events:**
2245
+ * - `error` - Fired when an error occurs
2246
+ * - `{ error: Error, context?: object }`
2247
+ *
2248
+ * @remarks This method does not throw; it simply returns the internal EventEmitter instance.
2249
+ *
2250
+ * @since 1.0.0
2251
+ * @see {@link https://nodejs.org/api/events.html} Node.js EventEmitter documentation
2252
+ * @see {@link NeuroLink.generate} for events related to text generation
2253
+ * @see {@link NeuroLink.stream} for events related to streaming
2254
+ * @see {@link NeuroLink.executeTool} for events related to tool execution
1962
2255
  */
1963
2256
  getEventEmitter() {
1964
2257
  return this.emitter;