@juspay/neurolink 7.28.1 → 7.29.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/core/baseProvider.js +4 -2
  3. package/dist/index.d.ts +2 -3
  4. package/dist/index.js +1 -2
  5. package/dist/lib/core/baseProvider.js +4 -2
  6. package/dist/lib/core/dynamicModels.d.ts +6 -6
  7. package/dist/lib/index.d.ts +2 -3
  8. package/dist/lib/index.js +1 -2
  9. package/dist/lib/middleware/builtin/analytics.js +13 -14
  10. package/dist/lib/middleware/builtin/guardrails.d.ts +20 -0
  11. package/dist/lib/middleware/builtin/guardrails.js +87 -0
  12. package/dist/lib/middleware/factory.d.ts +29 -14
  13. package/dist/lib/middleware/factory.js +136 -110
  14. package/dist/lib/middleware/index.d.ts +3 -49
  15. package/dist/lib/middleware/index.js +4 -58
  16. package/dist/lib/middleware/registry.d.ts +1 -3
  17. package/dist/lib/middleware/registry.js +4 -5
  18. package/dist/lib/middleware/types.d.ts +3 -1
  19. package/dist/lib/neurolink.d.ts +297 -4
  20. package/dist/lib/neurolink.js +297 -4
  21. package/dist/lib/providers/googleVertex.js +13 -4
  22. package/dist/middleware/builtin/analytics.js +13 -14
  23. package/dist/middleware/builtin/guardrails.d.ts +20 -0
  24. package/dist/middleware/builtin/guardrails.js +87 -0
  25. package/dist/middleware/factory.d.ts +29 -14
  26. package/dist/middleware/factory.js +136 -110
  27. package/dist/middleware/index.d.ts +3 -49
  28. package/dist/middleware/index.js +4 -58
  29. package/dist/middleware/registry.d.ts +1 -3
  30. package/dist/middleware/registry.js +4 -5
  31. package/dist/middleware/types.d.ts +3 -1
  32. package/dist/neurolink.d.ts +297 -4
  33. package/dist/neurolink.js +297 -4
  34. package/dist/providers/googleVertex.js +13 -4
  35. package/package.json +1 -1
@@ -5,63 +5,9 @@
  * with the AI SDK's wrapLanguageModel functionality. It allows for modular enhancement
  * of language models with features like analytics, guardrails, caching, and more.
  */
- import { middlewareRegistry } from "./registry.js";
+ // Import types and classes
  import { MiddlewareFactory } from "./factory.js";
- // Registry for managing middleware
- export { MiddlewareRegistry, middlewareRegistry } from "./registry.js";
  // Factory for creating and applying middleware chains
- export { MiddlewareFactory } from "./factory.js";
- // Re-export built-in middleware when they're implemented
- // export { analyticsMiddleware } from './built-in/analytics.js';
- // export { guardrailsMiddleware } from './built-in/guardrails.js';
- // export { loggingMiddleware } from './built-in/logging.js';
- // export { cachingMiddleware } from './built-in/caching.js';
- // export { rateLimitMiddleware } from './built-in/rateLimit.js';
- /**
- * Convenience function to register a middleware
- */
- export function registerMiddleware(middleware, options) {
- middlewareRegistry.register(middleware, options);
- }
- /**
- * Convenience function to unregister a middleware
- */
- export function unregisterMiddleware(middlewareId) {
- return middlewareRegistry.unregister(middlewareId);
- }
- /**
- * Convenience function to get all registered middleware
- */
- export function listMiddleware() {
- return middlewareRegistry.list();
- }
- /**
- * Convenience function to check if a middleware is registered
- */
- export function hasMiddleware(middlewareId) {
- return middlewareRegistry.has(middlewareId);
- }
- /**
- * Convenience function to get middleware execution statistics
- */
- export function getMiddlewareStats() {
- return middlewareRegistry.getAggregatedStats();
- }
- /**
- * Convenience function to clear middleware execution statistics
- */
- export function clearMiddlewareStats(middlewareId) {
- middlewareRegistry.clearStats(middlewareId);
- }
- /**
- * Convenience function to get available middleware presets
- */
- export function getAvailablePresets() {
- return MiddlewareFactory.getAvailablePresets();
- }
- /**
- * Convenience function to validate middleware configuration
- */
- export function validateMiddlewareConfig(config) {
- return MiddlewareFactory.validateConfig(config);
- }
+ export { MiddlewareFactory };
+ // Export the factory as the default export for clean, direct usage
+ export default MiddlewareFactory;
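With the convenience wrappers removed, the middleware entry point now exposes `MiddlewareFactory` as both a named and a default export; the removed wrappers simply delegated to registry and factory methods that remain. A minimal sketch of the surface still visible in this diff, assuming a deep import path (the path and the placeholder argument to `validateConfig` are illustrative, not confirmed by this diff):

```typescript
// Hedged sketch -- only the default export and the two static helpers below
// are confirmed by this diff; the import path is an assumption.
import MiddlewareFactory from "@juspay/neurolink/dist/middleware/index.js";

// Formerly reached via the removed getAvailablePresets() wrapper.
const presets = MiddlewareFactory.getAvailablePresets();

// Formerly reached via the removed validateMiddlewareConfig(config) wrapper.
// The empty object is a placeholder; MiddlewareConfig's fields are not shown here.
const validation = MiddlewareFactory.validateConfig({});

console.log(presets, validation);
```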
@@ -1,8 +1,7 @@
  import type { LanguageModelV1Middleware } from "ai";
  import type { NeuroLinkMiddleware, MiddlewareConfig, MiddlewareContext, MiddlewareRegistrationOptions, MiddlewareExecutionResult } from "./types.js";
  /**
- * Global middleware registry for NeuroLink
- * Manages registration, configuration, and execution of middleware
+ * Manages the registration, configuration, and execution of middleware for a single factory instance.
  */
  export declare class MiddlewareRegistry {
  private middleware;
@@ -75,4 +74,3 @@ export declare class MiddlewareRegistry {
  */
  clear(): void;
  }
- export declare const middlewareRegistry: MiddlewareRegistry;
@@ -1,7 +1,6 @@
  import { logger } from "../utils/logger.js";
  /**
- * Global middleware registry for NeuroLink
- * Manages registration, configuration, and execution of middleware
+ * Manages the registration, configuration, and execution of middleware for a single factory instance.
  */
  export class MiddlewareRegistry {
  middleware = new Map();
@@ -143,7 +142,9 @@ export class MiddlewareRegistry {
  ...config?.config,
  };
  // Create wrapper that tracks execution
- const wrappedMiddleware = {};
+ const wrappedMiddleware = {
+ metadata: middleware.metadata,
+ };
  if (middleware.transformParams) {
  wrappedMiddleware.transformParams = async (args) => {
  const startTime = Date.now();
@@ -279,5 +280,3 @@ export class MiddlewareRegistry {
  logger.debug("All middleware cleared from registry");
  }
  }
- // Global middleware registry instance
- export const middlewareRegistry = new MiddlewareRegistry();
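Since the module-level `middlewareRegistry` singleton is gone, registries are now scoped to whoever constructs them (the updated doc comment ties each registry to a single factory instance). A hedged sketch of holding a local registry, using only the methods the removed wrappers delegated to; the import path and the placeholder middleware object are assumptions:

```typescript
// Hedged sketch of a locally scoped registry replacing the removed global singleton.
import type { NeuroLinkMiddleware } from "@juspay/neurolink/dist/middleware/types.js";
import { MiddlewareRegistry } from "@juspay/neurolink/dist/middleware/registry.js";

// Placeholder -- the concrete fields of NeuroLinkMiddleware are not shown in this diff.
declare const myMiddleware: NeuroLinkMiddleware;

const registry = new MiddlewareRegistry();
registry.register(myMiddleware);            // was registerMiddleware(...)
console.log(registry.list());               // was listMiddleware()
console.log(registry.getAggregatedStats()); // was getMiddlewareStats()
registry.clear();
```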
@@ -118,12 +118,14 @@ export interface MiddlewarePreset {
  /** Description of the preset */
  description: string;
  /** Middleware configurations in the preset */
- middleware: Record<string, MiddlewareConfig>;
+ config: Record<string, MiddlewareConfig>;
  }
  /**
  * Factory options for middleware
  */
  export interface MiddlewareFactoryOptions {
+ /** Custom middleware to register on initialization */
+ middleware?: NeuroLinkMiddleware[];
  /** Enable specific middleware */
  enabledMiddleware?: string[];
  /** Disable specific middleware */
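Two call-site-visible type changes here: `MiddlewarePreset` now keys its per-middleware configuration under `config` (previously `middleware`), and `MiddlewareFactoryOptions` gains a `middleware` array of custom middleware to register on initialization. A hedged sketch using the new field names; fields of these interfaces not shown in this diff are omitted, and the import path is illustrative:

```typescript
import type {
  MiddlewarePreset,
  MiddlewareFactoryOptions,
  NeuroLinkMiddleware,
} from "@juspay/neurolink/dist/middleware/types.js";

// Placeholder -- NeuroLinkMiddleware is only referenced by name in this diff.
declare const customMiddleware: NeuroLinkMiddleware;

// Preset configuration is now keyed under `config` (was `middleware` in 7.28.x).
const preset: Partial<MiddlewarePreset> = {
  description: "Minimal example preset",
  config: {}, // Record<string, MiddlewareConfig>
};

// Factory options can now carry custom middleware to register on initialization.
const options: Partial<MiddlewareFactoryOptions> = {
  middleware: [customMiddleware],
  enabledMiddleware: ["analytics"],
};

console.log(preset, options);
```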
@@ -57,6 +57,34 @@ export declare class NeuroLink {
  */
  private emitToolEndEvent;
  private conversationMemory?;
+ /**
+ * Creates a new NeuroLink instance for AI text generation with MCP tool integration.
+ *
+ * @param config - Optional configuration object
+ * @param config.conversationMemory - Configuration for conversation memory features
+ * @param config.conversationMemory.enabled - Whether to enable conversation memory (default: false)
+ * @param config.conversationMemory.maxSessions - Maximum number of concurrent sessions (default: 100)
+ * @param config.conversationMemory.maxTurnsPerSession - Maximum conversation turns per session (default: 50)
+ *
+ * @example
+ * ```typescript
+ * // Basic usage
+ * const neurolink = new NeuroLink();
+ *
+ * // With conversation memory
+ * const neurolink = new NeuroLink({
+ * conversationMemory: {
+ * enabled: true,
+ * maxSessions: 50,
+ * maxTurnsPerSession: 20
+ * }
+ * });
+ * ```
+ *
+ * @throws {Error} When provider registry setup fails
+ * @throws {Error} When conversation memory initialization fails (if enabled)
+ * @throws {Error} When external server manager initialization fails
+ */
  constructor(config?: {
  conversationMemory?: Partial<ConversationMemoryConfig>;
  });
@@ -84,6 +112,54 @@ export declare class NeuroLink {
  * @param config Optional configuration to override default summarization settings.
  */
  enableContextSummarization(config?: Partial<ContextManagerConfig>): void;
+ /**
+ * Generate AI content using the best available provider with MCP tool integration.
+ * This is the primary method for text generation with full feature support.
+ *
+ * @param optionsOrPrompt - Either a string prompt or a comprehensive GenerateOptions object
+ * @param optionsOrPrompt.input - Input configuration object
+ * @param optionsOrPrompt.input.text - The text prompt to send to the AI (required)
+ * @param optionsOrPrompt.provider - AI provider to use ('auto', 'openai', 'anthropic', etc.)
+ * @param optionsOrPrompt.model - Specific model to use (e.g., 'gpt-4', 'claude-3-opus')
+ * @param optionsOrPrompt.temperature - Randomness in response (0.0 = deterministic, 2.0 = very random)
+ * @param optionsOrPrompt.maxTokens - Maximum tokens in response
+ * @param optionsOrPrompt.systemPrompt - System message to set AI behavior
+ * @param optionsOrPrompt.disableTools - Whether to disable MCP tool usage
+ * @param optionsOrPrompt.enableAnalytics - Whether to include usage analytics
+ * @param optionsOrPrompt.enableEvaluation - Whether to include response quality evaluation
+ * @param optionsOrPrompt.context - Additional context for the request
+ * @param optionsOrPrompt.evaluationDomain - Domain for specialized evaluation
+ * @param optionsOrPrompt.toolUsageContext - Context for tool usage decisions
+ *
+ * @returns Promise resolving to GenerateResult with content, usage data, and optional analytics
+ *
+ * @example
+ * ```typescript
+ * // Simple usage with string prompt
+ * const result = await neurolink.generate("What is artificial intelligence?");
+ * console.log(result.content);
+ *
+ * // Advanced usage with options
+ * const result = await neurolink.generate({
+ * input: { text: "Explain quantum computing" },
+ * provider: "openai",
+ * model: "gpt-4",
+ * temperature: 0.7,
+ * maxTokens: 500,
+ * enableAnalytics: true,
+ * enableEvaluation: true,
+ * context: { domain: "science", level: "intermediate" }
+ * });
+ *
+ * // Access analytics and evaluation data
+ * console.log(result.analytics?.usage);
+ * console.log(result.evaluation?.relevance);
+ * ```
+ *
+ * @throws {Error} When input text is missing or invalid
+ * @throws {Error} When all providers fail to generate content
+ * @throws {Error} When conversation memory operations fail (if enabled)
+ */
  generate(optionsOrPrompt: GenerateOptions | string): Promise<GenerateResult>;
  /**
  * BACKWARD COMPATIBILITY: Legacy generateText method
@@ -128,13 +204,230 @@ export declare class NeuroLink {
  */
  streamText(prompt: string, options?: Partial<StreamOptions>): Promise<AsyncIterable<string>>;
  /**
- * PRIMARY METHOD: Stream content using AI (recommended for new code)
- * Future-ready for multi-modal capabilities with current text focus
+ * Stream AI-generated content in real-time using the best available provider.
+ * This method provides real-time streaming of AI responses with full MCP tool integration.
+ *
+ * @param options - Stream configuration options
+ * @param options.input - Input configuration object
+ * @param options.input.text - The text prompt to send to the AI (required)
+ * @param options.provider - AI provider to use ('auto', 'openai', 'anthropic', etc.)
+ * @param options.model - Specific model to use (e.g., 'gpt-4', 'claude-3-opus')
+ * @param options.temperature - Randomness in response (0.0 = deterministic, 2.0 = very random)
+ * @param options.maxTokens - Maximum tokens in response
+ * @param options.systemPrompt - System message to set AI behavior
+ * @param options.disableTools - Whether to disable MCP tool usage
+ * @param options.enableAnalytics - Whether to include usage analytics
+ * @param options.enableEvaluation - Whether to include response quality evaluation
+ * @param options.context - Additional context for the request
+ * @param options.evaluationDomain - Domain for specialized evaluation
+ *
+ * @returns Promise resolving to StreamResult with an async iterable stream
+ *
+ * @example
+ * ```typescript
+ * // Basic streaming usage
+ * const result = await neurolink.stream({
+ * input: { text: "Tell me a story about space exploration" }
+ * });
+ *
+ * // Consume the stream
+ * for await (const chunk of result.stream) {
+ * process.stdout.write(chunk.content);
+ * }
+ *
+ * // Advanced streaming with options
+ * const result = await neurolink.stream({
+ * input: { text: "Explain machine learning" },
+ * provider: "openai",
+ * model: "gpt-4",
+ * temperature: 0.7,
+ * enableAnalytics: true,
+ * context: { domain: "education", audience: "beginners" }
+ * });
+ *
+ * // Access metadata and analytics
+ * console.log(result.provider);
+ * console.log(result.analytics?.usage);
+ * ```
+ *
+ * @throws {Error} When input text is missing or invalid
+ * @throws {Error} When all providers fail to generate content
+ * @throws {Error} When conversation memory operations fail (if enabled)
  */
  stream(options: StreamOptions): Promise<StreamResult>;
  /**
- * Get the EventEmitter to listen to NeuroLink events
- * @returns EventEmitter instance
+ * Get the EventEmitter instance to listen to NeuroLink events for real-time monitoring and debugging.
+ * This method provides access to the internal event system that emits events during AI generation,
+ * tool execution, streaming, and other operations for comprehensive observability.
+ *
+ * @returns EventEmitter instance that emits various NeuroLink operation events
+ *
+ * @example
+ * ```typescript
+ * // Basic event listening setup
+ * const neurolink = new NeuroLink();
+ * const emitter = neurolink.getEventEmitter();
+ *
+ * // Listen to generation events
+ * emitter.on('generation:start', (event) => {
+ * console.log(`Generation started with provider: ${event.provider}`);
+ * console.log(`Started at: ${new Date(event.timestamp)}`);
+ * });
+ *
+ * emitter.on('generation:end', (event) => {
+ * console.log(`Generation completed in ${event.responseTime}ms`);
+ * console.log(`Tools used: ${event.toolsUsed?.length || 0}`);
+ * });
+ *
+ * // Listen to streaming events
+ * emitter.on('stream:start', (event) => {
+ * console.log(`Streaming started with provider: ${event.provider}`);
+ * });
+ *
+ * emitter.on('stream:end', (event) => {
+ * console.log(`Streaming completed in ${event.responseTime}ms`);
+ * if (event.fallback) console.log('Used fallback streaming');
+ * });
+ *
+ * // Listen to tool execution events
+ * emitter.on('tool:start', (event) => {
+ * console.log(`Tool execution started: ${event.toolName}`);
+ * });
+ *
+ * emitter.on('tool:end', (event) => {
+ * console.log(`Tool ${event.toolName} ${event.success ? 'succeeded' : 'failed'}`);
+ * console.log(`Execution time: ${event.responseTime}ms`);
+ * });
+ *
+ * // Listen to tool registration events
+ * emitter.on('tools-register:start', (event) => {
+ * console.log(`Registering tool: ${event.toolName}`);
+ * });
+ *
+ * emitter.on('tools-register:end', (event) => {
+ * console.log(`Tool registration ${event.success ? 'succeeded' : 'failed'}: ${event.toolName}`);
+ * });
+ *
+ * // Listen to external MCP server events
+ * emitter.on('externalMCP:serverConnected', (event) => {
+ * console.log(`External MCP server connected: ${event.serverId}`);
+ * console.log(`Tools available: ${event.toolCount || 0}`);
+ * });
+ *
+ * emitter.on('externalMCP:serverDisconnected', (event) => {
+ * console.log(`External MCP server disconnected: ${event.serverId}`);
+ * console.log(`Reason: ${event.reason || 'Unknown'}`);
+ * });
+ *
+ * emitter.on('externalMCP:toolDiscovered', (event) => {
+ * console.log(`New tool discovered: ${event.toolName} from ${event.serverId}`);
+ * });
+ *
+ * // Advanced usage with error handling
+ * emitter.on('error', (error) => {
+ * console.error('NeuroLink error:', error);
+ * });
+ *
+ * // Clean up event listeners when done
+ * function cleanup() {
+ * emitter.removeAllListeners();
+ * }
+ *
+ * process.on('SIGINT', cleanup);
+ * process.on('SIGTERM', cleanup);
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Advanced monitoring with metrics collection
+ * const neurolink = new NeuroLink();
+ * const emitter = neurolink.getEventEmitter();
+ * const metrics = {
+ * generations: 0,
+ * totalResponseTime: 0,
+ * toolExecutions: 0,
+ * failures: 0
+ * };
+ *
+ * // Collect performance metrics
+ * emitter.on('generation:end', (event) => {
+ * metrics.generations++;
+ * metrics.totalResponseTime += event.responseTime;
+ * metrics.toolExecutions += event.toolsUsed?.length || 0;
+ * });
+ *
+ * emitter.on('tool:end', (event) => {
+ * if (!event.success) {
+ * metrics.failures++;
+ * }
+ * });
+ *
+ * // Log metrics every 10 seconds
+ * setInterval(() => {
+ * const avgResponseTime = metrics.generations > 0
+ * ? metrics.totalResponseTime / metrics.generations
+ * : 0;
+ *
+ * console.log('NeuroLink Metrics:', {
+ * totalGenerations: metrics.generations,
+ * averageResponseTime: `${avgResponseTime.toFixed(2)}ms`,
+ * totalToolExecutions: metrics.toolExecutions,
+ * failureRate: `${((metrics.failures / (metrics.toolExecutions || 1)) * 100).toFixed(2)}%`
+ * });
+ * }, 10000);
+ * ```
+ *
+ * **Available Events:**
+ *
+ * **Generation Events:**
+ * - `generation:start` - Fired when text generation begins
+ * - `{ provider: string, timestamp: number }`
+ * - `generation:end` - Fired when text generation completes
+ * - `{ provider: string, responseTime: number, toolsUsed?: string[], timestamp: number }`
+ *
+ * **Streaming Events:**
+ * - `stream:start` - Fired when streaming begins
+ * - `{ provider: string, timestamp: number }`
+ * - `stream:end` - Fired when streaming completes
+ * - `{ provider: string, responseTime: number, fallback?: boolean }`
+ *
+ * **Tool Events:**
+ * - `tool:start` - Fired when tool execution begins
+ * - `{ toolName: string, timestamp: number }`
+ * - `tool:end` - Fired when tool execution completes
+ * - `{ toolName: string, responseTime: number, success: boolean, timestamp: number }`
+ * - `tools-register:start` - Fired when tool registration begins
+ * - `{ toolName: string, timestamp: number }`
+ * - `tools-register:end` - Fired when tool registration completes
+ * - `{ toolName: string, success: boolean, timestamp: number }`
+ *
+ * **External MCP Events:**
+ * - `externalMCP:serverConnected` - Fired when external MCP server connects
+ * - `{ serverId: string, toolCount?: number, timestamp: number }`
+ * - `externalMCP:serverDisconnected` - Fired when external MCP server disconnects
+ * - `{ serverId: string, reason?: string, timestamp: number }`
+ * - `externalMCP:serverFailed` - Fired when external MCP server fails
+ * - `{ serverId: string, error: string, timestamp: number }`
+ * - `externalMCP:toolDiscovered` - Fired when external MCP tool is discovered
+ * - `{ toolName: string, serverId: string, timestamp: number }`
+ * - `externalMCP:toolRemoved` - Fired when external MCP tool is removed
+ * - `{ toolName: string, serverId: string, timestamp: number }`
+ * - `externalMCP:serverAdded` - Fired when external MCP server is added
+ * - `{ serverId: string, config: MCPServerInfo, toolCount: number, timestamp: number }`
+ * - `externalMCP:serverRemoved` - Fired when external MCP server is removed
+ * - `{ serverId: string, timestamp: number }`
+ *
+ * **Error Events:**
+ * - `error` - Fired when an error occurs
+ * - `{ error: Error, context?: object }`
+ *
+ * @throws {Error} This method does not throw errors as it returns the internal EventEmitter
+ *
+ * @since 1.0.0
+ * @see {@link https://nodejs.org/api/events.html} Node.js EventEmitter documentation
+ * @see {@link NeuroLink.generate} for events related to text generation
+ * @see {@link NeuroLink.stream} for events related to streaming
+ * @see {@link NeuroLink.executeTool} for events related to tool execution
  */
  getEventEmitter(): EventEmitter<[never]>;
  /**