@juspay/neurolink 8.34.0 → 8.35.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
1
+ ## [8.35.0](https://github.com/juspay/neurolink/compare/v8.34.1...v8.35.0) (2026-01-15)
2
+
3
+ ### Features
4
+
5
+ - **(history):** Added support for maintaining sequence in AI response in Chat History ([e29fcae](https://github.com/juspay/neurolink/commit/e29fcaec10d84e4319ebe75e94aa8f0a35773ff7))
6
+
7
+ ## [8.34.1](https://github.com/juspay/neurolink/compare/v8.34.0...v8.34.1) (2026-01-14)
8
+
9
+ ### Bug Fixes
10
+
11
+ - **(dependency):** add back text-to-speech to dependencies and added types to barrel import ([c4bc86b](https://github.com/juspay/neurolink/commit/c4bc86bbd3ca7fe59595c5fa3bc8c5583a2031a3))
12
+
1
13
  ## [8.34.0](https://github.com/juspay/neurolink/compare/v8.33.0...v8.34.0) (2026-01-13)
2
14
 
3
15
  ### Features
@@ -56,6 +56,9 @@ export class ConversationMemoryManager {
56
56
  : this.config.tokenThreshold || 50000;
57
57
  const userMsg = await this.validateAndPrepareMessage(options.userMessage, "user", tokenThreshold);
58
58
  const assistantMsg = await this.validateAndPrepareMessage(options.aiResponse, "assistant", tokenThreshold);
59
+ if (options.events && options.events.length > 0) {
60
+ assistantMsg.events = options.events;
61
+ }
59
62
  session.messages.push(userMsg, assistantMsg);
60
63
  session.lastActivity = Date.now();
61
64
  const shouldSummarize = options.enableSummarization !== undefined
@@ -297,6 +297,7 @@ export class RedisConversationMemoryManager {
297
297
  timestamp: this.generateTimestamp(),
298
298
  role: "assistant",
299
299
  content: options.aiResponse,
300
+ events: options.events || undefined,
300
301
  };
301
302
  conversation.messages.push(assistantMsg);
302
303
  logger.info("[RedisConversationMemoryManager] Added new messages", {
package/dist/index.d.ts CHANGED
@@ -33,12 +33,8 @@
33
33
  */
34
34
  import { AIProviderFactory } from "./core/factory.js";
35
35
  export { AIProviderFactory };
36
- export type { AIProvider, AIModelProviderConfig, StreamingOptions, ProviderAttempt, SupportedModelName, } from "./types/index.js";
37
- export type { GenerateOptions, GenerateResult, EnhancedProvider, } from "./types/generateTypes.js";
38
- export type { ToolContext } from "./types/tools.js";
36
+ export * from "./types/index.js";
39
37
  export { validateTool } from "./sdk/toolRegistration.js";
40
- export type { ToolResult, ToolDefinition } from "./types/tools.js";
41
- export { DEFAULT_PROVIDER_CONFIGS } from "./types/index.js";
42
38
  export { AIProviderName, BedrockModels, OpenAIModels, VertexModels, } from "./constants/enums.js";
43
39
  export { getBestProvider, getAvailableProviders, isValidProvider, } from "./utils/providerUtils.js";
44
40
  export { dynamicModelProvider } from "./core/dynamicModels.js";
package/dist/index.js CHANGED
@@ -34,9 +34,10 @@
34
34
  // Core exports
35
35
  import { AIProviderFactory } from "./core/factory.js";
36
36
  export { AIProviderFactory };
37
+ // Export ALL types from the centralized type barrel
38
+ export * from "./types/index.js";
39
+ // Tool Registration utility
37
40
  export { validateTool } from "./sdk/toolRegistration.js";
38
- // Model enums
39
- export { DEFAULT_PROVIDER_CONFIGS } from "./types/index.js";
40
41
  export { AIProviderName, BedrockModels, OpenAIModels, VertexModels, } from "./constants/enums.js";
41
42
  // Utility exports
42
43
  export { getBestProvider, getAvailableProviders, isValidProvider, } from "./utils/providerUtils.js";
@@ -56,6 +56,9 @@ export class ConversationMemoryManager {
56
56
  : this.config.tokenThreshold || 50000;
57
57
  const userMsg = await this.validateAndPrepareMessage(options.userMessage, "user", tokenThreshold);
58
58
  const assistantMsg = await this.validateAndPrepareMessage(options.aiResponse, "assistant", tokenThreshold);
59
+ if (options.events && options.events.length > 0) {
60
+ assistantMsg.events = options.events;
61
+ }
59
62
  session.messages.push(userMsg, assistantMsg);
60
63
  session.lastActivity = Date.now();
61
64
  const shouldSummarize = options.enableSummarization !== undefined
@@ -297,6 +297,7 @@ export class RedisConversationMemoryManager {
297
297
  timestamp: this.generateTimestamp(),
298
298
  role: "assistant",
299
299
  content: options.aiResponse,
300
+ events: options.events || undefined,
300
301
  };
301
302
  conversation.messages.push(assistantMsg);
302
303
  logger.info("[RedisConversationMemoryManager] Added new messages", {
@@ -33,12 +33,8 @@
33
33
  */
34
34
  import { AIProviderFactory } from "./core/factory.js";
35
35
  export { AIProviderFactory };
36
- export type { AIProvider, AIModelProviderConfig, StreamingOptions, ProviderAttempt, SupportedModelName, } from "./types/index.js";
37
- export type { GenerateOptions, GenerateResult, EnhancedProvider, } from "./types/generateTypes.js";
38
- export type { ToolContext } from "./types/tools.js";
36
+ export * from "./types/index.js";
39
37
  export { validateTool } from "./sdk/toolRegistration.js";
40
- export type { ToolResult, ToolDefinition } from "./types/tools.js";
41
- export { DEFAULT_PROVIDER_CONFIGS } from "./types/index.js";
42
38
  export { AIProviderName, BedrockModels, OpenAIModels, VertexModels, } from "./constants/enums.js";
43
39
  export { getBestProvider, getAvailableProviders, isValidProvider, } from "./utils/providerUtils.js";
44
40
  export { dynamicModelProvider } from "./core/dynamicModels.js";
package/dist/lib/index.js CHANGED
@@ -34,9 +34,10 @@
34
34
  // Core exports
35
35
  import { AIProviderFactory } from "./core/factory.js";
36
36
  export { AIProviderFactory };
37
+ // Export ALL types from the centralized type barrel
38
+ export * from "./types/index.js";
39
+ // Tool Registration utility
37
40
  export { validateTool } from "./sdk/toolRegistration.js";
38
- // Model enums
39
- export { DEFAULT_PROVIDER_CONFIGS } from "./types/index.js";
40
41
  export { AIProviderName, BedrockModels, OpenAIModels, VertexModels, } from "./constants/enums.js";
41
42
  // Utility exports
42
43
  export { getBestProvider, getAvailableProviders, isValidProvider, } from "./utils/providerUtils.js";
@@ -2184,12 +2184,57 @@ Current user's request: ${currentInput}`;
2184
2184
  const { stream: mcpStream, provider: providerName } = await this.createMCPStream(enhancedOptions);
2185
2185
  let accumulatedContent = "";
2186
2186
  let chunkCount = 0;
2187
+ const eventSequence = [];
2188
+ let eventSeqCounter = 0;
2189
+ const captureEvent = (type, data) => {
2190
+ eventSequence.push({
2191
+ type,
2192
+ seq: eventSeqCounter++,
2193
+ timestamp: Date.now(),
2194
+ ...(data && typeof data === "object" ? data : { data }),
2195
+ });
2196
+ };
2197
+ const onResponseChunk = (...args) => {
2198
+ const chunk = args[0];
2199
+ captureEvent("response:chunk", { content: chunk });
2200
+ };
2201
+ const onToolStart = (...args) => {
2202
+ const data = args[0];
2203
+ captureEvent("tool:start", data);
2204
+ };
2205
+ const onToolEnd = (...args) => {
2206
+ const data = args[0];
2207
+ captureEvent("tool:end", data);
2208
+ if (data.result && data.result.uiComponent === true) {
2209
+ captureEvent("ui-component", {
2210
+ toolName: data.toolName,
2211
+ componentData: data.result,
2212
+ timestamp: Date.now(),
2213
+ });
2214
+ }
2215
+ };
2216
+ const onUIComponent = (...args) => {
2217
+ captureEvent("ui-component", args[0]);
2218
+ };
2219
+ const onHITLRequest = (...args) => {
2220
+ captureEvent("hitl:confirmation-request", args[0]);
2221
+ };
2222
+ const onHITLResponse = (...args) => {
2223
+ captureEvent("hitl:confirmation-response", args[0]);
2224
+ };
2225
+ this.emitter.on("response:chunk", onResponseChunk);
2226
+ this.emitter.on("tool:start", onToolStart);
2227
+ this.emitter.on("tool:end", onToolEnd);
2228
+ this.emitter.on("ui-component", onUIComponent);
2229
+ this.emitter.on("hitl:confirmation-request", onHITLRequest);
2230
+ this.emitter.on("hitl:confirmation-response", onHITLResponse);
2187
2231
  const metadata = {
2188
2232
  fallbackAttempted: false,
2189
2233
  guardrailsBlocked: false,
2190
2234
  error: undefined,
2191
2235
  };
2192
- const processedStream = (async function* (self) {
2236
+ const self = this;
2237
+ const processedStream = (async function* () {
2193
2238
  try {
2194
2239
  for await (const chunk of mcpStream) {
2195
2240
  chunkCount++;
@@ -2264,6 +2309,12 @@ Current user's request: ${currentInput}`;
2264
2309
  }
2265
2310
  }
2266
2311
  finally {
2312
+ self.emitter.off("response:chunk", onResponseChunk);
2313
+ self.emitter.off("tool:start", onToolStart);
2314
+ self.emitter.off("tool:end", onToolEnd);
2315
+ self.emitter.off("ui-component", onUIComponent);
2316
+ self.emitter.off("hitl:confirmation-request", onHITLRequest);
2317
+ self.emitter.off("hitl:confirmation-response", onHITLResponse);
2267
2318
  // Store memory after stream consumption is complete
2268
2319
  if (self.conversationMemory && enhancedOptions.context?.sessionId) {
2269
2320
  const sessionId = enhancedOptions.context?.sessionId;
@@ -2284,6 +2335,12 @@ Current user's request: ${currentInput}`;
2284
2335
  startTimeStamp: new Date(startTime),
2285
2336
  providerDetails,
2286
2337
  enableSummarization: enhancedOptions.enableSummarization,
2338
+ events: eventSequence.length > 0 ? eventSequence : undefined,
2339
+ });
2340
+ logger.debug("[NeuroLink.stream] Stored conversation turn with events", {
2341
+ sessionId,
2342
+ eventCount: eventSequence.length,
2343
+ eventTypes: [...new Set(eventSequence.map((e) => e.type))],
2287
2344
  });
2288
2345
  }
2289
2346
  catch (error) {
@@ -2312,7 +2369,7 @@ Current user's request: ${currentInput}`;
2312
2369
  });
2313
2370
  }
2314
2371
  }
2315
- })(this);
2372
+ })();
2316
2373
  const streamResult = await this.processStreamResult(processedStream, enhancedOptions, factoryResult);
2317
2374
  const responseTime = Date.now() - startTime;
2318
2375
  this.emitStreamEndEvents(streamResult);
@@ -2325,6 +2382,7 @@ Current user's request: ${currentInput}`;
2325
2382
  fallback: metadata.fallbackAttempted,
2326
2383
  guardrailsBlocked: metadata.guardrailsBlocked,
2327
2384
  error: metadata.error,
2385
+ events: eventSequence,
2328
2386
  });
2329
2387
  }
2330
2388
  catch (error) {
@@ -2439,6 +2497,7 @@ Current user's request: ${currentInput}`;
2439
2497
  toolResults: streamResult.toolResults,
2440
2498
  analytics: streamResult.analytics,
2441
2499
  evaluation: streamResult.evaluation,
2500
+ events: config.events && config.events.length > 0 ? config.events : undefined,
2442
2501
  metadata: {
2443
2502
  streamId: config.streamId,
2444
2503
  startTime: config.startTime,
@@ -116,6 +116,21 @@ export type ConversationMemoryStats = {
116
116
  /** Total number of conversation turns across all sessions */
117
117
  totalTurns: number;
118
118
  };
119
+ /**
120
+ * Stream event for event sequence tracking
121
+ * Used to reconstruct exact flow of streaming responses with proper ordering
122
+ * @since 8.21.0
123
+ */
124
+ export type StreamEventSequence = {
125
+ /** Event type (text-chunk, ui-component, tool:start, tool:end, hitl:confirmation-request, etc.) */
126
+ type: string;
127
+ /** Sequence number for ordering events */
128
+ seq: number;
129
+ /** Timestamp when event occurred */
130
+ timestamp: number;
131
+ /** Event-specific data */
132
+ [key: string]: unknown;
133
+ };
119
134
  /**
120
135
  * Chat message format for conversation history
121
136
  */
@@ -145,6 +160,13 @@ export type ChatMessage = {
145
160
  type?: string;
146
161
  error?: string;
147
162
  };
163
+ /**
164
+ * Event sequence for rich history reconstruction
165
+ * Stores ordered events (text-chunk, ui-component, tool calls, HITL, etc.)
166
+ * Enables proper ordering and complete context restoration
167
+ * @since 8.21.0
168
+ */
169
+ events?: StreamEventSequence[];
148
170
  /** Message metadata (NEW - for token-based memory) */
149
171
  metadata?: {
150
172
  /** Is this a summary message? */
@@ -254,6 +276,7 @@ export type StoreConversationTurnOptions = {
254
276
  startTimeStamp?: Date;
255
277
  providerDetails?: ProviderDetails;
256
278
  enableSummarization?: boolean;
279
+ events?: StreamEventSequence[];
257
280
  };
258
281
  /**
259
282
  * Lightweight session metadata for efficient session listing
@@ -22,4 +22,4 @@ export type { Unknown, UnknownRecord, UnknownArray, JsonValue, JsonObject, JsonA
22
22
  export type { EvaluationData, EvaluationContext, EnhancedEvaluationResult, EvaluationRequest, EvaluationCriteria, } from "./evaluation.js";
23
23
  export type { TaskType, TaskClassification, ClassificationScores, ClassificationStats, ClassificationValidation, } from "./taskClassificationTypes.js";
24
24
  export type { DomainType, DomainConfig, DomainTemplate, DomainConfigOptions, DomainEvaluationCriteria, DomainValidationRule, } from "./domainTypes.js";
25
- export type { ConversationMemoryConfig, SessionMemory, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, } from "./conversation.js";
25
+ export type { ConversationMemoryConfig, SessionMemory, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, StoreConversationTurnOptions, } from "./conversation.js";
@@ -395,6 +395,12 @@ export type StreamResult = {
395
395
  };
396
396
  analytics?: AnalyticsData | Promise<AnalyticsData>;
397
397
  evaluation?: EvaluationData | Promise<EvaluationData>;
398
+ events?: Array<{
399
+ type: string;
400
+ seq: number;
401
+ timestamp: number;
402
+ [key: string]: unknown;
403
+ }>;
398
404
  };
399
405
  /**
400
406
  * Enhanced provider type with stream method
@@ -44,8 +44,8 @@ import type { StreamOptions } from "../types/streamTypes.js";
44
44
  export declare function buildMultimodalOptions(options: StreamOptions, providerName: string, modelName: string): {
45
45
  input: {
46
46
  text: string;
47
- images: (string | Buffer<ArrayBufferLike> | import("../types/multimodal.js").ImageWithAltText)[] | undefined;
48
- content: import("../types/multimodal.js").Content[] | undefined;
47
+ images: (string | Buffer<ArrayBufferLike> | import("../index.js").ImageWithAltText)[] | undefined;
48
+ content: import("../index.js").Content[] | undefined;
49
49
  files: (string | Buffer<ArrayBufferLike>)[] | undefined;
50
50
  csvFiles: (string | Buffer<ArrayBufferLike>)[] | undefined;
51
51
  pdfFiles: (string | Buffer<ArrayBufferLike>)[] | undefined;
@@ -56,12 +56,12 @@ export declare function buildMultimodalOptions(options: StreamOptions, providerN
56
56
  includeHeaders?: boolean;
57
57
  } | undefined;
58
58
  systemPrompt: string | undefined;
59
- conversationHistory: import("../types/conversation.js").ChatMessage[] | undefined;
59
+ conversationHistory: import("../index.js").ChatMessage[] | undefined;
60
60
  provider: string;
61
61
  model: string;
62
62
  temperature: number | undefined;
63
63
  maxTokens: number | undefined;
64
64
  enableAnalytics: boolean | undefined;
65
65
  enableEvaluation: boolean | undefined;
66
- context: import("../types/common.js").UnknownRecord | undefined;
66
+ context: import("../index.js").UnknownRecord | undefined;
67
67
  };
@@ -5,4 +5,4 @@
5
5
  /**
6
6
  * Direct Tools Server - Agent direct tools for immediate use
7
7
  */
8
- export declare const directToolsServer: import("../../../types/mcpTypes.js").NeuroLinkMCPServer;
8
+ export declare const directToolsServer: import("../../../index.js").NeuroLinkMCPServer;
@@ -7,4 +7,4 @@
7
7
  * AI Core Server - Central hub for AI provider management
8
8
  * Provides provider selection and status checking functionality
9
9
  */
10
- export declare const aiCoreServer: import("../../../types/mcpTypes.js").NeuroLinkMCPServer;
10
+ export declare const aiCoreServer: import("../../../index.js").NeuroLinkMCPServer;
@@ -5,4 +5,4 @@
5
5
  /**
6
6
  * Utility Server - General utility tools
7
7
  */
8
- export declare const utilityServer: import("../../../types/mcpTypes.js").NeuroLinkMCPServer;
8
+ export declare const utilityServer: import("../../../index.js").NeuroLinkMCPServer;
package/dist/neurolink.js CHANGED
@@ -2184,12 +2184,57 @@ Current user's request: ${currentInput}`;
2184
2184
  const { stream: mcpStream, provider: providerName } = await this.createMCPStream(enhancedOptions);
2185
2185
  let accumulatedContent = "";
2186
2186
  let chunkCount = 0;
2187
+ const eventSequence = [];
2188
+ let eventSeqCounter = 0;
2189
+ const captureEvent = (type, data) => {
2190
+ eventSequence.push({
2191
+ type,
2192
+ seq: eventSeqCounter++,
2193
+ timestamp: Date.now(),
2194
+ ...(data && typeof data === "object" ? data : { data }),
2195
+ });
2196
+ };
2197
+ const onResponseChunk = (...args) => {
2198
+ const chunk = args[0];
2199
+ captureEvent("response:chunk", { content: chunk });
2200
+ };
2201
+ const onToolStart = (...args) => {
2202
+ const data = args[0];
2203
+ captureEvent("tool:start", data);
2204
+ };
2205
+ const onToolEnd = (...args) => {
2206
+ const data = args[0];
2207
+ captureEvent("tool:end", data);
2208
+ if (data.result && data.result.uiComponent === true) {
2209
+ captureEvent("ui-component", {
2210
+ toolName: data.toolName,
2211
+ componentData: data.result,
2212
+ timestamp: Date.now(),
2213
+ });
2214
+ }
2215
+ };
2216
+ const onUIComponent = (...args) => {
2217
+ captureEvent("ui-component", args[0]);
2218
+ };
2219
+ const onHITLRequest = (...args) => {
2220
+ captureEvent("hitl:confirmation-request", args[0]);
2221
+ };
2222
+ const onHITLResponse = (...args) => {
2223
+ captureEvent("hitl:confirmation-response", args[0]);
2224
+ };
2225
+ this.emitter.on("response:chunk", onResponseChunk);
2226
+ this.emitter.on("tool:start", onToolStart);
2227
+ this.emitter.on("tool:end", onToolEnd);
2228
+ this.emitter.on("ui-component", onUIComponent);
2229
+ this.emitter.on("hitl:confirmation-request", onHITLRequest);
2230
+ this.emitter.on("hitl:confirmation-response", onHITLResponse);
2187
2231
  const metadata = {
2188
2232
  fallbackAttempted: false,
2189
2233
  guardrailsBlocked: false,
2190
2234
  error: undefined,
2191
2235
  };
2192
- const processedStream = (async function* (self) {
2236
+ const self = this;
2237
+ const processedStream = (async function* () {
2193
2238
  try {
2194
2239
  for await (const chunk of mcpStream) {
2195
2240
  chunkCount++;
@@ -2264,6 +2309,12 @@ Current user's request: ${currentInput}`;
2264
2309
  }
2265
2310
  }
2266
2311
  finally {
2312
+ self.emitter.off("response:chunk", onResponseChunk);
2313
+ self.emitter.off("tool:start", onToolStart);
2314
+ self.emitter.off("tool:end", onToolEnd);
2315
+ self.emitter.off("ui-component", onUIComponent);
2316
+ self.emitter.off("hitl:confirmation-request", onHITLRequest);
2317
+ self.emitter.off("hitl:confirmation-response", onHITLResponse);
2267
2318
  // Store memory after stream consumption is complete
2268
2319
  if (self.conversationMemory && enhancedOptions.context?.sessionId) {
2269
2320
  const sessionId = enhancedOptions.context?.sessionId;
@@ -2284,6 +2335,12 @@ Current user's request: ${currentInput}`;
2284
2335
  startTimeStamp: new Date(startTime),
2285
2336
  providerDetails,
2286
2337
  enableSummarization: enhancedOptions.enableSummarization,
2338
+ events: eventSequence.length > 0 ? eventSequence : undefined,
2339
+ });
2340
+ logger.debug("[NeuroLink.stream] Stored conversation turn with events", {
2341
+ sessionId,
2342
+ eventCount: eventSequence.length,
2343
+ eventTypes: [...new Set(eventSequence.map((e) => e.type))],
2287
2344
  });
2288
2345
  }
2289
2346
  catch (error) {
@@ -2312,7 +2369,7 @@ Current user's request: ${currentInput}`;
2312
2369
  });
2313
2370
  }
2314
2371
  }
2315
- })(this);
2372
+ })();
2316
2373
  const streamResult = await this.processStreamResult(processedStream, enhancedOptions, factoryResult);
2317
2374
  const responseTime = Date.now() - startTime;
2318
2375
  this.emitStreamEndEvents(streamResult);
@@ -2325,6 +2382,7 @@ Current user's request: ${currentInput}`;
2325
2382
  fallback: metadata.fallbackAttempted,
2326
2383
  guardrailsBlocked: metadata.guardrailsBlocked,
2327
2384
  error: metadata.error,
2385
+ events: eventSequence,
2328
2386
  });
2329
2387
  }
2330
2388
  catch (error) {
@@ -2439,6 +2497,7 @@ Current user's request: ${currentInput}`;
2439
2497
  toolResults: streamResult.toolResults,
2440
2498
  analytics: streamResult.analytics,
2441
2499
  evaluation: streamResult.evaluation,
2500
+ events: config.events && config.events.length > 0 ? config.events : undefined,
2442
2501
  metadata: {
2443
2502
  streamId: config.streamId,
2444
2503
  startTime: config.startTime,
@@ -116,6 +116,21 @@ export type ConversationMemoryStats = {
116
116
  /** Total number of conversation turns across all sessions */
117
117
  totalTurns: number;
118
118
  };
119
+ /**
120
+ * Stream event for event sequence tracking
121
+ * Used to reconstruct exact flow of streaming responses with proper ordering
122
+ * @since 8.21.0
123
+ */
124
+ export type StreamEventSequence = {
125
+ /** Event type (text-chunk, ui-component, tool:start, tool:end, hitl:confirmation-request, etc.) */
126
+ type: string;
127
+ /** Sequence number for ordering events */
128
+ seq: number;
129
+ /** Timestamp when event occurred */
130
+ timestamp: number;
131
+ /** Event-specific data */
132
+ [key: string]: unknown;
133
+ };
119
134
  /**
120
135
  * Chat message format for conversation history
121
136
  */
@@ -145,6 +160,13 @@ export type ChatMessage = {
145
160
  type?: string;
146
161
  error?: string;
147
162
  };
163
+ /**
164
+ * Event sequence for rich history reconstruction
165
+ * Stores ordered events (text-chunk, ui-component, tool calls, HITL, etc.)
166
+ * Enables proper ordering and complete context restoration
167
+ * @since 8.21.0
168
+ */
169
+ events?: StreamEventSequence[];
148
170
  /** Message metadata (NEW - for token-based memory) */
149
171
  metadata?: {
150
172
  /** Is this a summary message? */
@@ -254,6 +276,7 @@ export type StoreConversationTurnOptions = {
254
276
  startTimeStamp?: Date;
255
277
  providerDetails?: ProviderDetails;
256
278
  enableSummarization?: boolean;
279
+ events?: StreamEventSequence[];
257
280
  };
258
281
  /**
259
282
  * Lightweight session metadata for efficient session listing
@@ -22,4 +22,4 @@ export type { Unknown, UnknownRecord, UnknownArray, JsonValue, JsonObject, JsonA
22
22
  export type { EvaluationData, EvaluationContext, EnhancedEvaluationResult, EvaluationRequest, EvaluationCriteria, } from "./evaluation.js";
23
23
  export type { TaskType, TaskClassification, ClassificationScores, ClassificationStats, ClassificationValidation, } from "./taskClassificationTypes.js";
24
24
  export type { DomainType, DomainConfig, DomainTemplate, DomainConfigOptions, DomainEvaluationCriteria, DomainValidationRule, } from "./domainTypes.js";
25
- export type { ConversationMemoryConfig, SessionMemory, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, } from "./conversation.js";
25
+ export type { ConversationMemoryConfig, SessionMemory, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, StoreConversationTurnOptions, } from "./conversation.js";
@@ -395,6 +395,12 @@ export type StreamResult = {
395
395
  };
396
396
  analytics?: AnalyticsData | Promise<AnalyticsData>;
397
397
  evaluation?: EvaluationData | Promise<EvaluationData>;
398
+ events?: Array<{
399
+ type: string;
400
+ seq: number;
401
+ timestamp: number;
402
+ [key: string]: unknown;
403
+ }>;
398
404
  };
399
405
  /**
400
406
  * Enhanced provider type with stream method
@@ -44,8 +44,8 @@ import type { StreamOptions } from "../types/streamTypes.js";
44
44
  export declare function buildMultimodalOptions(options: StreamOptions, providerName: string, modelName: string): {
45
45
  input: {
46
46
  text: string;
47
- images: (string | Buffer<ArrayBufferLike> | import("../types/multimodal.js").ImageWithAltText)[] | undefined;
48
- content: import("../types/multimodal.js").Content[] | undefined;
47
+ images: (string | Buffer<ArrayBufferLike> | import("../index.js").ImageWithAltText)[] | undefined;
48
+ content: import("../index.js").Content[] | undefined;
49
49
  files: (string | Buffer<ArrayBufferLike>)[] | undefined;
50
50
  csvFiles: (string | Buffer<ArrayBufferLike>)[] | undefined;
51
51
  pdfFiles: (string | Buffer<ArrayBufferLike>)[] | undefined;
@@ -56,12 +56,12 @@ export declare function buildMultimodalOptions(options: StreamOptions, providerN
56
56
  includeHeaders?: boolean;
57
57
  } | undefined;
58
58
  systemPrompt: string | undefined;
59
- conversationHistory: import("../types/conversation.js").ChatMessage[] | undefined;
59
+ conversationHistory: import("../index.js").ChatMessage[] | undefined;
60
60
  provider: string;
61
61
  model: string;
62
62
  temperature: number | undefined;
63
63
  maxTokens: number | undefined;
64
64
  enableAnalytics: boolean | undefined;
65
65
  enableEvaluation: boolean | undefined;
66
- context: import("../types/common.js").UnknownRecord | undefined;
66
+ context: import("../index.js").UnknownRecord | undefined;
67
67
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@juspay/neurolink",
3
- "version": "8.34.0",
3
+ "version": "8.35.0",
4
4
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 13 providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
5
5
  "author": {
6
6
  "name": "Juspay Technologies",
@@ -207,7 +207,8 @@
207
207
  "xml2js": "^0.6.2",
208
208
  "yargs": "^17.7.2",
209
209
  "zod": "^3.22.0",
210
- "zod-to-json-schema": "^3.24.6"
210
+ "zod-to-json-schema": "^3.24.6",
211
+ "@google-cloud/text-to-speech": "^5.0.0"
211
212
  },
212
213
  "optionalDependencies": {
213
214
  "canvas": "^3.2.0"
@@ -220,7 +221,6 @@
220
221
  "@changesets/changelog-github": "^0.5.1",
221
222
  "@changesets/cli": "^2.29.7",
222
223
  "@eslint/js": "^9.35.0",
223
- "@google-cloud/text-to-speech": "^5.0.0",
224
224
  "@semantic-release/changelog": "^6.0.3",
225
225
  "@semantic-release/commit-analyzer": "^13.0.1",
226
226
  "@semantic-release/git": "^10.0.1",