@juspay/neurolink 8.19.1 → 8.20.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/cli/loop/optionsSchema.js +4 -0
  3. package/dist/config/conversationMemory.d.ts +15 -0
  4. package/dist/config/conversationMemory.js +22 -3
  5. package/dist/core/conversationMemoryFactory.js +0 -3
  6. package/dist/core/conversationMemoryInitializer.js +1 -9
  7. package/dist/core/conversationMemoryManager.d.ts +31 -8
  8. package/dist/core/conversationMemoryManager.js +174 -80
  9. package/dist/core/redisConversationMemoryManager.d.ts +28 -13
  10. package/dist/core/redisConversationMemoryManager.js +211 -121
  11. package/dist/lib/config/conversationMemory.d.ts +15 -0
  12. package/dist/lib/config/conversationMemory.js +22 -3
  13. package/dist/lib/core/conversationMemoryFactory.js +0 -3
  14. package/dist/lib/core/conversationMemoryInitializer.js +1 -9
  15. package/dist/lib/core/conversationMemoryManager.d.ts +31 -8
  16. package/dist/lib/core/conversationMemoryManager.js +174 -80
  17. package/dist/lib/core/redisConversationMemoryManager.d.ts +28 -13
  18. package/dist/lib/core/redisConversationMemoryManager.js +211 -121
  19. package/dist/lib/neurolink.js +29 -22
  20. package/dist/lib/types/conversation.d.ts +58 -9
  21. package/dist/lib/types/generateTypes.d.ts +1 -0
  22. package/dist/lib/types/sdkTypes.d.ts +1 -1
  23. package/dist/lib/types/streamTypes.d.ts +1 -0
  24. package/dist/lib/utils/conversationMemory.d.ts +43 -1
  25. package/dist/lib/utils/conversationMemory.js +181 -5
  26. package/dist/lib/utils/conversationMemoryUtils.js +16 -1
  27. package/dist/lib/utils/imageProcessor.d.ts +1 -0
  28. package/dist/lib/utils/imageProcessor.js +29 -1
  29. package/dist/lib/utils/redis.js +0 -5
  30. package/dist/neurolink.js +29 -22
  31. package/dist/types/conversation.d.ts +58 -9
  32. package/dist/types/generateTypes.d.ts +1 -0
  33. package/dist/types/sdkTypes.d.ts +1 -1
  34. package/dist/types/streamTypes.d.ts +1 -0
  35. package/dist/utils/conversationMemory.d.ts +43 -1
  36. package/dist/utils/conversationMemory.js +181 -5
  37. package/dist/utils/conversationMemoryUtils.js +16 -1
  38. package/dist/utils/imageProcessor.d.ts +1 -0
  39. package/dist/utils/imageProcessor.js +29 -1
  40. package/dist/utils/redis.js +0 -5
  41. package/package.json +1 -1
@@ -11,14 +11,10 @@ export type ConversationMemoryConfig = {
11
11
  enabled: boolean;
12
12
  /** Maximum number of sessions to keep in memory (default: 50) */
13
13
  maxSessions?: number;
14
- /** Maximum number of conversation turns to keep per session (default: 20) */
15
- maxTurnsPerSession?: number;
16
14
  /** Enable automatic summarization */
17
15
  enableSummarization?: boolean;
18
- /** Turn count to trigger summarization */
19
- summarizationThresholdTurns?: number;
20
- /** Target turn count for the summary */
21
- summarizationTargetTurns?: number;
16
+ /** Token threshold to trigger summarization (optional - defaults to 80% of model context) */
17
+ tokenThreshold?: number;
22
18
  /** Provider to use for summarization */
23
19
  summarizationProvider?: string;
24
20
  /** Model to use for summarization */
@@ -29,6 +25,12 @@ export type ConversationMemoryConfig = {
29
25
  mem0Config?: Mem0Config;
30
26
  /** Redis configuration (optional) - overrides environment variables */
31
27
  redisConfig?: RedisStorageConfig;
28
+ /** @deprecated Use tokenThreshold instead - Maximum number of conversation turns to keep per session (default: 20) */
29
+ maxTurnsPerSession?: number;
30
+ /** @deprecated Use tokenThreshold instead - Turn count to trigger summarization */
31
+ summarizationThresholdTurns?: number;
32
+ /** @deprecated Use tokenThreshold instead - Target turn count for the summary */
33
+ summarizationTargetTurns?: number;
32
34
  };
33
35
  /**
34
36
  * Complete memory for a conversation session
@@ -47,6 +49,16 @@ export type SessionMemory = {
47
49
  createdAt: number;
48
50
  /** When this session was last active */
49
51
  lastActivity: number;
52
+ /** Pointer to last summarized message ID (NEW - for token-based memory) */
53
+ summarizedUpToMessageId?: string;
54
+ /** Stored summary message that condenses conversation history up to summarizedUpToMessageId */
55
+ summarizedMessage?: string;
56
+ /** Per-session token threshold override (NEW - for token-based memory) */
57
+ tokenThreshold?: number;
58
+ /** Cached token count for performance (NEW - for token-based memory) */
59
+ lastTokenCount?: number;
60
+ /** When token count was last calculated (NEW - for token-based memory) */
61
+ lastCountedAt?: number;
50
62
  /** Optional session metadata */
51
63
  metadata?: {
52
64
  /** User role or permissions */
@@ -70,13 +82,13 @@ export type ConversationMemoryStats = {
70
82
  * Chat message format for conversation history
71
83
  */
72
84
  export type ChatMessage = {
85
+ /** Unique message identifier (required for token-based memory) */
86
+ id: string;
73
87
  /** Role/type of the message */
74
88
  role: "user" | "assistant" | "system" | "tool_call" | "tool_result";
75
89
  /** Content of the message */
76
90
  content: string;
77
- /** Message ID (optional) - for new format */
78
- id?: string;
79
- /** Timestamp (optional) - for new format */
91
+ /** Timestamp (ISO string) */
80
92
  timestamp?: string;
81
93
  /** Tool name (optional) - for tool_call/tool_result messages */
82
94
  tool?: string;
@@ -90,6 +102,17 @@ export type ChatMessage = {
90
102
  type?: string;
91
103
  error?: string;
92
104
  };
105
+ /** Message metadata (NEW - for token-based memory) */
106
+ metadata?: {
107
+ /** Is this a summary message? */
108
+ isSummary?: boolean;
109
+ /** First message ID that this summary covers */
110
+ summarizesFrom?: string;
111
+ /** Last message ID that this summary covers */
112
+ summarizesTo?: string;
113
+ /** Was this message truncated due to token limits? */
114
+ truncated?: boolean;
115
+ };
93
116
  };
94
117
  /**
95
118
  * Multimodal message types - Re-exported from multimodal.ts
@@ -152,6 +175,18 @@ export type SessionIdentifier = {
152
175
  sessionId: string;
153
176
  userId?: string;
154
177
  };
178
+ /**
179
+ * Options for storing a conversation turn
180
+ */
181
+ export type StoreConversationTurnOptions = {
182
+ sessionId: string;
183
+ userId?: string;
184
+ userMessage: string;
185
+ aiResponse: string;
186
+ startTimeStamp?: Date;
187
+ providerDetails?: ProviderDetails;
188
+ enableSummarization?: boolean;
189
+ };
155
190
  /**
156
191
  * Lightweight session metadata for efficient session listing
157
192
  * Contains only essential information without heavy message arrays
@@ -179,6 +214,16 @@ export type ConversationBase = {
179
214
  createdAt: string;
180
215
  /** When this conversation was last updated */
181
216
  updatedAt: string;
217
+ /** Pointer to last summarized message (token-based memory) */
218
+ summarizedUpToMessageId?: string;
219
+ /** Stored summary message that condenses conversation history up to summarizedUpToMessageId */
220
+ summarizedMessage?: string;
221
+ /** Per-session token threshold override */
222
+ tokenThreshold?: number;
223
+ /** Cached token count for efficiency */
224
+ lastTokenCount?: number;
225
+ /** Timestamp of last token count */
226
+ lastCountedAt?: number;
182
227
  };
183
228
  /**
184
229
  * Redis conversation storage object format
@@ -250,3 +295,7 @@ export type RedisStorageConfig = {
250
295
  [key: string]: string | number | boolean | undefined;
251
296
  };
252
297
  };
298
+ export type ProviderDetails = {
299
+ provider: string;
300
+ model: string;
301
+ };
@@ -350,6 +350,7 @@ export type TextGenerationOptions = {
350
350
  formatStyle?: "raw" | "markdown" | "json";
351
351
  includeHeaders?: boolean;
352
352
  };
353
+ enableSummarization?: boolean;
353
354
  };
354
355
  /**
355
356
  * Text generation result (consolidated from core types)
@@ -22,4 +22,4 @@ export type { Unknown, UnknownRecord, UnknownArray, JsonValue, JsonObject, JsonA
22
22
  export type { EvaluationData, EvaluationContext, EnhancedEvaluationResult, EvaluationRequest, EvaluationCriteria, } from "./evaluation.js";
23
23
  export type { TaskType, TaskClassification, ClassificationScores, ClassificationStats, ClassificationValidation, } from "./taskClassificationTypes.js";
24
24
  export type { DomainType, DomainConfig, DomainTemplate, DomainConfigOptions, DomainEvaluationCriteria, DomainValidationRule, } from "./domainTypes.js";
25
- export type { ConversationMemoryConfig, SessionMemory, ConversationMemoryStats, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, } from "./conversation.js";
25
+ export type { ConversationMemoryConfig, SessionMemory, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, } from "./conversation.js";
@@ -299,6 +299,7 @@ export type StreamOptions = {
299
299
  };
300
300
  conversationMessages?: ChatMessage[];
301
301
  middleware?: MiddlewareFactoryOptions;
302
+ enableSummarization?: boolean;
302
303
  };
303
304
  /**
304
305
  * Stream function result type - Primary output format for streaming
@@ -2,7 +2,7 @@
2
2
  * Conversation Memory Utilities
3
3
  * Handles configuration merging and conversation memory operations
4
4
  */
5
- import type { ConversationMemoryConfig, ChatMessage } from "../types/conversation.js";
5
+ import type { ConversationMemoryConfig, ChatMessage, SessionMemory } from "../types/conversation.js";
6
6
  import type { ConversationMemoryManager } from "../core/conversationMemoryManager.js";
7
7
  import type { RedisConversationMemoryManager } from "../core/redisConversationMemoryManager.js";
8
8
  import type { TextGenerationOptions, TextGenerationResult } from "../types/generateTypes.js";
@@ -20,3 +20,45 @@ export declare function getConversationMessages(conversationMemory: Conversation
20
20
  * Saves user messages and AI responses for conversation memory
21
21
  */
22
22
  export declare function storeConversationTurn(conversationMemory: ConversationMemoryManager | RedisConversationMemoryManager | null | undefined, originalOptions: TextGenerationOptions, result: TextGenerationResult, startTimeStamp?: Date | undefined): Promise<void>;
23
+ /**
24
+ * Build context messages from pointer onwards (token-based memory)
25
+ * Returns summary message (if exists) + all messages after the summarized pointer
26
+ * @param session - Session memory with pointer
27
+ * @returns Context messages to send to LLM
28
+ */
29
+ export declare function buildContextFromPointer(session: SessionMemory): ChatMessage[];
30
+ /**
31
+ * Create summarization prompt from message history
32
+ * Used by both in-memory and Redis conversation managers
33
+ * @param history - Messages to summarize
34
+ * @param previousSummary - Optional previous summary to build upon
35
+ */
36
+ export declare function createSummarizationPrompt(history: ChatMessage[], previousSummary?: string): string;
37
+ /**
38
+ * Calculate token threshold based on model's output token limit
39
+ * Uses existing provider token limits as proxy for context window
40
+ * @param provider - AI provider name
41
+ * @param model - Model name
42
+ * @returns Token threshold (80% of model's token limit)
43
+ */
44
+ export declare function calculateTokenThreshold(provider: string, model: string): number;
45
+ /**
46
+ * Get effective token threshold for a session
47
+ * Priority: session override > env var > model-based (80%) > fallback
48
+ * @param provider - AI provider name
49
+ * @param model - Model name
50
+ * @param envOverride - Environment variable override
51
+ * @param sessionOverride - Per-session token threshold override
52
+ * @returns Effective token threshold
53
+ */
54
+ export declare function getEffectiveTokenThreshold(provider: string, model: string, envOverride?: number, sessionOverride?: number): number;
55
+ /**
56
+ * Generate summary using configured provider and model
57
+ * Centralized summarization logic used by both ConversationMemoryManager and RedisConversationMemoryManager
58
+ * @param messages - Messages to summarize
59
+ * @param config - Conversation memory configuration containing provider/model settings
60
+ * @param logPrefix - Prefix for log messages (e.g., "[ConversationMemory]" or "[RedisConversationMemoryManager]")
61
+ * @param previousSummary - Optional previous summary to build upon
62
+ * @returns Summary text or null if generation fails
63
+ */
64
+ export declare function generateSummary(messages: ChatMessage[], config: ConversationMemoryConfig, logPrefix?: string, previousSummary?: string): Promise<string | null>;
@@ -2,8 +2,10 @@
2
2
  * Conversation Memory Utilities
3
3
  * Handles configuration merging and conversation memory operations
4
4
  */
5
- import { getConversationMemoryDefaults } from "../config/conversationMemory.js";
5
+ import { getConversationMemoryDefaults, MEMORY_THRESHOLD_PERCENTAGE, DEFAULT_FALLBACK_THRESHOLD, } from "../config/conversationMemory.js";
6
+ import { TokenUtils } from "../constants/tokens.js";
6
7
  import { logger } from "./logger.js";
8
+ import { NeuroLink } from "../neurolink.js";
7
9
  /**
8
10
  * Apply conversation memory defaults to user configuration
9
11
  * Merges user config with environment variables and default values
@@ -34,8 +36,8 @@ export async function getConversationMessages(conversationMemory, options) {
34
36
  try {
35
37
  // Extract userId from context
36
38
  const userId = options.context?.userId;
37
- // Remove duplicate summarization logic - it should be handled in ConversationMemoryManager
38
- const messages = await conversationMemory.buildContextMessages(sessionId, userId);
39
+ const enableSummarization = options.enableSummarization ?? undefined;
40
+ const messages = await conversationMemory.buildContextMessages(sessionId, userId, enableSummarization);
39
41
  logger.debug("[conversationMemoryUtils] Conversation messages retrieved successfully", {
40
42
  sessionId,
41
43
  messageCount: messages.length,
@@ -100,9 +102,24 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
100
102
  return;
101
103
  }
102
104
  const userMessage = originalOptions.originalPrompt || originalOptions.prompt || "";
103
- const aiResponse = result.content;
105
+ const aiResponse = result.content ?? "";
106
+ let providerDetails = undefined;
107
+ if (result.provider && result.model) {
108
+ providerDetails = {
109
+ provider: result.provider,
110
+ model: result.model,
111
+ };
112
+ }
104
113
  try {
105
- await conversationMemory.storeConversationTurn(sessionId, userId, userMessage, aiResponse, startTimeStamp);
114
+ await conversationMemory.storeConversationTurn({
115
+ sessionId,
116
+ userId,
117
+ userMessage,
118
+ aiResponse,
119
+ startTimeStamp,
120
+ providerDetails,
121
+ enableSummarization: originalOptions.enableSummarization,
122
+ });
106
123
  logger.debug("[conversationMemoryUtils] Conversation turn stored successfully", {
107
124
  sessionId,
108
125
  userId,
@@ -121,3 +138,162 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
121
138
  });
122
139
  }
123
140
  }
141
+ /**
142
+ * Build context messages from pointer onwards (token-based memory)
143
+ * Returns summary message (if exists) + all messages after the summarized pointer
144
+ * @param session - Session memory with pointer
145
+ * @returns Context messages to send to LLM
146
+ */
147
+ export function buildContextFromPointer(session) {
148
+ if (!session.summarizedUpToMessageId || !session.summarizedMessage) {
149
+ return session.messages;
150
+ }
151
+ // find a better way to wirte this
152
+ const pointerIndex = session.messages.findIndex((msg) => msg.id === session.summarizedUpToMessageId);
153
+ if (pointerIndex === -1) {
154
+ logger.warn("Pointer message not found, returning all messages", {
155
+ sessionId: session.sessionId,
156
+ pointer: session.summarizedUpToMessageId,
157
+ totalMessages: session.messages.length,
158
+ });
159
+ return session.messages;
160
+ }
161
+ const messagesAfterPointer = session.messages.slice(pointerIndex + 1);
162
+ // Construct context: summary message + recent messages
163
+ const summaryMessage = {
164
+ id: `summary-${session.summarizedUpToMessageId}`,
165
+ role: "system",
166
+ content: `Previous conversation summary: ${session.summarizedMessage}`,
167
+ timestamp: new Date().toISOString(),
168
+ metadata: {
169
+ isSummary: true,
170
+ summarizesTo: session.summarizedUpToMessageId,
171
+ },
172
+ };
173
+ logger.debug("Building context with summary", {
174
+ sessionId: session.sessionId,
175
+ pointerIndex,
176
+ messagesAfterPointer: messagesAfterPointer.length,
177
+ totalMessages: session.messages.length,
178
+ summaryLength: session.summarizedMessage.length,
179
+ });
180
+ return [summaryMessage, ...messagesAfterPointer];
181
+ }
182
+ /**
183
+ * Create summarization prompt from message history
184
+ * Used by both in-memory and Redis conversation managers
185
+ * @param history - Messages to summarize
186
+ * @param previousSummary - Optional previous summary to build upon
187
+ */
188
+ export function createSummarizationPrompt(history, previousSummary) {
189
+ const formattedHistory = history
190
+ .map((msg) => `${msg.role}: ${msg.content}`)
191
+ .join("\n\n");
192
+ const previousSummarySection = previousSummary
193
+ ? `Previous Summary:
194
+ ---
195
+ ${previousSummary}
196
+ ---
197
+
198
+ `
199
+ : "";
200
+ return `
201
+ You are a context summarization AI. Your task is to condense the following conversation history for another AI assistant.
202
+ ${previousSummary ? "Build upon the previous summary and incorporate the new conversation turns below." : ""}
203
+ The summary must be a concise, third-person narrative that retains all critical information, including key entities, technical details, decisions made, and any specific dates or times mentioned.
204
+ Ensure the summary flows logically and is ready to be used as context for the next turn in the conversation.
205
+
206
+ ${previousSummarySection}Conversation History to Summarize:
207
+ ---
208
+ ${formattedHistory}
209
+ ---
210
+ `.trim();
211
+ }
212
+ /**
213
+ * Calculate token threshold based on model's output token limit
214
+ * Uses existing provider token limits as proxy for context window
215
+ * @param provider - AI provider name
216
+ * @param model - Model name
217
+ * @returns Token threshold (80% of model's token limit)
218
+ */
219
+ export function calculateTokenThreshold(provider, model) {
220
+ try {
221
+ // Get model's token limit from existing TokenUtils
222
+ const modelTokenLimit = TokenUtils.getProviderTokenLimit(provider, model);
223
+ // Return 80% of token limit for conversation memory
224
+ // This is conservative since output limits are typically smaller than input limits
225
+ return Math.floor(modelTokenLimit * MEMORY_THRESHOLD_PERCENTAGE);
226
+ }
227
+ catch (error) {
228
+ logger.warn("Failed to calculate model threshold, using fallback", {
229
+ provider,
230
+ model,
231
+ error: error instanceof Error ? error.message : String(error),
232
+ });
233
+ return DEFAULT_FALLBACK_THRESHOLD;
234
+ }
235
+ }
236
+ /**
237
+ * Get effective token threshold for a session
238
+ * Priority: session override > env var > model-based (80%) > fallback
239
+ * @param provider - AI provider name
240
+ * @param model - Model name
241
+ * @param envOverride - Environment variable override
242
+ * @param sessionOverride - Per-session token threshold override
243
+ * @returns Effective token threshold
244
+ */
245
+ export function getEffectiveTokenThreshold(provider, model, envOverride, sessionOverride) {
246
+ // Priority 1: Session-level override
247
+ if (sessionOverride && sessionOverride > 0) {
248
+ return sessionOverride;
249
+ }
250
+ // Priority 2: Environment variable override
251
+ if (envOverride && envOverride > 0) {
252
+ return envOverride;
253
+ }
254
+ // Priority 3: Model-based calculation (80% of context window)
255
+ try {
256
+ return calculateTokenThreshold(provider, model);
257
+ }
258
+ catch (error) {
259
+ logger.warn("Failed to calculate effective threshold, using fallback", {
260
+ provider,
261
+ model,
262
+ error: error instanceof Error ? error.message : String(error),
263
+ });
264
+ // Priority 4: Fallback for unknown models
265
+ return DEFAULT_FALLBACK_THRESHOLD;
266
+ }
267
+ }
268
+ /**
269
+ * Generate summary using configured provider and model
270
+ * Centralized summarization logic used by both ConversationMemoryManager and RedisConversationMemoryManager
271
+ * @param messages - Messages to summarize
272
+ * @param config - Conversation memory configuration containing provider/model settings
273
+ * @param logPrefix - Prefix for log messages (e.g., "[ConversationMemory]" or "[RedisConversationMemoryManager]")
274
+ * @param previousSummary - Optional previous summary to build upon
275
+ * @returns Summary text or null if generation fails
276
+ */
277
+ export async function generateSummary(messages, config, logPrefix = "[ConversationMemory]", previousSummary) {
278
+ const summarizationPrompt = createSummarizationPrompt(messages, previousSummary);
279
+ const summarizer = new NeuroLink({
280
+ conversationMemory: { enabled: false },
281
+ });
282
+ try {
283
+ if (!config.summarizationProvider || !config.summarizationModel) {
284
+ logger.error(`${logPrefix} Missing summarization provider`);
285
+ return null;
286
+ }
287
+ const summaryResult = await summarizer.generate({
288
+ input: { text: summarizationPrompt },
289
+ provider: config.summarizationProvider,
290
+ model: config.summarizationModel,
291
+ disableTools: true,
292
+ });
293
+ return summaryResult.content || null;
294
+ }
295
+ catch (error) {
296
+ logger.error(`${logPrefix} Error generating summary`, { error });
297
+ return null;
298
+ }
299
+ }
@@ -58,8 +58,23 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
58
58
  if (typeof sessionId !== "string" || !sessionId) {
59
59
  return;
60
60
  }
61
+ let providerDetails = undefined;
62
+ if (result.provider && result.model) {
63
+ providerDetails = {
64
+ provider: result.provider,
65
+ model: result.model,
66
+ };
67
+ }
61
68
  try {
62
- await conversationMemory.storeConversationTurn(sessionId, userId, originalOptions.originalPrompt || originalOptions.prompt || "", result.content, startTimeStamp);
69
+ await conversationMemory.storeConversationTurn({
70
+ sessionId,
71
+ userId,
72
+ userMessage: originalOptions.originalPrompt || originalOptions.prompt || "",
73
+ aiResponse: result.content,
74
+ startTimeStamp,
75
+ providerDetails,
76
+ enableSummarization: originalOptions.enableSummarization,
77
+ });
63
78
  logger.debug("Conversation turn stored", {
64
79
  sessionId,
65
80
  userId,
@@ -157,6 +157,7 @@ export declare const imageUtils: {
157
157
  createDataUri: (base64: string, mimeType?: string) => string;
158
158
  /**
159
159
  * Validate base64 string format
160
+ * Validates format BEFORE buffer allocation to prevent memory exhaustion
160
161
  */
161
162
  isValidBase64: (str: string) => boolean;
162
163
  /**
@@ -622,12 +622,40 @@ export const imageUtils = {
622
622
  },
623
623
  /**
624
624
  * Validate base64 string format
625
+ * Validates format BEFORE buffer allocation to prevent memory exhaustion
625
626
  */
626
627
  isValidBase64: (str) => {
627
628
  try {
628
629
  // Remove data URI prefix if present
629
630
  const cleanBase64 = str.includes(",") ? str.split(",")[1] : str;
630
- // Check if it's valid base64
631
+ // Empty string check
632
+ if (!cleanBase64 || cleanBase64.length === 0) {
633
+ return false;
634
+ }
635
+ // 1. Validate character set FIRST (A-Z, a-z, 0-9, +, /, =)
636
+ // This prevents memory allocation for invalid input like "hello world"
637
+ const base64Regex = /^[A-Za-z0-9+/]*={0,2}$/;
638
+ if (!base64Regex.test(cleanBase64)) {
639
+ return false;
640
+ }
641
+ // 2. Check length is multiple of 4
642
+ if (cleanBase64.length % 4 !== 0) {
643
+ return false;
644
+ }
645
+ // 3. Validate padding position (max 2 equals at end only)
646
+ const paddingIndex = cleanBase64.indexOf("=");
647
+ if (paddingIndex !== -1) {
648
+ // Padding must be at the end
649
+ if (paddingIndex < cleanBase64.length - 2) {
650
+ return false;
651
+ }
652
+ // No characters after padding
653
+ const afterPadding = cleanBase64.slice(paddingIndex);
654
+ if (!/^=+$/.test(afterPadding)) {
655
+ return false;
656
+ }
657
+ }
658
+ // 4. ONLY NOW decode if format is valid
631
659
  const decoded = Buffer.from(cleanBase64, "base64");
632
660
  const reencoded = decoded.toString("base64");
633
661
  // Remove padding for comparison (base64 can have different padding)
@@ -91,14 +91,9 @@ export function serializeConversation(conversation) {
91
91
  */
92
92
  export function deserializeConversation(data) {
93
93
  if (!data) {
94
- logger.debug("[redisUtils] No conversation data to deserialize, returning null");
95
94
  return null;
96
95
  }
97
96
  try {
98
- logger.debug("[redisUtils] Deserializing conversation", {
99
- dataLength: data.length,
100
- dataPreview: data.substring(0, 100) + (data.length > 100 ? "..." : ""),
101
- });
102
97
  // Parse as unknown first, then validate before casting
103
98
  const parsedData = JSON.parse(data);
104
99
  // Check if the parsed data is an object with required properties
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@juspay/neurolink",
3
- "version": "8.19.1",
3
+ "version": "8.20.1",
4
4
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
5
5
  "author": {
6
6
  "name": "Juspay Technologies",