@juspay/neurolink 8.19.1 → 8.20.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/cli/loop/optionsSchema.js +4 -0
- package/dist/config/conversationMemory.d.ts +15 -0
- package/dist/config/conversationMemory.js +22 -3
- package/dist/core/conversationMemoryFactory.js +0 -3
- package/dist/core/conversationMemoryInitializer.js +1 -9
- package/dist/core/conversationMemoryManager.d.ts +31 -8
- package/dist/core/conversationMemoryManager.js +174 -80
- package/dist/core/redisConversationMemoryManager.d.ts +28 -13
- package/dist/core/redisConversationMemoryManager.js +211 -121
- package/dist/lib/config/conversationMemory.d.ts +15 -0
- package/dist/lib/config/conversationMemory.js +22 -3
- package/dist/lib/core/conversationMemoryFactory.js +0 -3
- package/dist/lib/core/conversationMemoryInitializer.js +1 -9
- package/dist/lib/core/conversationMemoryManager.d.ts +31 -8
- package/dist/lib/core/conversationMemoryManager.js +174 -80
- package/dist/lib/core/redisConversationMemoryManager.d.ts +28 -13
- package/dist/lib/core/redisConversationMemoryManager.js +211 -121
- package/dist/lib/neurolink.js +29 -22
- package/dist/lib/types/conversation.d.ts +58 -9
- package/dist/lib/types/generateTypes.d.ts +1 -0
- package/dist/lib/types/sdkTypes.d.ts +1 -1
- package/dist/lib/types/streamTypes.d.ts +1 -0
- package/dist/lib/utils/conversationMemory.d.ts +43 -1
- package/dist/lib/utils/conversationMemory.js +181 -5
- package/dist/lib/utils/conversationMemoryUtils.js +16 -1
- package/dist/lib/utils/redis.js +0 -5
- package/dist/neurolink.js +29 -22
- package/dist/types/conversation.d.ts +58 -9
- package/dist/types/generateTypes.d.ts +1 -0
- package/dist/types/sdkTypes.d.ts +1 -1
- package/dist/types/streamTypes.d.ts +1 -0
- package/dist/utils/conversationMemory.d.ts +43 -1
- package/dist/utils/conversationMemory.js +181 -5
- package/dist/utils/conversationMemoryUtils.js +16 -1
- package/dist/utils/redis.js +0 -5
- package/package.json +1 -1
@@ -11,14 +11,10 @@ export type ConversationMemoryConfig = {
     enabled: boolean;
     /** Maximum number of sessions to keep in memory (default: 50) */
     maxSessions?: number;
-    /** Maximum number of conversation turns to keep per session (default: 20) */
-    maxTurnsPerSession?: number;
     /** Enable automatic summarization */
     enableSummarization?: boolean;
-    /**
-
-    /** Target turn count for the summary */
-    summarizationTargetTurns?: number;
+    /** Token threshold to trigger summarization (optional - defaults to 80% of model context) */
+    tokenThreshold?: number;
     /** Provider to use for summarization */
     summarizationProvider?: string;
     /** Model to use for summarization */
@@ -29,6 +25,12 @@ export type ConversationMemoryConfig = {
     mem0Config?: Mem0Config;
     /** Redis configuration (optional) - overrides environment variables */
     redisConfig?: RedisStorageConfig;
+    /** @deprecated Use tokenThreshold instead - Maximum number of conversation turns to keep per session (default: 20) */
+    maxTurnsPerSession?: number;
+    /** @deprecated Use tokenThreshold instead - Turn count to trigger summarization */
+    summarizationThresholdTurns?: number;
+    /** @deprecated Use tokenThreshold instead - Target turn count for the summary */
+    summarizationTargetTurns?: number;
 };
 /**
  * Complete memory for a conversation session
@@ -47,6 +49,16 @@ export type SessionMemory = {
     createdAt: number;
     /** When this session was last active */
     lastActivity: number;
+    /** Pointer to last summarized message ID (NEW - for token-based memory) */
+    summarizedUpToMessageId?: string;
+    /** Stored summary message that condenses conversation history up to summarizedUpToMessageId */
+    summarizedMessage?: string;
+    /** Per-session token threshold override (NEW - for token-based memory) */
+    tokenThreshold?: number;
+    /** Cached token count for performance (NEW - for token-based memory) */
+    lastTokenCount?: number;
+    /** When token count was last calculated (NEW - for token-based memory) */
+    lastCountedAt?: number;
     /** Optional session metadata */
     metadata?: {
         /** User role or permissions */
@@ -70,13 +82,13 @@ export type ConversationMemoryStats = {
  * Chat message format for conversation history
  */
 export type ChatMessage = {
+    /** Unique message identifier (required for token-based memory) */
+    id: string;
     /** Role/type of the message */
     role: "user" | "assistant" | "system" | "tool_call" | "tool_result";
     /** Content of the message */
     content: string;
-    /**
-    id?: string;
-    /** Timestamp (optional) - for new format */
+    /** Timestamp (ISO string) */
     timestamp?: string;
     /** Tool name (optional) - for tool_call/tool_result messages */
     tool?: string;
@@ -90,6 +102,17 @@ export type ChatMessage = {
         type?: string;
         error?: string;
     };
+    /** Message metadata (NEW - for token-based memory) */
+    metadata?: {
+        /** Is this a summary message? */
+        isSummary?: boolean;
+        /** First message ID that this summary covers */
+        summarizesFrom?: string;
+        /** Last message ID that this summary covers */
+        summarizesTo?: string;
+        /** Was this message truncated due to token limits? */
+        truncated?: boolean;
+    };
 };
 /**
  * Multimodal message types - Re-exported from multimodal.ts
@@ -152,6 +175,18 @@ export type SessionIdentifier = {
     sessionId: string;
     userId?: string;
 };
+/**
+ * Options for storing a conversation turn
+ */
+export type StoreConversationTurnOptions = {
+    sessionId: string;
+    userId?: string;
+    userMessage: string;
+    aiResponse: string;
+    startTimeStamp?: Date;
+    providerDetails?: ProviderDetails;
+    enableSummarization?: boolean;
+};
 /**
  * Lightweight session metadata for efficient session listing
  * Contains only essential information without heavy message arrays
@@ -179,6 +214,16 @@ export type ConversationBase = {
     createdAt: string;
     /** When this conversation was last updated */
     updatedAt: string;
+    /** Pointer to last summarized message (token-based memory) */
+    summarizedUpToMessageId?: string;
+    /** Stored summary message that condenses conversation history up to summarizedUpToMessageId */
+    summarizedMessage?: string;
+    /** Per-session token threshold override */
+    tokenThreshold?: number;
+    /** Cached token count for efficiency */
+    lastTokenCount?: number;
+    /** Timestamp of last token count */
+    lastCountedAt?: number;
 };
 /**
  * Redis conversation storage object format
@@ -250,3 +295,7 @@ export type RedisStorageConfig = {
         [key: string]: string | number | boolean | undefined;
     };
 };
+export type ProviderDetails = {
+    provider: string;
+    model: string;
+};
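Net effect of these type changes: turn-count tuning (`maxTurnsPerSession`, `summarizationThresholdTurns`, `summarizationTargetTurns`) is deprecated in favour of a single `tokenThreshold` budget. A minimal migration sketch in TypeScript, assuming the config is passed via the `conversationMemory` constructor option (taken from the `new NeuroLink({ conversationMemory: ... })` call visible later in this diff) and that the type re-exports surface at the package root; threshold and provider/model values are illustrative only:

```typescript
import { NeuroLink, type ConversationMemoryConfig } from "@juspay/neurolink";

// 8.19.1-style config: turn-based limits (still accepted in 8.20.0, but @deprecated).
const legacyConfig: ConversationMemoryConfig = {
  enabled: true,
  enableSummarization: true,
  maxTurnsPerSession: 20,
  summarizationTargetTurns: 5,
};

// 8.20.0-style config: a single token budget; leaving tokenThreshold unset falls
// back to 80% of the model's token limit (see calculateTokenThreshold in this diff).
const tokenBasedConfig: ConversationMemoryConfig = {
  enabled: true,
  enableSummarization: true,
  tokenThreshold: 6000, // illustrative value
  summarizationProvider: "openai", // illustrative provider/model pair
  summarizationModel: "gpt-4o-mini",
};

const neurolink = new NeuroLink({ conversationMemory: tokenBasedConfig });
```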
@@ -22,4 +22,4 @@ export type { Unknown, UnknownRecord, UnknownArray, JsonValue, JsonObject, JsonA
 export type { EvaluationData, EvaluationContext, EnhancedEvaluationResult, EvaluationRequest, EvaluationCriteria, } from "./evaluation.js";
 export type { TaskType, TaskClassification, ClassificationScores, ClassificationStats, ClassificationValidation, } from "./taskClassificationTypes.js";
 export type { DomainType, DomainConfig, DomainTemplate, DomainConfigOptions, DomainEvaluationCriteria, DomainValidationRule, } from "./domainTypes.js";
-export type { ConversationMemoryConfig, SessionMemory,
+export type { ConversationMemoryConfig, SessionMemory, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, } from "./conversation.js";
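With `ChatMessage`, `SessionIdentifier`, and the other conversation types now in this re-export list, downstream code can import them from the SDK's type barrel. A sketch, assuming these re-exports surface at the package root:

```typescript
import type { ChatMessage, SessionIdentifier } from "@juspay/neurolink";

// Note: id is now required on ChatMessage (see the type diff above).
const turn: ChatMessage = {
  id: "m1",
  role: "user",
  content: "Hello",
  timestamp: new Date().toISOString(),
};

const session: SessionIdentifier = { sessionId: "demo-session", userId: "user-42" };
```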
@@ -2,7 +2,7 @@
  * Conversation Memory Utilities
  * Handles configuration merging and conversation memory operations
  */
-import type { ConversationMemoryConfig, ChatMessage } from "../types/conversation.js";
+import type { ConversationMemoryConfig, ChatMessage, SessionMemory } from "../types/conversation.js";
 import type { ConversationMemoryManager } from "../core/conversationMemoryManager.js";
 import type { RedisConversationMemoryManager } from "../core/redisConversationMemoryManager.js";
 import type { TextGenerationOptions, TextGenerationResult } from "../types/generateTypes.js";
@@ -20,3 +20,45 @@ export declare function getConversationMessages(conversationMemory: Conversation
  * Saves user messages and AI responses for conversation memory
  */
 export declare function storeConversationTurn(conversationMemory: ConversationMemoryManager | RedisConversationMemoryManager | null | undefined, originalOptions: TextGenerationOptions, result: TextGenerationResult, startTimeStamp?: Date | undefined): Promise<void>;
+/**
+ * Build context messages from pointer onwards (token-based memory)
+ * Returns summary message (if exists) + all messages after the summarized pointer
+ * @param session - Session memory with pointer
+ * @returns Context messages to send to LLM
+ */
+export declare function buildContextFromPointer(session: SessionMemory): ChatMessage[];
+/**
+ * Create summarization prompt from message history
+ * Used by both in-memory and Redis conversation managers
+ * @param history - Messages to summarize
+ * @param previousSummary - Optional previous summary to build upon
+ */
+export declare function createSummarizationPrompt(history: ChatMessage[], previousSummary?: string): string;
+/**
+ * Calculate token threshold based on model's output token limit
+ * Uses existing provider token limits as proxy for context window
+ * @param provider - AI provider name
+ * @param model - Model name
+ * @returns Token threshold (80% of model's token limit)
+ */
+export declare function calculateTokenThreshold(provider: string, model: string): number;
+/**
+ * Get effective token threshold for a session
+ * Priority: session override > env var > model-based (80%) > fallback
+ * @param provider - AI provider name
+ * @param model - Model name
+ * @param envOverride - Environment variable override
+ * @param sessionOverride - Per-session token threshold override
+ * @returns Effective token threshold
+ */
+export declare function getEffectiveTokenThreshold(provider: string, model: string, envOverride?: number, sessionOverride?: number): number;
+/**
+ * Generate summary using configured provider and model
+ * Centralized summarization logic used by both ConversationMemoryManager and RedisConversationMemoryManager
+ * @param messages - Messages to summarize
+ * @param config - Conversation memory configuration containing provider/model settings
+ * @param previousSummary - Optional previous summary to build upon
+ * @param logPrefix - Prefix for log messages (e.g., "[ConversationMemory]" or "[RedisConversationMemoryManager]")
+ * @returns Summary text or null if generation fails
+ */
+export declare function generateSummary(messages: ChatMessage[], config: ConversationMemoryConfig, logPrefix?: string, previousSummary?: string): Promise<string | null>;
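The declarations above fix the resolution order for the token budget. A usage sketch, assuming the utilities are reachable via the published `dist/utils/conversationMemory.js` path; the deep-import path and the environment variable name are assumptions, not documented API:

```typescript
import {
  calculateTokenThreshold,
  getEffectiveTokenThreshold,
} from "@juspay/neurolink/dist/utils/conversationMemory.js";

// 80% of the provider/model token limit (falls back internally for unknown models).
const modelDefault = calculateTokenThreshold("openai", "gpt-4o"); // illustrative pair

// Resolution order per the declaration:
// session override > env override > 80% of model limit > built-in fallback.
const envOverride = process.env.CONVERSATION_TOKEN_THRESHOLD // hypothetical env var
  ? Number(process.env.CONVERSATION_TOKEN_THRESHOLD)
  : undefined;
const effective = getEffectiveTokenThreshold("openai", "gpt-4o", envOverride, undefined);
```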
@@ -2,8 +2,10 @@
  * Conversation Memory Utilities
  * Handles configuration merging and conversation memory operations
  */
-import { getConversationMemoryDefaults } from "../config/conversationMemory.js";
+import { getConversationMemoryDefaults, MEMORY_THRESHOLD_PERCENTAGE, DEFAULT_FALLBACK_THRESHOLD, } from "../config/conversationMemory.js";
+import { TokenUtils } from "../constants/tokens.js";
 import { logger } from "./logger.js";
+import { NeuroLink } from "../neurolink.js";
 /**
  * Apply conversation memory defaults to user configuration
  * Merges user config with environment variables and default values
@@ -34,8 +36,8 @@ export async function getConversationMessages(conversationMemory, options) {
     try {
         // Extract userId from context
         const userId = options.context?.userId;
-
-        const messages = await conversationMemory.buildContextMessages(sessionId, userId);
+        const enableSummarization = options.enableSummarization ?? undefined;
+        const messages = await conversationMemory.buildContextMessages(sessionId, userId, enableSummarization);
         logger.debug("[conversationMemoryUtils] Conversation messages retrieved successfully", {
             sessionId,
             messageCount: messages.length,
@@ -100,9 +102,24 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
         return;
     }
     const userMessage = originalOptions.originalPrompt || originalOptions.prompt || "";
-    const aiResponse = result.content;
+    const aiResponse = result.content ?? "";
+    let providerDetails = undefined;
+    if (result.provider && result.model) {
+        providerDetails = {
+            provider: result.provider,
+            model: result.model,
+        };
+    }
     try {
-        await conversationMemory.storeConversationTurn(
+        await conversationMemory.storeConversationTurn({
+            sessionId,
+            userId,
+            userMessage,
+            aiResponse,
+            startTimeStamp,
+            providerDetails,
+            enableSummarization: originalOptions.enableSummarization,
+        });
         logger.debug("[conversationMemoryUtils] Conversation turn stored successfully", {
             sessionId,
             userId,
@@ -121,4 +138,163 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
         });
     }
 }
+/**
+ * Build context messages from pointer onwards (token-based memory)
+ * Returns summary message (if exists) + all messages after the summarized pointer
+ * @param session - Session memory with pointer
+ * @returns Context messages to send to LLM
+ */
+export function buildContextFromPointer(session) {
+    if (!session.summarizedUpToMessageId || !session.summarizedMessage) {
+        return session.messages;
+    }
+    // find a better way to wirte this
+    const pointerIndex = session.messages.findIndex((msg) => msg.id === session.summarizedUpToMessageId);
+    if (pointerIndex === -1) {
+        logger.warn("Pointer message not found, returning all messages", {
+            sessionId: session.sessionId,
+            pointer: session.summarizedUpToMessageId,
+            totalMessages: session.messages.length,
+        });
+        return session.messages;
+    }
+    const messagesAfterPointer = session.messages.slice(pointerIndex + 1);
+    // Construct context: summary message + recent messages
+    const summaryMessage = {
+        id: `summary-${session.summarizedUpToMessageId}`,
+        role: "system",
+        content: `Previous conversation summary: ${session.summarizedMessage}`,
+        timestamp: new Date().toISOString(),
+        metadata: {
+            isSummary: true,
+            summarizesTo: session.summarizedUpToMessageId,
+        },
+    };
+    logger.debug("Building context with summary", {
+        sessionId: session.sessionId,
+        pointerIndex,
+        messagesAfterPointer: messagesAfterPointer.length,
+        totalMessages: session.messages.length,
+        summaryLength: session.summarizedMessage.length,
+    });
+    return [summaryMessage, ...messagesAfterPointer];
+}
+/**
+ * Create summarization prompt from message history
+ * Used by both in-memory and Redis conversation managers
+ * @param history - Messages to summarize
+ * @param previousSummary - Optional previous summary to build upon
+ */
+export function createSummarizationPrompt(history, previousSummary) {
+    const formattedHistory = history
+        .map((msg) => `${msg.role}: ${msg.content}`)
+        .join("\n\n");
+    const previousSummarySection = previousSummary
+        ? `Previous Summary:
+---
+${previousSummary}
+---
+
+`
+        : "";
+    return `
+You are a context summarization AI. Your task is to condense the following conversation history for another AI assistant.
+${previousSummary ? "Build upon the previous summary and incorporate the new conversation turns below." : ""}
+The summary must be a concise, third-person narrative that retains all critical information, including key entities, technical details, decisions made, and any specific dates or times mentioned.
+Ensure the summary flows logically and is ready to be used as context for the next turn in the conversation.
+
+${previousSummarySection}Conversation History to Summarize:
+---
+${formattedHistory}
+---
+`.trim();
+}
+/**
+ * Calculate token threshold based on model's output token limit
+ * Uses existing provider token limits as proxy for context window
+ * @param provider - AI provider name
+ * @param model - Model name
+ * @returns Token threshold (80% of model's token limit)
+ */
+export function calculateTokenThreshold(provider, model) {
+    try {
+        // Get model's token limit from existing TokenUtils
+        const modelTokenLimit = TokenUtils.getProviderTokenLimit(provider, model);
+        // Return 80% of token limit for conversation memory
+        // This is conservative since output limits are typically smaller than input limits
+        return Math.floor(modelTokenLimit * MEMORY_THRESHOLD_PERCENTAGE);
+    }
+    catch (error) {
+        logger.warn("Failed to calculate model threshold, using fallback", {
+            provider,
+            model,
+            error: error instanceof Error ? error.message : String(error),
+        });
+        return DEFAULT_FALLBACK_THRESHOLD;
+    }
+}
+/**
+ * Get effective token threshold for a session
+ * Priority: session override > env var > model-based (80%) > fallback
+ * @param provider - AI provider name
+ * @param model - Model name
+ * @param envOverride - Environment variable override
+ * @param sessionOverride - Per-session token threshold override
+ * @returns Effective token threshold
+ */
+export function getEffectiveTokenThreshold(provider, model, envOverride, sessionOverride) {
+    // Priority 1: Session-level override
+    if (sessionOverride && sessionOverride > 0) {
+        return sessionOverride;
+    }
+    // Priority 2: Environment variable override
+    if (envOverride && envOverride > 0) {
+        return envOverride;
+    }
+    // Priority 3: Model-based calculation (80% of context window)
+    try {
+        return calculateTokenThreshold(provider, model);
+    }
+    catch (error) {
+        logger.warn("Failed to calculate effective threshold, using fallback", {
+            provider,
+            model,
+            error: error instanceof Error ? error.message : String(error),
+        });
+        // Priority 4: Fallback for unknown models
+        return DEFAULT_FALLBACK_THRESHOLD;
+    }
+}
+/**
+ * Generate summary using configured provider and model
+ * Centralized summarization logic used by both ConversationMemoryManager and RedisConversationMemoryManager
+ * @param messages - Messages to summarize
+ * @param config - Conversation memory configuration containing provider/model settings
+ * @param previousSummary - Optional previous summary to build upon
+ * @param logPrefix - Prefix for log messages (e.g., "[ConversationMemory]" or "[RedisConversationMemoryManager]")
+ * @returns Summary text or null if generation fails
+ */
+export async function generateSummary(messages, config, logPrefix = "[ConversationMemory]", previousSummary) {
+    const summarizationPrompt = createSummarizationPrompt(messages, previousSummary);
+    const summarizer = new NeuroLink({
+        conversationMemory: { enabled: false },
+    });
+    try {
+        if (!config.summarizationProvider || !config.summarizationModel) {
+            logger.error(`${logPrefix} Missing summarization provider`);
+            return null;
+        }
+        const summaryResult = await summarizer.generate({
+            input: { text: summarizationPrompt },
+            provider: config.summarizationProvider,
+            model: config.summarizationModel,
+            disableTools: true,
+        });
+        return summaryResult.content || null;
+    }
+    catch (error) {
+        logger.error(`${logPrefix} Error generating summary`, { error });
+        return null;
+    }
+}
 //# sourceMappingURL=conversationMemory.js.map
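The key behavioural change in this file is `buildContextFromPointer`: once a session carries a `summarizedUpToMessageId` and `summarizedMessage`, everything up to the pointer is replaced by a single system message. A small illustration of the expected input/output shape; field names follow the `SessionMemory` type in this diff, the deep-import path and root type re-export are assumptions, and fields not relevant to the sketch are omitted:

```typescript
import { buildContextFromPointer } from "@juspay/neurolink/dist/utils/conversationMemory.js"; // deep-import path is an assumption
import type { SessionMemory } from "@juspay/neurolink"; // assumes the type barrel is re-exported at the package root

// Only the fields relevant to pointer-based context building are shown;
// the double cast keeps the sketch compact (a full SessionMemory has more fields).
const session = {
  sessionId: "demo-session",
  createdAt: Date.now(),
  lastActivity: Date.now(),
  summarizedUpToMessageId: "m2",
  summarizedMessage: "User asked about pricing; assistant listed the available plans.",
  messages: [
    { id: "m1", role: "user", content: "What plans do you offer?" },
    { id: "m2", role: "assistant", content: "Free, Pro and Enterprise." },
    { id: "m3", role: "user", content: "How much is Pro?" },
  ],
} as unknown as SessionMemory;

// Expected result: [system message "Previous conversation summary: ...", m3].
// If the pointer is missing or not found, the full message list is returned unchanged.
const context = buildContextFromPointer(session);
```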
@@ -58,8 +58,23 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
     if (typeof sessionId !== "string" || !sessionId) {
         return;
     }
+    let providerDetails = undefined;
+    if (result.provider && result.model) {
+        providerDetails = {
+            provider: result.provider,
+            model: result.model,
+        };
+    }
     try {
-        await conversationMemory.storeConversationTurn(
+        await conversationMemory.storeConversationTurn({
+            sessionId,
+            userId,
+            userMessage: originalOptions.originalPrompt || originalOptions.prompt || "",
+            aiResponse: result.content,
+            startTimeStamp,
+            providerDetails,
+            enableSummarization: originalOptions.enableSummarization,
+        });
         logger.debug("Conversation turn stored", {
             sessionId,
             userId,
package/dist/lib/utils/redis.js CHANGED
@@ -91,14 +91,9 @@ export function serializeConversation(conversation) {
  */
 export function deserializeConversation(data) {
     if (!data) {
-        logger.debug("[redisUtils] No conversation data to deserialize, returning null");
         return null;
     }
     try {
-        logger.debug("[redisUtils] Deserializing conversation", {
-            dataLength: data.length,
-            dataPreview: data.substring(0, 100) + (data.length > 100 ? "..." : ""),
-        });
         // Parse as unknown first, then validate before casting
         const parsedData = JSON.parse(data);
         // Check if the parsed data is an object with required properties
package/dist/neurolink.js CHANGED
@@ -2089,12 +2089,22 @@ Current user's request: ${currentInput}`;
 if (self.conversationMemory && enhancedOptions.context?.sessionId) {
     const sessionId = enhancedOptions.context?.sessionId;
     const userId = enhancedOptions.context?.userId;
+    let providerDetails = undefined;
+    if (enhancedOptions.model) {
+        providerDetails = {
+            provider: providerName,
+            model: enhancedOptions.model,
+        };
+    }
     try {
-        await self.conversationMemory.storeConversationTurn(
-        logger.debug("Stream conversation turn stored", {
+        await self.conversationMemory.storeConversationTurn({
             sessionId,
-
-
+            userId,
+            userMessage: originalPrompt ?? "",
+            aiResponse: accumulatedContent,
+            startTimeStamp: new Date(startTime),
+            providerDetails,
+            enableSummarization: enhancedOptions.enableSummarization,
         });
     }
     catch (error) {
@@ -2194,6 +2204,7 @@ Current user's request: ${currentInput}`;
 const enhancedSystemPrompt = this.createToolAwareSystemPrompt(options.systemPrompt, availableTools);
 // Get conversation messages for context
 const conversationMessages = await getConversationMessages(this.conversationMemory, {
+    ...options,
     prompt: options.input.text,
     context: options.context,
 });
@@ -2297,12 +2308,22 @@ Current user's request: ${currentInput}`;
 const sessionId = enhancedOptions?.context?.sessionId;
 const userId = enhancedOptions?.context
     ?.userId;
+let providerDetails = undefined;
+if (options.model) {
+    providerDetails = {
+        provider: providerName,
+        model: options.model,
+    };
+}
 try {
-    await self.conversationMemory.storeConversationTurn(
-    logger.debug("Fallback stream conversation turn stored", {
+    await self.conversationMemory.storeConversationTurn({
         sessionId: sessionId || options.context?.sessionId,
-
-
+        userId: userId || options.context?.userId,
+        userMessage: originalPrompt ?? "",
+        aiResponse: fallbackAccumulatedContent,
+        startTimeStamp: new Date(startTime),
+        providerDetails,
+        enableSummarization: enhancedOptions?.enableSummarization,
     });
 }
 catch (error) {
@@ -4231,23 +4252,9 @@ Current user's request: ${currentInput}`;
 // Import the integration module
 const { initializeConversationMemory } = await import("./core/conversationMemoryInitializer.js");
 // Use the integration module to create the appropriate memory manager
-const memoryManagerCreateStartTime = process.hrtime.bigint();
 const memoryManager = await initializeConversationMemory(this.conversationMemoryConfig);
 // Assign to conversationMemory with proper type to handle both memory manager types
 this.conversationMemory = memoryManager;
-const memoryManagerCreateEndTime = process.hrtime.bigint();
-const memoryManagerCreateDurationNs = memoryManagerCreateEndTime - memoryManagerCreateStartTime;
-logger.info(`[NeuroLink] ✅ LOG_POINT_G004_MEMORY_LAZY_INIT_SUCCESS`, {
-    logPoint: "G004_MEMORY_LAZY_INIT_SUCCESS",
-    generateInternalId,
-    timestamp: new Date().toISOString(),
-    elapsedMs: Date.now() - generateInternalStartTime,
-    elapsedNs: (process.hrtime.bigint() - generateInternalHrTimeStart).toString(),
-    memoryManagerCreateDurationNs: memoryManagerCreateDurationNs.toString(),
-    memoryManagerCreateDurationMs: Number(memoryManagerCreateDurationNs) / 1000000,
-    storageType: process.env.STORAGE_TYPE || "memory",
-    message: "Lazy conversation memory initialization completed successfully",
-});
 // Reset the lazy init flag since we've now initialized
 this.conversationMemoryNeedsInit = false;
 }
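Taken together with the one-line additions to `generateTypes.d.ts` and `streamTypes.d.ts`, these hunks show `enableSummarization` becoming a per-call option that is forwarded into `storeConversationTurn` and `buildContextMessages`. A sketch of the calling side, assuming `generate` accepts the flag alongside the existing `context.sessionId` plumbing shown above; the session and user IDs are illustrative:

```typescript
import { NeuroLink } from "@juspay/neurolink";

const neurolink = new NeuroLink({
  conversationMemory: { enabled: true, enableSummarization: true },
});

// Repeated calls with the same sessionId reuse stored turns; the per-call
// enableSummarization flag rides along into storeConversationTurn (see the
// hunks above), so summarization can be toggled per request.
const result = await neurolink.generate({
  input: { text: "Summarize what we decided earlier in this session." },
  context: { sessionId: "demo-session", userId: "user-42" },
  enableSummarization: true,
});

console.log(result.content);
```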
@@ -11,14 +11,10 @@ export type ConversationMemoryConfig = {
     enabled: boolean;
     /** Maximum number of sessions to keep in memory (default: 50) */
     maxSessions?: number;
-    /** Maximum number of conversation turns to keep per session (default: 20) */
-    maxTurnsPerSession?: number;
     /** Enable automatic summarization */
     enableSummarization?: boolean;
-    /**
-
-    /** Target turn count for the summary */
-    summarizationTargetTurns?: number;
+    /** Token threshold to trigger summarization (optional - defaults to 80% of model context) */
+    tokenThreshold?: number;
     /** Provider to use for summarization */
     summarizationProvider?: string;
     /** Model to use for summarization */
@@ -29,6 +25,12 @@ export type ConversationMemoryConfig = {
     mem0Config?: Mem0Config;
     /** Redis configuration (optional) - overrides environment variables */
     redisConfig?: RedisStorageConfig;
+    /** @deprecated Use tokenThreshold instead - Maximum number of conversation turns to keep per session (default: 20) */
+    maxTurnsPerSession?: number;
+    /** @deprecated Use tokenThreshold instead - Turn count to trigger summarization */
+    summarizationThresholdTurns?: number;
+    /** @deprecated Use tokenThreshold instead - Target turn count for the summary */
+    summarizationTargetTurns?: number;
 };
 /**
  * Complete memory for a conversation session
@@ -47,6 +49,16 @@ export type SessionMemory = {
     createdAt: number;
     /** When this session was last active */
     lastActivity: number;
+    /** Pointer to last summarized message ID (NEW - for token-based memory) */
+    summarizedUpToMessageId?: string;
+    /** Stored summary message that condenses conversation history up to summarizedUpToMessageId */
+    summarizedMessage?: string;
+    /** Per-session token threshold override (NEW - for token-based memory) */
+    tokenThreshold?: number;
+    /** Cached token count for performance (NEW - for token-based memory) */
+    lastTokenCount?: number;
+    /** When token count was last calculated (NEW - for token-based memory) */
+    lastCountedAt?: number;
     /** Optional session metadata */
     metadata?: {
         /** User role or permissions */
@@ -70,13 +82,13 @@ export type ConversationMemoryStats = {
  * Chat message format for conversation history
  */
 export type ChatMessage = {
+    /** Unique message identifier (required for token-based memory) */
+    id: string;
     /** Role/type of the message */
     role: "user" | "assistant" | "system" | "tool_call" | "tool_result";
     /** Content of the message */
     content: string;
-    /**
-    id?: string;
-    /** Timestamp (optional) - for new format */
+    /** Timestamp (ISO string) */
     timestamp?: string;
     /** Tool name (optional) - for tool_call/tool_result messages */
     tool?: string;
@@ -90,6 +102,17 @@ export type ChatMessage = {
         type?: string;
         error?: string;
     };
+    /** Message metadata (NEW - for token-based memory) */
+    metadata?: {
+        /** Is this a summary message? */
+        isSummary?: boolean;
+        /** First message ID that this summary covers */
+        summarizesFrom?: string;
+        /** Last message ID that this summary covers */
+        summarizesTo?: string;
+        /** Was this message truncated due to token limits? */
+        truncated?: boolean;
+    };
 };
 /**
  * Multimodal message types - Re-exported from multimodal.ts
@@ -152,6 +175,18 @@ export type SessionIdentifier = {
     sessionId: string;
     userId?: string;
 };
+/**
+ * Options for storing a conversation turn
+ */
+export type StoreConversationTurnOptions = {
+    sessionId: string;
+    userId?: string;
+    userMessage: string;
+    aiResponse: string;
+    startTimeStamp?: Date;
+    providerDetails?: ProviderDetails;
+    enableSummarization?: boolean;
+};
 /**
  * Lightweight session metadata for efficient session listing
  * Contains only essential information without heavy message arrays
@@ -179,6 +214,16 @@ export type ConversationBase = {
     createdAt: string;
     /** When this conversation was last updated */
     updatedAt: string;
+    /** Pointer to last summarized message (token-based memory) */
+    summarizedUpToMessageId?: string;
+    /** Stored summary message that condenses conversation history up to summarizedUpToMessageId */
+    summarizedMessage?: string;
+    /** Per-session token threshold override */
+    tokenThreshold?: number;
+    /** Cached token count for efficiency */
+    lastTokenCount?: number;
+    /** Timestamp of last token count */
+    lastCountedAt?: number;
 };
 /**
  * Redis conversation storage object format
@@ -250,3 +295,7 @@ export type RedisStorageConfig = {
         [key: string]: string | number | boolean | undefined;
     };
 };
+export type ProviderDetails = {
+    provider: string;
+    model: string;
+};