@juspay/neurolink 8.19.1 → 8.20.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/cli/loop/optionsSchema.js +4 -0
- package/dist/config/conversationMemory.d.ts +15 -0
- package/dist/config/conversationMemory.js +22 -3
- package/dist/core/conversationMemoryFactory.js +0 -3
- package/dist/core/conversationMemoryInitializer.js +1 -9
- package/dist/core/conversationMemoryManager.d.ts +31 -8
- package/dist/core/conversationMemoryManager.js +174 -80
- package/dist/core/redisConversationMemoryManager.d.ts +28 -13
- package/dist/core/redisConversationMemoryManager.js +211 -121
- package/dist/lib/config/conversationMemory.d.ts +15 -0
- package/dist/lib/config/conversationMemory.js +22 -3
- package/dist/lib/core/conversationMemoryFactory.js +0 -3
- package/dist/lib/core/conversationMemoryInitializer.js +1 -9
- package/dist/lib/core/conversationMemoryManager.d.ts +31 -8
- package/dist/lib/core/conversationMemoryManager.js +174 -80
- package/dist/lib/core/redisConversationMemoryManager.d.ts +28 -13
- package/dist/lib/core/redisConversationMemoryManager.js +211 -121
- package/dist/lib/neurolink.js +29 -22
- package/dist/lib/types/conversation.d.ts +58 -9
- package/dist/lib/types/generateTypes.d.ts +1 -0
- package/dist/lib/types/sdkTypes.d.ts +1 -1
- package/dist/lib/types/streamTypes.d.ts +1 -0
- package/dist/lib/utils/conversationMemory.d.ts +43 -1
- package/dist/lib/utils/conversationMemory.js +181 -5
- package/dist/lib/utils/conversationMemoryUtils.js +16 -1
- package/dist/lib/utils/redis.js +0 -5
- package/dist/neurolink.js +29 -22
- package/dist/types/conversation.d.ts +58 -9
- package/dist/types/generateTypes.d.ts +1 -0
- package/dist/types/sdkTypes.d.ts +1 -1
- package/dist/types/streamTypes.d.ts +1 -0
- package/dist/utils/conversationMemory.d.ts +43 -1
- package/dist/utils/conversationMemory.js +181 -5
- package/dist/utils/conversationMemoryUtils.js +16 -1
- package/dist/utils/redis.js +0 -5
- package/package.json +1 -1
package/dist/types/sdkTypes.d.ts
CHANGED
|
@@ -22,4 +22,4 @@ export type { Unknown, UnknownRecord, UnknownArray, JsonValue, JsonObject, JsonA
|
|
|
22
22
|
export type { EvaluationData, EvaluationContext, EnhancedEvaluationResult, EvaluationRequest, EvaluationCriteria, } from "./evaluation.js";
|
|
23
23
|
export type { TaskType, TaskClassification, ClassificationScores, ClassificationStats, ClassificationValidation, } from "./taskClassificationTypes.js";
|
|
24
24
|
export type { DomainType, DomainConfig, DomainTemplate, DomainConfigOptions, DomainEvaluationCriteria, DomainValidationRule, } from "./domainTypes.js";
|
|
25
|
-
export type { ConversationMemoryConfig, SessionMemory,
|
|
25
|
+
export type { ConversationMemoryConfig, SessionMemory, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, } from "./conversation.js";
|
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
* Conversation Memory Utilities
|
|
3
3
|
* Handles configuration merging and conversation memory operations
|
|
4
4
|
*/
|
|
5
|
-
import type { ConversationMemoryConfig, ChatMessage } from "../types/conversation.js";
|
|
5
|
+
import type { ConversationMemoryConfig, ChatMessage, SessionMemory } from "../types/conversation.js";
|
|
6
6
|
import type { ConversationMemoryManager } from "../core/conversationMemoryManager.js";
|
|
7
7
|
import type { RedisConversationMemoryManager } from "../core/redisConversationMemoryManager.js";
|
|
8
8
|
import type { TextGenerationOptions, TextGenerationResult } from "../types/generateTypes.js";
|
|
@@ -20,3 +20,45 @@ export declare function getConversationMessages(conversationMemory: Conversation
|
|
|
20
20
|
* Saves user messages and AI responses for conversation memory
|
|
21
21
|
*/
|
|
22
22
|
export declare function storeConversationTurn(conversationMemory: ConversationMemoryManager | RedisConversationMemoryManager | null | undefined, originalOptions: TextGenerationOptions, result: TextGenerationResult, startTimeStamp?: Date | undefined): Promise<void>;
|
|
23
|
+
/**
|
|
24
|
+
* Build context messages from pointer onwards (token-based memory)
|
|
25
|
+
* Returns summary message (if exists) + all messages after the summarized pointer
|
|
26
|
+
* @param session - Session memory with pointer
|
|
27
|
+
* @returns Context messages to send to LLM
|
|
28
|
+
*/
|
|
29
|
+
export declare function buildContextFromPointer(session: SessionMemory): ChatMessage[];
|
|
30
|
+
/**
|
|
31
|
+
* Create summarization prompt from message history
|
|
32
|
+
* Used by both in-memory and Redis conversation managers
|
|
33
|
+
* @param history - Messages to summarize
|
|
34
|
+
* @param previousSummary - Optional previous summary to build upon
|
|
35
|
+
*/
|
|
36
|
+
export declare function createSummarizationPrompt(history: ChatMessage[], previousSummary?: string): string;
|
|
37
|
+
/**
|
|
38
|
+
* Calculate token threshold based on model's output token limit
|
|
39
|
+
* Uses existing provider token limits as proxy for context window
|
|
40
|
+
* @param provider - AI provider name
|
|
41
|
+
* @param model - Model name
|
|
42
|
+
* @returns Token threshold (80% of model's token limit)
|
|
43
|
+
*/
|
|
44
|
+
export declare function calculateTokenThreshold(provider: string, model: string): number;
|
|
45
|
+
/**
|
|
46
|
+
* Get effective token threshold for a session
|
|
47
|
+
* Priority: session override > env var > model-based (80%) > fallback
|
|
48
|
+
* @param provider - AI provider name
|
|
49
|
+
* @param model - Model name
|
|
50
|
+
* @param envOverride - Environment variable override
|
|
51
|
+
* @param sessionOverride - Per-session token threshold override
|
|
52
|
+
* @returns Effective token threshold
|
|
53
|
+
*/
|
|
54
|
+
export declare function getEffectiveTokenThreshold(provider: string, model: string, envOverride?: number, sessionOverride?: number): number;
|
|
55
|
+
/**
|
|
56
|
+
* Generate summary using configured provider and model
|
|
57
|
+
* Centralized summarization logic used by both ConversationMemoryManager and RedisConversationMemoryManager
|
|
58
|
+
* @param messages - Messages to summarize
|
|
59
|
+
* @param config - Conversation memory configuration containing provider/model settings
|
|
60
|
+
* @param previousSummary - Optional previous summary to build upon
|
|
61
|
+
* @param logPrefix - Prefix for log messages (e.g., "[ConversationMemory]" or "[RedisConversationMemoryManager]")
|
|
62
|
+
* @returns Summary text or null if generation fails
|
|
63
|
+
*/
|
|
64
|
+
export declare function generateSummary(messages: ChatMessage[], config: ConversationMemoryConfig, logPrefix?: string, previousSummary?: string): Promise<string | null>;
|
|
@@ -2,8 +2,10 @@
|
|
|
2
2
|
* Conversation Memory Utilities
|
|
3
3
|
* Handles configuration merging and conversation memory operations
|
|
4
4
|
*/
|
|
5
|
-
import { getConversationMemoryDefaults } from "../config/conversationMemory.js";
|
|
5
|
+
import { getConversationMemoryDefaults, MEMORY_THRESHOLD_PERCENTAGE, DEFAULT_FALLBACK_THRESHOLD, } from "../config/conversationMemory.js";
|
|
6
|
+
import { TokenUtils } from "../constants/tokens.js";
|
|
6
7
|
import { logger } from "./logger.js";
|
|
8
|
+
import { NeuroLink } from "../neurolink.js";
|
|
7
9
|
/**
|
|
8
10
|
* Apply conversation memory defaults to user configuration
|
|
9
11
|
* Merges user config with environment variables and default values
|
|
@@ -34,8 +36,8 @@ export async function getConversationMessages(conversationMemory, options) {
|
|
|
34
36
|
try {
|
|
35
37
|
// Extract userId from context
|
|
36
38
|
const userId = options.context?.userId;
|
|
37
|
-
|
|
38
|
-
const messages = await conversationMemory.buildContextMessages(sessionId, userId);
|
|
39
|
+
const enableSummarization = options.enableSummarization ?? undefined;
|
|
40
|
+
const messages = await conversationMemory.buildContextMessages(sessionId, userId, enableSummarization);
|
|
39
41
|
logger.debug("[conversationMemoryUtils] Conversation messages retrieved successfully", {
|
|
40
42
|
sessionId,
|
|
41
43
|
messageCount: messages.length,
|
|
@@ -100,9 +102,24 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
|
|
|
100
102
|
return;
|
|
101
103
|
}
|
|
102
104
|
const userMessage = originalOptions.originalPrompt || originalOptions.prompt || "";
|
|
103
|
-
const aiResponse = result.content;
|
|
105
|
+
const aiResponse = result.content ?? "";
|
|
106
|
+
let providerDetails = undefined;
|
|
107
|
+
if (result.provider && result.model) {
|
|
108
|
+
providerDetails = {
|
|
109
|
+
provider: result.provider,
|
|
110
|
+
model: result.model,
|
|
111
|
+
};
|
|
112
|
+
}
|
|
104
113
|
try {
|
|
105
|
-
await conversationMemory.storeConversationTurn(
|
|
114
|
+
await conversationMemory.storeConversationTurn({
|
|
115
|
+
sessionId,
|
|
116
|
+
userId,
|
|
117
|
+
userMessage,
|
|
118
|
+
aiResponse,
|
|
119
|
+
startTimeStamp,
|
|
120
|
+
providerDetails,
|
|
121
|
+
enableSummarization: originalOptions.enableSummarization,
|
|
122
|
+
});
|
|
106
123
|
logger.debug("[conversationMemoryUtils] Conversation turn stored successfully", {
|
|
107
124
|
sessionId,
|
|
108
125
|
userId,
|
|
@@ -121,3 +138,162 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
|
|
|
121
138
|
});
|
|
122
139
|
}
|
|
123
140
|
}
|
|
141
|
+
/**
|
|
142
|
+
* Build context messages from pointer onwards (token-based memory)
|
|
143
|
+
* Returns summary message (if exists) + all messages after the summarized pointer
|
|
144
|
+
* @param session - Session memory with pointer
|
|
145
|
+
* @returns Context messages to send to LLM
|
|
146
|
+
*/
|
|
147
|
+
export function buildContextFromPointer(session) {
|
|
148
|
+
if (!session.summarizedUpToMessageId || !session.summarizedMessage) {
|
|
149
|
+
return session.messages;
|
|
150
|
+
}
|
|
151
|
+
// TODO: find a cleaner way to write this pointer lookup
|
|
152
|
+
const pointerIndex = session.messages.findIndex((msg) => msg.id === session.summarizedUpToMessageId);
|
|
153
|
+
if (pointerIndex === -1) {
|
|
154
|
+
logger.warn("Pointer message not found, returning all messages", {
|
|
155
|
+
sessionId: session.sessionId,
|
|
156
|
+
pointer: session.summarizedUpToMessageId,
|
|
157
|
+
totalMessages: session.messages.length,
|
|
158
|
+
});
|
|
159
|
+
return session.messages;
|
|
160
|
+
}
|
|
161
|
+
const messagesAfterPointer = session.messages.slice(pointerIndex + 1);
|
|
162
|
+
// Construct context: summary message + recent messages
|
|
163
|
+
const summaryMessage = {
|
|
164
|
+
id: `summary-${session.summarizedUpToMessageId}`,
|
|
165
|
+
role: "system",
|
|
166
|
+
content: `Previous conversation summary: ${session.summarizedMessage}`,
|
|
167
|
+
timestamp: new Date().toISOString(),
|
|
168
|
+
metadata: {
|
|
169
|
+
isSummary: true,
|
|
170
|
+
summarizesTo: session.summarizedUpToMessageId,
|
|
171
|
+
},
|
|
172
|
+
};
|
|
173
|
+
logger.debug("Building context with summary", {
|
|
174
|
+
sessionId: session.sessionId,
|
|
175
|
+
pointerIndex,
|
|
176
|
+
messagesAfterPointer: messagesAfterPointer.length,
|
|
177
|
+
totalMessages: session.messages.length,
|
|
178
|
+
summaryLength: session.summarizedMessage.length,
|
|
179
|
+
});
|
|
180
|
+
return [summaryMessage, ...messagesAfterPointer];
|
|
181
|
+
}
|
|
182
|
+
/**
|
|
183
|
+
* Create summarization prompt from message history
|
|
184
|
+
* Used by both in-memory and Redis conversation managers
|
|
185
|
+
* @param history - Messages to summarize
|
|
186
|
+
* @param previousSummary - Optional previous summary to build upon
|
|
187
|
+
*/
|
|
188
|
+
export function createSummarizationPrompt(history, previousSummary) {
|
|
189
|
+
const formattedHistory = history
|
|
190
|
+
.map((msg) => `${msg.role}: ${msg.content}`)
|
|
191
|
+
.join("\n\n");
|
|
192
|
+
const previousSummarySection = previousSummary
|
|
193
|
+
? `Previous Summary:
|
|
194
|
+
---
|
|
195
|
+
${previousSummary}
|
|
196
|
+
---
|
|
197
|
+
|
|
198
|
+
`
|
|
199
|
+
: "";
|
|
200
|
+
return `
|
|
201
|
+
You are a context summarization AI. Your task is to condense the following conversation history for another AI assistant.
|
|
202
|
+
${previousSummary ? "Build upon the previous summary and incorporate the new conversation turns below." : ""}
|
|
203
|
+
The summary must be a concise, third-person narrative that retains all critical information, including key entities, technical details, decisions made, and any specific dates or times mentioned.
|
|
204
|
+
Ensure the summary flows logically and is ready to be used as context for the next turn in the conversation.
|
|
205
|
+
|
|
206
|
+
${previousSummarySection}Conversation History to Summarize:
|
|
207
|
+
---
|
|
208
|
+
${formattedHistory}
|
|
209
|
+
---
|
|
210
|
+
`.trim();
|
|
211
|
+
}
|
|
212
|
+
/**
|
|
213
|
+
* Calculate token threshold based on model's output token limit
|
|
214
|
+
* Uses existing provider token limits as proxy for context window
|
|
215
|
+
* @param provider - AI provider name
|
|
216
|
+
* @param model - Model name
|
|
217
|
+
* @returns Token threshold (80% of model's token limit)
|
|
218
|
+
*/
|
|
219
|
+
export function calculateTokenThreshold(provider, model) {
|
|
220
|
+
try {
|
|
221
|
+
// Get model's token limit from existing TokenUtils
|
|
222
|
+
const modelTokenLimit = TokenUtils.getProviderTokenLimit(provider, model);
|
|
223
|
+
// Return 80% of token limit for conversation memory
|
|
224
|
+
// This is conservative since output limits are typically smaller than input limits
|
|
225
|
+
return Math.floor(modelTokenLimit * MEMORY_THRESHOLD_PERCENTAGE);
|
|
226
|
+
}
|
|
227
|
+
catch (error) {
|
|
228
|
+
logger.warn("Failed to calculate model threshold, using fallback", {
|
|
229
|
+
provider,
|
|
230
|
+
model,
|
|
231
|
+
error: error instanceof Error ? error.message : String(error),
|
|
232
|
+
});
|
|
233
|
+
return DEFAULT_FALLBACK_THRESHOLD;
|
|
234
|
+
}
|
|
235
|
+
}
|
|
236
|
+
/**
|
|
237
|
+
* Get effective token threshold for a session
|
|
238
|
+
* Priority: session override > env var > model-based (80%) > fallback
|
|
239
|
+
* @param provider - AI provider name
|
|
240
|
+
* @param model - Model name
|
|
241
|
+
* @param envOverride - Environment variable override
|
|
242
|
+
* @param sessionOverride - Per-session token threshold override
|
|
243
|
+
* @returns Effective token threshold
|
|
244
|
+
*/
|
|
245
|
+
export function getEffectiveTokenThreshold(provider, model, envOverride, sessionOverride) {
|
|
246
|
+
// Priority 1: Session-level override
|
|
247
|
+
if (sessionOverride && sessionOverride > 0) {
|
|
248
|
+
return sessionOverride;
|
|
249
|
+
}
|
|
250
|
+
// Priority 2: Environment variable override
|
|
251
|
+
if (envOverride && envOverride > 0) {
|
|
252
|
+
return envOverride;
|
|
253
|
+
}
|
|
254
|
+
// Priority 3: Model-based calculation (80% of context window)
|
|
255
|
+
try {
|
|
256
|
+
return calculateTokenThreshold(provider, model);
|
|
257
|
+
}
|
|
258
|
+
catch (error) {
|
|
259
|
+
logger.warn("Failed to calculate effective threshold, using fallback", {
|
|
260
|
+
provider,
|
|
261
|
+
model,
|
|
262
|
+
error: error instanceof Error ? error.message : String(error),
|
|
263
|
+
});
|
|
264
|
+
// Priority 4: Fallback for unknown models
|
|
265
|
+
return DEFAULT_FALLBACK_THRESHOLD;
|
|
266
|
+
}
|
|
267
|
+
}
|
|
268
|
+
/**
|
|
269
|
+
* Generate summary using configured provider and model
|
|
270
|
+
* Centralized summarization logic used by both ConversationMemoryManager and RedisConversationMemoryManager
|
|
271
|
+
* @param messages - Messages to summarize
|
|
272
|
+
* @param config - Conversation memory configuration containing provider/model settings
|
|
273
|
+
* @param previousSummary - Optional previous summary to build upon
|
|
274
|
+
* @param logPrefix - Prefix for log messages (e.g., "[ConversationMemory]" or "[RedisConversationMemoryManager]")
|
|
275
|
+
* @returns Summary text or null if generation fails
|
|
276
|
+
*/
|
|
277
|
+
export async function generateSummary(messages, config, logPrefix = "[ConversationMemory]", previousSummary) {
|
|
278
|
+
const summarizationPrompt = createSummarizationPrompt(messages, previousSummary);
|
|
279
|
+
const summarizer = new NeuroLink({
|
|
280
|
+
conversationMemory: { enabled: false },
|
|
281
|
+
});
|
|
282
|
+
try {
|
|
283
|
+
if (!config.summarizationProvider || !config.summarizationModel) {
|
|
284
|
+
logger.error(`${logPrefix} Missing summarization provider`);
|
|
285
|
+
return null;
|
|
286
|
+
}
|
|
287
|
+
const summaryResult = await summarizer.generate({
|
|
288
|
+
input: { text: summarizationPrompt },
|
|
289
|
+
provider: config.summarizationProvider,
|
|
290
|
+
model: config.summarizationModel,
|
|
291
|
+
disableTools: true,
|
|
292
|
+
});
|
|
293
|
+
return summaryResult.content || null;
|
|
294
|
+
}
|
|
295
|
+
catch (error) {
|
|
296
|
+
logger.error(`${logPrefix} Error generating summary`, { error });
|
|
297
|
+
return null;
|
|
298
|
+
}
|
|
299
|
+
}
|
|
@@ -58,8 +58,23 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
|
|
|
58
58
|
if (typeof sessionId !== "string" || !sessionId) {
|
|
59
59
|
return;
|
|
60
60
|
}
|
|
61
|
+
let providerDetails = undefined;
|
|
62
|
+
if (result.provider && result.model) {
|
|
63
|
+
providerDetails = {
|
|
64
|
+
provider: result.provider,
|
|
65
|
+
model: result.model,
|
|
66
|
+
};
|
|
67
|
+
}
|
|
61
68
|
try {
|
|
62
|
-
await conversationMemory.storeConversationTurn(
|
|
69
|
+
await conversationMemory.storeConversationTurn({
|
|
70
|
+
sessionId,
|
|
71
|
+
userId,
|
|
72
|
+
userMessage: originalOptions.originalPrompt || originalOptions.prompt || "",
|
|
73
|
+
aiResponse: result.content,
|
|
74
|
+
startTimeStamp,
|
|
75
|
+
providerDetails,
|
|
76
|
+
enableSummarization: originalOptions.enableSummarization,
|
|
77
|
+
});
|
|
63
78
|
logger.debug("Conversation turn stored", {
|
|
64
79
|
sessionId,
|
|
65
80
|
userId,
|
package/dist/utils/redis.js
CHANGED
|
@@ -91,14 +91,9 @@ export function serializeConversation(conversation) {
|
|
|
91
91
|
*/
|
|
92
92
|
export function deserializeConversation(data) {
|
|
93
93
|
if (!data) {
|
|
94
|
-
logger.debug("[redisUtils] No conversation data to deserialize, returning null");
|
|
95
94
|
return null;
|
|
96
95
|
}
|
|
97
96
|
try {
|
|
98
|
-
logger.debug("[redisUtils] Deserializing conversation", {
|
|
99
|
-
dataLength: data.length,
|
|
100
|
-
dataPreview: data.substring(0, 100) + (data.length > 100 ? "..." : ""),
|
|
101
|
-
});
|
|
102
97
|
// Parse as unknown first, then validate before casting
|
|
103
98
|
const parsedData = JSON.parse(data);
|
|
104
99
|
// Check if the parsed data is an object with required properties
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@juspay/neurolink",
|
|
3
|
-
"version": "8.
|
|
3
|
+
"version": "8.20.0",
|
|
4
4
|
"description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
|
|
5
5
|
"author": {
|
|
6
6
|
"name": "Juspay Technologies",
|