@juspay/neurolink 7.12.0 → 7.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
1
+ ## [7.13.0](https://github.com/juspay/neurolink/compare/v7.12.0...v7.13.0) (2025-08-14)
2
+
3
+ ### Features
4
+
5
+ - **(SDK):** Add context summarizer for conversation BZ-43204 ([38231c4](https://github.com/juspay/neurolink/commit/38231c475b7546c16db741010173794251a7dbaa))
6
+
1
7
  ## [7.12.0](https://github.com/juspay/neurolink/compare/v7.11.1...v7.12.0) (2025-08-14)
2
8
 
3
9
  ### Features
package/README.md CHANGED
@@ -331,6 +331,7 @@ console.log(productData.name, productData.price, productData.features);
331
331
  - 🔍 **Smart Model Resolution** - Fuzzy matching, aliases, and capability-based search across all providers
332
332
  - 🏠 **Local AI Support** - Run completely offline with Ollama or through LiteLLM proxy
333
333
  - 🌍 **Universal Model Access** - Direct providers + 100,000+ models via Hugging Face + 100+ models via LiteLLM
334
+ - 🧠 **Automatic Context Summarization** - Stateful, long-running conversations with automatic history summarization
334
335
  - 📊 **Analytics & Evaluation** - Built-in usage tracking and AI-powered quality assessment
335
336
 
336
337
  ## 🛠️ MCP Integration Status ✅ **BUILT-IN TOOLS WORKING**
@@ -0,0 +1,28 @@
1
import type { TextGenerationOptions, TextGenerationResult } from "../core/types.js";
import type { ContextManagerConfig } from "./types.js";
/** Signature of the internal text-generation function used for the summarization call. */
type InternalGenerator = (options: TextGenerationOptions) => Promise<TextGenerationResult>;
/**
 * Manages conversation context, automatically summarizing it when it
 * exceeds a specified word count limit.
 */
export declare class ContextManager {
    // System-message text prepended when summarization throws and history is truncated instead.
    private static readonly SUMMARIZATION_FAILED_WARNING;
    // System-message text prepended when summarization returns empty or non-string content.
    private static readonly SUMMARIZATION_EMPTY_WARNING;
    // Ordered conversation history (system/user/assistant messages).
    private history;
    // Running estimated word count of `history`.
    private wordCount;
    // Generator invoked directly for summarization, bypassing the public entry point.
    private readonly internalGenerator;
    // Water marks, summarization provider/model, prompt builder and word-count estimator.
    private readonly config;
    constructor(generatorFunction: InternalGenerator, config: ContextManagerConfig, initialContext?: string);
    /**
     * Appends a turn to the permanent history; may trigger summarization
     * once the running word count exceeds the configured high-water mark.
     */
    addTurn(role: "user" | "assistant", message: string): Promise<void>;
    /**
     * Formats the history including the latest user turn for the prompt, without modifying the permanent history.
     */
    getContextForPrompt(role: "user", message: string): string;
    /** Formats the current permanent history into a single prompt string. */
    getCurrentContext(): string;
    private _summarize;
    /**
     * Truncates the history to a specific word count, preserving the most recent messages.
     */
    private _truncateHistory;
}
export {};
@@ -0,0 +1,102 @@
1
+ import { logger } from "../utils/logger.js";
2
+ import { formatHistoryToString } from "./utils.js";
3
/**
 * Manages conversation context, automatically summarizing it when it
 * exceeds a specified word count limit.
 *
 * Invariants maintained: `this.wordCount` mirrors the estimated word count of
 * `this.history`, and every message carries a cached `wordCount`.
 */
export class ContextManager {
    static SUMMARIZATION_FAILED_WARNING = "[System Warning: Context summarization failed. Conversation history has been truncated.]";
    static SUMMARIZATION_EMPTY_WARNING = "[System Warning: Context summarization failed to return valid content. Conversation history has been truncated.]";
    history;
    wordCount;
    internalGenerator;
    config;
    /**
     * @param generatorFunction Internal text-generation function used for summarization calls.
     * @param config Water marks, summarization provider/model, prompt builder and word counter.
     * @param initialContext Seed system message for a fresh conversation.
     */
    constructor(generatorFunction, config, initialContext = "This is the start of the conversation.") {
        this.internalGenerator = generatorFunction;
        this.config = config;
        const initialMessage = { role: "system", content: initialContext };
        initialMessage.wordCount = this.config.estimateWordCount([initialMessage]);
        this.history = [initialMessage];
        this.wordCount = initialMessage.wordCount;
    }
    /**
     * Appends a turn to the permanent history and triggers summarization
     * once the running word count crosses the high-water mark.
     */
    async addTurn(role, message) {
        const newMessage = { role, content: message };
        newMessage.wordCount = this.config.estimateWordCount([newMessage]);
        this.history.push(newMessage);
        this.wordCount += newMessage.wordCount;
        logger.info(`[ContextManager] Current word count: ${this.wordCount} / ${this.config.highWaterMarkWords}`);
        if (this.wordCount > this.config.highWaterMarkWords) {
            await this._summarize();
        }
    }
    /**
     * Formats the history including the latest user turn for the prompt, without modifying the permanent history.
     */
    getContextForPrompt(role, message) {
        const tempHistory = [...this.history, { role, content: message }];
        return formatHistoryToString(tempHistory);
    }
    getCurrentContext() {
        // Format the history into a single string for the provider prompt
        return formatHistoryToString(this.history);
    }
    /**
     * Replaces the history with an LLM-generated summary. On failure or an
     * empty result, falls back to truncation plus a visible system warning.
     */
    async _summarize() {
        try {
            const prompt = this.config.getSummarizationPrompt(this.history, this.config.lowWaterMarkWords);
            // Construct options for the internal method, bypassing the main 'generate' entry point
            const textOptions = {
                prompt,
                provider: this.config.summarizationProvider,
                model: this.config.summarizationModel,
                // Ensure summarization does not trigger more context management or tools
                disableTools: true,
            };
            // Call the internal generation function directly to avoid recursion
            const result = await this.internalGenerator(textOptions);
            if (typeof result.content === "string" && result.content.length > 0) {
                // Replace the history with a single system message containing the summary.
                // FIX: cache its word count so later truncation does not hit the
                // "cache missing" slow path for the summary message.
                const summaryMessage = { role: "system", content: result.content };
                summaryMessage.wordCount = this.config.estimateWordCount([summaryMessage]);
                this.history = [summaryMessage];
                this.wordCount = summaryMessage.wordCount;
                logger.info(`[ContextManager] Summarization complete. New history length: ${this.wordCount} words.`);
            }
            else {
                logger.warn("[ContextManager] Summarization returned empty or non-string content; truncating history as a fallback.");
                this._truncateHistory(this.config.lowWaterMarkWords);
                // FIX: cache the warning's word count up front (previously uncached).
                const warning = { role: "system", content: ContextManager.SUMMARIZATION_EMPTY_WARNING };
                warning.wordCount = this.config.estimateWordCount([warning]);
                this.history.unshift(warning);
                this.wordCount = this.config.estimateWordCount(this.history);
            }
            logger.debug(`[ContextManager] New history: ${JSON.stringify(this.history)}`);
        }
        catch (error) {
            logger.error("Context summarization failed:", { error });
            // Fallback strategy: truncate the history to the target word count.
            this._truncateHistory(this.config.lowWaterMarkWords);
            // FIX: cache the warning's word count up front (previously uncached).
            const warning = { role: "system", content: ContextManager.SUMMARIZATION_FAILED_WARNING };
            warning.wordCount = this.config.estimateWordCount([warning]);
            this.history.unshift(warning);
            this.wordCount = this.config.estimateWordCount(this.history);
        }
    }
    /**
     * Truncates the history to a specific word count, preserving the most recent messages.
     */
    _truncateHistory(wordLimit) {
        if (this.wordCount <= wordLimit) {
            return;
        }
        let runningCount = 0;
        let sliceIndex = this.history.length;
        for (let i = this.history.length - 1; i >= 0; i--) {
            let wordCount = this.history[i].wordCount;
            if (wordCount === undefined) {
                logger.warn(`[ContextManager] Word count cache missing for message at index ${i}. Recalculating.`);
                wordCount = this.config.estimateWordCount([this.history[i]]);
                // FIX: write the recalculated value back so the slow path runs
                // at most once per message.
                this.history[i].wordCount = wordCount;
            }
            runningCount += wordCount;
            if (runningCount > wordLimit) {
                sliceIndex = i + 1;
                break;
            }
        }
        this.history = this.history.slice(sliceIndex);
        // FIX: keep the running counter in sync with the truncated history;
        // previously it was left stale, silently relying on every caller to
        // recompute it afterwards.
        this.wordCount = this.config.estimateWordCount(this.history);
    }
}
@@ -0,0 +1,5 @@
1
import type { ContextManagerConfig } from "./types.js";
/**
 * Default configuration for the ContextManager.
 * (3000-word high-water mark, 800-word summary target, "gemini-2.5-flash" on
 * the "googlevertex" provider as the summarizer — see config.js.)
 */
export declare const defaultContextConfig: ContextManagerConfig;
@@ -0,0 +1,38 @@
1
+ import { formatHistoryToString } from "./utils.js";
2
/**
 * Estimates the word count of a conversation history.
 * A "word" is a maximal run of non-whitespace characters in a message body;
 * a null/empty history counts as zero.
 */
function estimateWordCount(history) {
    if (!history || history.length === 0) {
        return 0;
    }
    let total = 0;
    for (const message of history) {
        const tokens = message.content.trim().split(/\s+/);
        for (const token of tokens) {
            if (token.length > 0) {
                total += 1;
            }
        }
    }
    return total;
}
11
/**
 * Generates the default prompt for summarization.
 * NOTE(review): the template body appears to sit at column 0 in the published
 * artifact — interior whitespace is part of the emitted prompt, since
 * `.trim()` only strips the outer edges; confirm against the source package.
 */
function getDefaultSummarizationPrompt(history, wordLimit) {
    const formattedHistory = formatHistoryToString(history);
    return `
You are a context summarization AI. Your task is to condense the following conversation history for another AI assistant.
The summary must be a concise, third-person narrative that retains all critical information. Pay special attention to retaining key entities, technical details, decisions made, and any specific dates or times mentioned.
Ensure the summary flows logically and is ready to be used as context for the next turn in the conversation.
Please keep the summary under ${wordLimit} words.

Conversation History to Summarize:
---
${formattedHistory}
---
`.trim();
}
28
/**
 * Default configuration for the ContextManager.
 */
export const defaultContextConfig = {
    // Summarize once the running history exceeds ~3000 words...
    highWaterMarkWords: 3000,
    // ...and ask the summarizer to compress it to under ~800 words.
    lowWaterMarkWords: 800,
    // Model and provider used for the internal summarization call.
    summarizationModel: "gemini-2.5-flash",
    summarizationProvider: "googlevertex",
    getSummarizationPrompt: getDefaultSummarizationPrompt,
    estimateWordCount: estimateWordCount,
};
@@ -0,0 +1,20 @@
1
export interface ChatMessage {
    /** Role of the message sender */
    role: "user" | "assistant" | "system";
    /** Content of the message */
    content: string;
    /** Cached word count for performance */
    wordCount?: number;
}
/**
 * Defines the configuration for the ContextManager.
 * This allows for easy customization of the summarization behavior.
 */
export interface ContextManagerConfig {
    /** Summarization is triggered once the history's word count exceeds this. */
    highWaterMarkWords: number;
    /** Target word count for the summarized (or truncated) history. */
    lowWaterMarkWords: number;
    /** Model used for the internal summarization call. */
    summarizationModel: string;
    /** Provider used for the internal summarization call. */
    summarizationProvider: string;
    /** Builds the prompt asking the model to condense `history` to about `wordLimit` words. */
    getSummarizationPrompt: (history: ChatMessage[], wordLimit: number) => string;
    /** Estimates the number of words in a history (compared against the water marks). */
    estimateWordCount: (history: ChatMessage[]) => number;
}
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,7 @@
1
import type { ChatMessage } from "./types.js";
/**
 * Formats a chat history array into a single string for use in a prompt.
 * Each message is rendered as "role: content"; messages are separated by a blank line.
 * @param history The array of ChatMessage objects.
 * @returns A formatted string representing the conversation.
 */
export declare function formatHistoryToString(history: ChatMessage[]): string;
@@ -0,0 +1,8 @@
1
/**
 * Formats a chat history array into a single string for use in a prompt.
 * Each message becomes a "role: content" line; messages are separated by a blank line.
 * @param history The array of ChatMessage objects.
 * @returns A formatted string representing the conversation.
 */
export function formatHistoryToString(history) {
    const rendered = [];
    for (const message of history) {
        rendered.push(`${message.role}: ${message.content}`);
    }
    return rendered.join('\n\n');
}
@@ -0,0 +1,28 @@
1
+ import type { TextGenerationOptions, TextGenerationResult } from "../core/types.js";
2
+ import type { ContextManagerConfig } from "./types.js";
3
+ type InternalGenerator = (options: TextGenerationOptions) => Promise<TextGenerationResult>;
4
+ /**
5
+ * Manages conversation context, automatically summarizing it when it
6
+ * exceeds a specified word count limit.
7
+ */
8
+ export declare class ContextManager {
9
+ private static readonly SUMMARIZATION_FAILED_WARNING;
10
+ private static readonly SUMMARIZATION_EMPTY_WARNING;
11
+ private history;
12
+ private wordCount;
13
+ private readonly internalGenerator;
14
+ private readonly config;
15
+ constructor(generatorFunction: InternalGenerator, config: ContextManagerConfig, initialContext?: string);
16
+ addTurn(role: "user" | "assistant", message: string): Promise<void>;
17
+ /**
18
+ * Formats the history including the latest user turn for the prompt, without modifying the permanent history.
19
+ */
20
+ getContextForPrompt(role: "user", message: string): string;
21
+ getCurrentContext(): string;
22
+ private _summarize;
23
+ /**
24
+ * Truncates the history to a specific word count, preserving the most recent messages.
25
+ */
26
+ private _truncateHistory;
27
+ }
28
+ export {};
@@ -0,0 +1,102 @@
1
+ import { logger } from "../utils/logger.js";
2
+ import { formatHistoryToString } from "./utils.js";
3
/**
 * Manages conversation context, automatically summarizing it when it
 * exceeds a specified word count limit.
 *
 * Invariants maintained: `this.wordCount` mirrors the estimated word count of
 * `this.history`, and every message carries a cached `wordCount`.
 */
export class ContextManager {
    static SUMMARIZATION_FAILED_WARNING = "[System Warning: Context summarization failed. Conversation history has been truncated.]";
    static SUMMARIZATION_EMPTY_WARNING = "[System Warning: Context summarization failed to return valid content. Conversation history has been truncated.]";
    history;
    wordCount;
    internalGenerator;
    config;
    /**
     * @param generatorFunction Internal text-generation function used for summarization calls.
     * @param config Water marks, summarization provider/model, prompt builder and word counter.
     * @param initialContext Seed system message for a fresh conversation.
     */
    constructor(generatorFunction, config, initialContext = "This is the start of the conversation.") {
        this.internalGenerator = generatorFunction;
        this.config = config;
        const initialMessage = { role: "system", content: initialContext };
        initialMessage.wordCount = this.config.estimateWordCount([initialMessage]);
        this.history = [initialMessage];
        this.wordCount = initialMessage.wordCount;
    }
    /**
     * Appends a turn to the permanent history and triggers summarization
     * once the running word count crosses the high-water mark.
     */
    async addTurn(role, message) {
        const newMessage = { role, content: message };
        newMessage.wordCount = this.config.estimateWordCount([newMessage]);
        this.history.push(newMessage);
        this.wordCount += newMessage.wordCount;
        logger.info(`[ContextManager] Current word count: ${this.wordCount} / ${this.config.highWaterMarkWords}`);
        if (this.wordCount > this.config.highWaterMarkWords) {
            await this._summarize();
        }
    }
    /**
     * Formats the history including the latest user turn for the prompt, without modifying the permanent history.
     */
    getContextForPrompt(role, message) {
        const tempHistory = [...this.history, { role, content: message }];
        return formatHistoryToString(tempHistory);
    }
    getCurrentContext() {
        // Format the history into a single string for the provider prompt
        return formatHistoryToString(this.history);
    }
    /**
     * Replaces the history with an LLM-generated summary. On failure or an
     * empty result, falls back to truncation plus a visible system warning.
     */
    async _summarize() {
        try {
            const prompt = this.config.getSummarizationPrompt(this.history, this.config.lowWaterMarkWords);
            // Construct options for the internal method, bypassing the main 'generate' entry point
            const textOptions = {
                prompt,
                provider: this.config.summarizationProvider,
                model: this.config.summarizationModel,
                // Ensure summarization does not trigger more context management or tools
                disableTools: true,
            };
            // Call the internal generation function directly to avoid recursion
            const result = await this.internalGenerator(textOptions);
            if (typeof result.content === "string" && result.content.length > 0) {
                // Replace the history with a single system message containing the summary.
                // FIX: cache its word count so later truncation does not hit the
                // "cache missing" slow path for the summary message.
                const summaryMessage = { role: "system", content: result.content };
                summaryMessage.wordCount = this.config.estimateWordCount([summaryMessage]);
                this.history = [summaryMessage];
                this.wordCount = summaryMessage.wordCount;
                logger.info(`[ContextManager] Summarization complete. New history length: ${this.wordCount} words.`);
            }
            else {
                logger.warn("[ContextManager] Summarization returned empty or non-string content; truncating history as a fallback.");
                this._truncateHistory(this.config.lowWaterMarkWords);
                // FIX: cache the warning's word count up front (previously uncached).
                const warning = { role: "system", content: ContextManager.SUMMARIZATION_EMPTY_WARNING };
                warning.wordCount = this.config.estimateWordCount([warning]);
                this.history.unshift(warning);
                this.wordCount = this.config.estimateWordCount(this.history);
            }
            logger.debug(`[ContextManager] New history: ${JSON.stringify(this.history)}`);
        }
        catch (error) {
            logger.error("Context summarization failed:", { error });
            // Fallback strategy: truncate the history to the target word count.
            this._truncateHistory(this.config.lowWaterMarkWords);
            // FIX: cache the warning's word count up front (previously uncached).
            const warning = { role: "system", content: ContextManager.SUMMARIZATION_FAILED_WARNING };
            warning.wordCount = this.config.estimateWordCount([warning]);
            this.history.unshift(warning);
            this.wordCount = this.config.estimateWordCount(this.history);
        }
    }
    /**
     * Truncates the history to a specific word count, preserving the most recent messages.
     */
    _truncateHistory(wordLimit) {
        if (this.wordCount <= wordLimit) {
            return;
        }
        let runningCount = 0;
        let sliceIndex = this.history.length;
        for (let i = this.history.length - 1; i >= 0; i--) {
            let wordCount = this.history[i].wordCount;
            if (wordCount === undefined) {
                logger.warn(`[ContextManager] Word count cache missing for message at index ${i}. Recalculating.`);
                wordCount = this.config.estimateWordCount([this.history[i]]);
                // FIX: write the recalculated value back so the slow path runs
                // at most once per message.
                this.history[i].wordCount = wordCount;
            }
            runningCount += wordCount;
            if (runningCount > wordLimit) {
                sliceIndex = i + 1;
                break;
            }
        }
        this.history = this.history.slice(sliceIndex);
        // FIX: keep the running counter in sync with the truncated history;
        // previously it was left stale, silently relying on every caller to
        // recompute it afterwards.
        this.wordCount = this.config.estimateWordCount(this.history);
    }
}
@@ -0,0 +1,5 @@
1
+ import type { ContextManagerConfig } from "./types.js";
2
+ /**
3
+ * Default configuration for the ContextManager.
4
+ */
5
+ export declare const defaultContextConfig: ContextManagerConfig;
@@ -0,0 +1,38 @@
1
+ import { formatHistoryToString } from "./utils.js";
2
/**
 * Estimates the word count of a conversation history.
 * A "word" is a maximal run of non-whitespace characters in a message body;
 * a null/empty history counts as zero.
 */
function estimateWordCount(history) {
    if (!history || history.length === 0) {
        return 0;
    }
    let total = 0;
    for (const message of history) {
        const tokens = message.content.trim().split(/\s+/);
        for (const token of tokens) {
            if (token.length > 0) {
                total += 1;
            }
        }
    }
    return total;
}
11
+ /**
12
+ * Generates the default prompt for summarization.
13
+ */
14
+ function getDefaultSummarizationPrompt(history, wordLimit) {
15
+ const formattedHistory = formatHistoryToString(history);
16
+ return `
17
+ You are a context summarization AI. Your task is to condense the following conversation history for another AI assistant.
18
+ The summary must be a concise, third-person narrative that retains all critical information. Pay special attention to retaining key entities, technical details, decisions made, and any specific dates or times mentioned.
19
+ Ensure the summary flows logically and is ready to be used as context for the next turn in the conversation.
20
+ Please keep the summary under ${wordLimit} words.
21
+
22
+ Conversation History to Summarize:
23
+ ---
24
+ ${formattedHistory}
25
+ ---
26
+ `.trim();
27
+ }
28
+ /**
29
+ * Default configuration for the ContextManager.
30
+ */
31
+ export const defaultContextConfig = {
32
+ highWaterMarkWords: 3000,
33
+ lowWaterMarkWords: 800,
34
+ summarizationModel: "gemini-2.5-flash",
35
+ summarizationProvider: "googlevertex",
36
+ getSummarizationPrompt: getDefaultSummarizationPrompt,
37
+ estimateWordCount: estimateWordCount,
38
+ };
@@ -0,0 +1,20 @@
1
+ export interface ChatMessage {
2
+ /** Role of the message sender */
3
+ role: "user" | "assistant" | "system";
4
+ /** Content of the message */
5
+ content: string;
6
+ /** Cached word count for performance */
7
+ wordCount?: number;
8
+ }
9
+ /**
10
+ * Defines the configuration for the ContextManager.
11
+ * This allows for easy customization of the summarization behavior.
12
+ */
13
+ export interface ContextManagerConfig {
14
+ highWaterMarkWords: number;
15
+ lowWaterMarkWords: number;
16
+ summarizationModel: string;
17
+ summarizationProvider: string;
18
+ getSummarizationPrompt: (history: ChatMessage[], wordLimit: number) => string;
19
+ estimateWordCount: (history: ChatMessage[]) => number;
20
+ }
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,7 @@
1
+ import type { ChatMessage } from "./types.js";
2
+ /**
3
+ * Formats a chat history array into a single string for use in a prompt.
4
+ * @param history The array of ChatMessage objects.
5
+ * @returns A formatted string representing the conversation.
6
+ */
7
+ export declare function formatHistoryToString(history: ChatMessage[]): string;
@@ -0,0 +1,8 @@
1
/**
 * Formats a chat history array into a single string for use in a prompt.
 * Each message becomes a "role: content" line; messages are separated by a blank line.
 * @param history The array of ChatMessage objects.
 * @returns A formatted string representing the conversation.
 */
export function formatHistoryToString(history) {
    const rendered = [];
    for (const message of history) {
        rendered.push(`${message.role}: ${message.content}`);
    }
    return rendered.join('\n\n');
}
@@ -41,9 +41,11 @@ export interface MCPServerInfo {
41
41
  hasServer: boolean;
42
42
  metadata?: unknown;
43
43
  }
44
+ import type { ContextManagerConfig } from "./context/types.js";
44
45
  export declare class NeuroLink {
45
46
  private mcpInitialized;
46
47
  private emitter;
48
+ private contextManager;
47
49
  private customTools;
48
50
  private inMemoryServers;
49
51
  private toolCircuitBreakers;
@@ -69,6 +71,21 @@ export declare class NeuroLink {
69
71
  * MAIN ENTRY POINT: Enhanced generate method with new function signature
70
72
  * Replaces both generateText and legacy methods
71
73
  */
74
+ /**
75
+ * Extracts the original prompt text from the provided input.
76
+ * If a string is provided, it returns the string directly.
77
+ * If a GenerateOptions object is provided, it returns the input text from the object.
78
+ * @param optionsOrPrompt The prompt input, either as a string or a GenerateOptions object.
79
+ * @returns The original prompt text as a string.
80
+ */
81
+ private _extractOriginalPrompt;
82
+ /**
83
+ * Enables automatic context summarization for the NeuroLink instance.
84
+ * Once enabled, the instance will maintain conversation history and
85
+ * automatically summarize it when it exceeds token limits.
86
+ * @param config Optional configuration to override default summarization settings.
87
+ */
88
+ enableContextSummarization(config?: Partial<ContextManagerConfig>): void;
72
89
  generate(optionsOrPrompt: GenerateOptions | string): Promise<GenerateResult>;
73
90
  /**
74
91
  * BACKWARD COMPATIBILITY: Legacy generateText method
@@ -29,10 +29,13 @@ import { ErrorFactory, NeuroLinkError, withTimeout, withRetry, isRetriableError,
29
29
  import { EventEmitter } from "events";
30
30
  import { ConversationMemoryManager } from "./core/conversationMemoryManager.js";
31
31
  import { applyConversationMemoryDefaults, getConversationMessages, storeConversationTurn, } from "./utils/conversationMemoryUtils.js";
32
+ import { ContextManager } from "./context/ContextManager.js";
33
+ import { defaultContextConfig } from "./context/config.js";
32
34
  // Core types imported from core/types.js
33
35
  export class NeuroLink {
34
36
  mcpInitialized = false;
35
37
  emitter = new EventEmitter();
38
+ contextManager = null;
36
39
  // Tool registration support
37
40
  customTools = new Map();
38
41
  inMemoryServers = new Map();
@@ -120,7 +123,33 @@ export class NeuroLink {
120
123
  * MAIN ENTRY POINT: Enhanced generate method with new function signature
121
124
  * Replaces both generateText and legacy methods
122
125
  */
126
+ /**
127
+ * Extracts the original prompt text from the provided input.
128
+ * If a string is provided, it returns the string directly.
129
+ * If a GenerateOptions object is provided, it returns the input text from the object.
130
+ * @param optionsOrPrompt The prompt input, either as a string or a GenerateOptions object.
131
+ * @returns The original prompt text as a string.
132
+ */
133
+ _extractOriginalPrompt(optionsOrPrompt) {
134
+ return typeof optionsOrPrompt === 'string' ? optionsOrPrompt : optionsOrPrompt.input.text;
135
+ }
136
+ /**
137
+ * Enables automatic context summarization for the NeuroLink instance.
138
+ * Once enabled, the instance will maintain conversation history and
139
+ * automatically summarize it when it exceeds token limits.
140
+ * @param config Optional configuration to override default summarization settings.
141
+ */
142
+ enableContextSummarization(config) {
143
+ const contextConfig = {
144
+ ...defaultContextConfig,
145
+ ...config,
146
+ };
147
+ // Pass the internal generator function directly, bound to the correct `this` context.
148
+ this.contextManager = new ContextManager(this.generateTextInternal.bind(this), contextConfig);
149
+ logger.info("[NeuroLink] Automatic context summarization enabled.");
150
+ }
123
151
  async generate(optionsOrPrompt) {
152
+ const originalPrompt = this._extractOriginalPrompt(optionsOrPrompt);
124
153
  // Convert string prompt to full options
125
154
  const options = typeof optionsOrPrompt === "string"
126
155
  ? { input: { text: optionsOrPrompt } }
@@ -129,6 +158,11 @@ export class NeuroLink {
129
158
  if (!options.input?.text || typeof options.input.text !== "string") {
130
159
  throw new Error("Input text is required and must be a non-empty string");
131
160
  }
161
+ // Handle Context Management if enabled
162
+ if (this.contextManager) {
163
+ // Get the full context for the prompt without permanently adding the user's turn yet
164
+ options.input.text = this.contextManager.getContextForPrompt("user", options.input.text);
165
+ }
132
166
  const startTime = Date.now();
133
167
  // Emit generation start event
134
168
  this.emitter.emit("generation:start", {
@@ -240,6 +274,11 @@ export class NeuroLink {
240
274
  }
241
275
  : undefined,
242
276
  };
277
+ // Add both the user's turn and the AI's response to the permanent history
278
+ if (this.contextManager) {
279
+ await this.contextManager.addTurn("user", originalPrompt);
280
+ await this.contextManager.addTurn("assistant", generateResult.content);
281
+ }
243
282
  return generateResult;
244
283
  }
245
284
  /**
@@ -41,9 +41,11 @@ export interface MCPServerInfo {
41
41
  hasServer: boolean;
42
42
  metadata?: unknown;
43
43
  }
44
+ import type { ContextManagerConfig } from "./context/types.js";
44
45
  export declare class NeuroLink {
45
46
  private mcpInitialized;
46
47
  private emitter;
48
+ private contextManager;
47
49
  private customTools;
48
50
  private inMemoryServers;
49
51
  private toolCircuitBreakers;
@@ -69,6 +71,21 @@ export declare class NeuroLink {
69
71
  * MAIN ENTRY POINT: Enhanced generate method with new function signature
70
72
  * Replaces both generateText and legacy methods
71
73
  */
74
+ /**
75
+ * Extracts the original prompt text from the provided input.
76
+ * If a string is provided, it returns the string directly.
77
+ * If a GenerateOptions object is provided, it returns the input text from the object.
78
+ * @param optionsOrPrompt The prompt input, either as a string or a GenerateOptions object.
79
+ * @returns The original prompt text as a string.
80
+ */
81
+ private _extractOriginalPrompt;
82
+ /**
83
+ * Enables automatic context summarization for the NeuroLink instance.
84
+ * Once enabled, the instance will maintain conversation history and
85
+ * automatically summarize it when it exceeds token limits.
86
+ * @param config Optional configuration to override default summarization settings.
87
+ */
88
+ enableContextSummarization(config?: Partial<ContextManagerConfig>): void;
72
89
  generate(optionsOrPrompt: GenerateOptions | string): Promise<GenerateResult>;
73
90
  /**
74
91
  * BACKWARD COMPATIBILITY: Legacy generateText method
package/dist/neurolink.js CHANGED
@@ -29,10 +29,13 @@ import { ErrorFactory, NeuroLinkError, withTimeout, withRetry, isRetriableError,
29
29
  import { EventEmitter } from "events";
30
30
  import { ConversationMemoryManager } from "./core/conversationMemoryManager.js";
31
31
  import { applyConversationMemoryDefaults, getConversationMessages, storeConversationTurn, } from "./utils/conversationMemoryUtils.js";
32
+ import { ContextManager } from "./context/ContextManager.js";
33
+ import { defaultContextConfig } from "./context/config.js";
32
34
  // Core types imported from core/types.js
33
35
  export class NeuroLink {
34
36
  mcpInitialized = false;
35
37
  emitter = new EventEmitter();
38
+ contextManager = null;
36
39
  // Tool registration support
37
40
  customTools = new Map();
38
41
  inMemoryServers = new Map();
@@ -120,7 +123,33 @@ export class NeuroLink {
120
123
  * MAIN ENTRY POINT: Enhanced generate method with new function signature
121
124
  * Replaces both generateText and legacy methods
122
125
  */
126
+ /**
127
+ * Extracts the original prompt text from the provided input.
128
+ * If a string is provided, it returns the string directly.
129
+ * If a GenerateOptions object is provided, it returns the input text from the object.
130
+ * @param optionsOrPrompt The prompt input, either as a string or a GenerateOptions object.
131
+ * @returns The original prompt text as a string.
132
+ */
133
+ _extractOriginalPrompt(optionsOrPrompt) {
134
+ return typeof optionsOrPrompt === 'string' ? optionsOrPrompt : optionsOrPrompt.input.text;
135
+ }
136
+ /**
137
+ * Enables automatic context summarization for the NeuroLink instance.
138
+ * Once enabled, the instance will maintain conversation history and
139
+ * automatically summarize it when it exceeds token limits.
140
+ * @param config Optional configuration to override default summarization settings.
141
+ */
142
+ enableContextSummarization(config) {
143
+ const contextConfig = {
144
+ ...defaultContextConfig,
145
+ ...config,
146
+ };
147
+ // Pass the internal generator function directly, bound to the correct `this` context.
148
+ this.contextManager = new ContextManager(this.generateTextInternal.bind(this), contextConfig);
149
+ logger.info("[NeuroLink] Automatic context summarization enabled.");
150
+ }
123
151
  async generate(optionsOrPrompt) {
152
+ const originalPrompt = this._extractOriginalPrompt(optionsOrPrompt);
124
153
  // Convert string prompt to full options
125
154
  const options = typeof optionsOrPrompt === "string"
126
155
  ? { input: { text: optionsOrPrompt } }
@@ -129,6 +158,11 @@ export class NeuroLink {
129
158
  if (!options.input?.text || typeof options.input.text !== "string") {
130
159
  throw new Error("Input text is required and must be a non-empty string");
131
160
  }
161
+ // Handle Context Management if enabled
162
+ if (this.contextManager) {
163
+ // Get the full context for the prompt without permanently adding the user's turn yet
164
+ options.input.text = this.contextManager.getContextForPrompt("user", options.input.text);
165
+ }
132
166
  const startTime = Date.now();
133
167
  // Emit generation start event
134
168
  this.emitter.emit("generation:start", {
@@ -240,6 +274,11 @@ export class NeuroLink {
240
274
  }
241
275
  : undefined,
242
276
  };
277
+ // Add both the user's turn and the AI's response to the permanent history
278
+ if (this.contextManager) {
279
+ await this.contextManager.addTurn("user", originalPrompt);
280
+ await this.contextManager.addTurn("assistant", generateResult.content);
281
+ }
243
282
  return generateResult;
244
283
  }
245
284
  /**
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@juspay/neurolink",
3
- "version": "7.12.0",
3
+ "version": "7.13.0",
4
4
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
5
5
  "author": {
6
6
  "name": "Juspay Technologies",