@juspay/neurolink 7.29.3 → 7.30.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/config/conversationMemoryConfig.js +5 -0
  3. package/dist/core/conversationMemoryManager.d.ts +9 -15
  4. package/dist/core/conversationMemoryManager.js +98 -57
  5. package/dist/core/types.d.ts +3 -1
  6. package/dist/lib/config/conversationMemoryConfig.js +5 -0
  7. package/dist/lib/core/conversationMemoryManager.d.ts +9 -15
  8. package/dist/lib/core/conversationMemoryManager.js +98 -57
  9. package/dist/lib/core/types.d.ts +3 -1
  10. package/dist/lib/neurolink.d.ts +0 -9
  11. package/dist/lib/neurolink.js +7 -39
  12. package/dist/lib/types/conversationTypes.d.ts +10 -0
  13. package/dist/lib/types/generateTypes.d.ts +1 -2
  14. package/dist/lib/utils/conversationMemoryUtils.d.ts +1 -2
  15. package/dist/lib/utils/conversationMemoryUtils.js +6 -7
  16. package/dist/neurolink.d.ts +0 -9
  17. package/dist/neurolink.js +7 -39
  18. package/dist/types/conversationTypes.d.ts +10 -0
  19. package/dist/types/generateTypes.d.ts +1 -2
  20. package/dist/utils/conversationMemoryUtils.d.ts +1 -2
  21. package/dist/utils/conversationMemoryUtils.js +6 -7
  22. package/package.json +1 -1
  23. package/dist/context/ContextManager.d.ts +0 -28
  24. package/dist/context/ContextManager.js +0 -113
  25. package/dist/context/config.d.ts +0 -5
  26. package/dist/context/config.js +0 -42
  27. package/dist/context/types.d.ts +0 -20
  28. package/dist/context/types.js +0 -1
  29. package/dist/context/utils.d.ts +0 -7
  30. package/dist/context/utils.js +0 -8
  31. package/dist/lib/context/ContextManager.d.ts +0 -28
  32. package/dist/lib/context/ContextManager.js +0 -113
  33. package/dist/lib/context/config.d.ts +0 -5
  34. package/dist/lib/context/config.js +0 -42
  35. package/dist/lib/context/types.d.ts +0 -20
  36. package/dist/lib/context/types.js +0 -1
  37. package/dist/lib/context/utils.d.ts +0 -7
  38. package/dist/lib/context/utils.js +0 -8
@@ -35,14 +35,11 @@ import { applyConversationMemoryDefaults, getConversationMessages, storeConversa
  import { ExternalServerManager } from "./mcp/externalServerManager.js";
  // Import direct tools server for automatic registration
  import { directToolsServer } from "./mcp/servers/agent/directToolsServer.js";
- import { ContextManager } from "./context/ContextManager.js";
- import { defaultContextConfig } from "./context/config.js";
  import { isNonNullObject } from "./utils/typeUtils.js";
  // Core types imported from core/types.js
  export class NeuroLink {
  mcpInitialized = false;
  emitter = new EventEmitter();
- contextManager = null;
  autoDiscoveredServerInfos = [];
  // External MCP server management
  externalServerManager;
@@ -796,21 +793,6 @@ export class NeuroLink {
  ? optionsOrPrompt
  : optionsOrPrompt.input.text;
  }
- /**
- * Enables automatic context summarization for the NeuroLink instance.
- * Once enabled, the instance will maintain conversation history and
- * automatically summarize it when it exceeds token limits.
- * @param config Optional configuration to override default summarization settings.
- */
- enableContextSummarization(config) {
- const contextConfig = {
- ...defaultContextConfig,
- ...config,
- };
- // Pass the internal generator function directly, bound to the correct `this` context.
- this.contextManager = new ContextManager(this.generateTextInternal.bind(this), contextConfig);
- logger.info("[NeuroLink] Automatic context summarization enabled.");
- }
  /**
  * Generate AI content using the best available provider with MCP tool integration.
  * This is the primary method for text generation with full feature support.
@@ -869,11 +851,6 @@ export class NeuroLink {
  if (!options.input?.text || typeof options.input.text !== "string") {
  throw new Error("Input text is required and must be a non-empty string");
  }
- // Handle Context Management if enabled
- if (this.contextManager) {
- // Get the full context for the prompt without permanently adding the user's turn yet
- options.input.text = this.contextManager.getContextForPrompt("user", options.input.text);
- }
  const startTime = Date.now();
  // Emit generation start event (NeuroLink format - keep existing)
  this.emitter.emit("generation:start", {
@@ -913,6 +890,12 @@ export class NeuroLink {
  };
  // Apply factory enhancement using centralized utilities
  const textOptions = enhanceTextGenerationOptions(baseOptions, factoryResult);
+ // Pass conversation memory config if available
+ if (this.conversationMemory) {
+ textOptions.conversationMemoryConfig = this.conversationMemory.config;
+ // Include original prompt for context summarization
+ textOptions.originalPrompt = originalPrompt;
+ }
  // Detect and execute domain-specific tools
  const { toolResults, enhancedPrompt } = await this.detectAndExecuteTools(textOptions.prompt || options.input.text, factoryResult.domainType);
  // Update prompt with tool results if available
@@ -978,11 +961,6 @@ export class NeuroLink {
  }
  : undefined,
  };
- // Add both the user's turn and the AI's response to the permanent history
- if (this.contextManager) {
- await this.contextManager.addTurn("user", originalPrompt);
- await this.contextManager.addTurn("assistant", generateResult.content);
- }
  return generateResult;
  }
  /**
@@ -1081,7 +1059,6 @@ export class NeuroLink {
  hasToolRegistry: !!toolRegistry,
  toolRegistrySize: 0,
  hasExternalServerManager: !!this.externalServerManager,
- hasContextManager: !!this.contextManager,
  },
  environmentContext: {
  nodeVersion: process.version,
@@ -1669,10 +1646,7 @@ export class NeuroLink {
  let factoryResult;
  try {
  await this.initializeMCP();
- const originalPrompt = options.input.text;
- if (this.contextManager) {
- options.input.text = this.contextManager.getContextForPrompt("user", options.input.text);
- }
+ const _originalPrompt = options.input.text;
  factoryResult = processStreamingFactoryOptions(options);
  enhancedOptions = createCleanStreamOptions(options);
  const { toolResults: _toolResults, enhancedPrompt } = await this.detectAndExecuteTools(options.input.text, undefined);
@@ -1682,12 +1656,6 @@ export class NeuroLink {
  const { stream: mcpStream, provider: providerName } = await this.createMCPStream(enhancedOptions);
  const streamResult = await this.processStreamResult(mcpStream, enhancedOptions, factoryResult);
  const responseTime = Date.now() - startTime;
- if (this.contextManager) {
- await this.contextManager.addTurn("user", originalPrompt);
- if (streamResult.content) {
- await this.contextManager.addTurn("assistant", streamResult.content);
- }
- }
  this.emitStreamEndEvents(streamResult);
  return this.createStreamResponse(streamResult, mcpStream, {
  providerName,
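
Taken together, these hunks remove the instance-level ContextManager wiring from generate() and stream(); instead, generate() forwards the conversation-memory configuration and the raw user prompt down to the text-generation layer. A minimal structural sketch of that forwarding, with locally declared stand-in types (only conversationMemoryConfig and originalPrompt come from the diff; the surrounding names are assumptions):

    // Illustrative stand-ins, not the package's real internal types.
    interface ConversationMemoryConfigLike {
      enabled?: boolean;
      enableSummarization?: boolean;
    }

    interface TextOptionsLike {
      prompt?: string;
      conversationMemoryConfig?: ConversationMemoryConfigLike; // copied from this.conversationMemory.config
      originalPrompt?: string; // raw user prompt, kept separate from the tool-enhanced prompt
    }

    // Mirrors the added hunk: attach the fields only when conversation memory is active.
    function attachMemoryFields(
      textOptions: TextOptionsLike,
      memory: { config: ConversationMemoryConfigLike } | undefined,
      originalPrompt: string,
    ): TextOptionsLike {
      if (memory) {
        textOptions.conversationMemoryConfig = memory.config;
        textOptions.originalPrompt = originalPrompt;
      }
      return textOptions;
    }
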
@@ -12,6 +12,16 @@ export interface ConversationMemoryConfig {
  maxSessions?: number;
  /** Maximum number of conversation turns to keep per session (default: 20) */
  maxTurnsPerSession?: number;
+ /** Enable automatic summarization */
+ enableSummarization?: boolean;
+ /** Turn count to trigger summarization */
+ summarizationThresholdTurns?: number;
+ /** Target turn count for the summary */
+ summarizationTargetTurns?: number;
+ /** Provider to use for summarization */
+ summarizationProvider?: string;
+ /** Model to use for summarization */
+ summarizationModel?: string;
  }
  /**
  * Complete memory for a conversation session
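
For orientation, a configuration object exercising the new fields might look like the sketch below. The values, the import path, and the assumption that summarization stays off unless enableSummarization is set are illustrative, not documented defaults:

    // Import path assumed; the interface itself is the one declared in conversationTypes.d.ts.
    import type { ConversationMemoryConfig } from "@juspay/neurolink";

    const memoryConfig: ConversationMemoryConfig = {
      enabled: true,
      maxSessions: 50,
      maxTurnsPerSession: 20,
      enableSummarization: true,              // new in 7.30.0
      summarizationThresholdTurns: 20,        // assumed: summarize once a session reaches this many turns
      summarizationTargetTurns: 8,            // assumed: compress back down to roughly this many turns
      summarizationProvider: "vertex",        // placeholder provider name
      summarizationModel: "gemini-2.5-flash", // placeholder model name
    };
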
@@ -1,7 +1,6 @@
  import type { Tool } from "ai";
  import type { ValidationSchema, StandardRecord } from "./typeAliases.js";
- import type { AIProviderName, AnalyticsData } from "../core/types.js";
- import type { EvaluationData } from "../index.js";
+ import type { AIProviderName, AnalyticsData, EvaluationData } from "../core/types.js";
  import type { TokenUsage } from "./providers.js";
  /**
  * Generate function options interface - Primary method for content generation
@@ -11,8 +11,7 @@ import type { TextGenerationOptions, TextGenerationResult } from "../core/types.
  */
  export declare function applyConversationMemoryDefaults(userConfig?: Partial<ConversationMemoryConfig>): ConversationMemoryConfig;
  /**
- * Get conversation history as message array (PREFERRED METHOD)
- * Returns proper message array format for AI providers
+ * Get conversation history as message array, summarizing if needed.
  */
  export declare function getConversationMessages(conversationMemory: ConversationMemoryManager | undefined, options: TextGenerationOptions): Promise<ChatMessage[]>;
  /**
@@ -2,7 +2,7 @@
  * Conversation Memory Utilities
  * Handles configuration merging and conversation memory operations
  */
- import { getConversationMemoryDefaults } from "../config/conversationMemoryConfig.js";
+ import { getConversationMemoryDefaults, } from "../config/conversationMemoryConfig.js";
  import { logger } from "./logger.js";
  /**
  * Apply conversation memory defaults to user configuration
@@ -11,14 +11,12 @@ import { logger } from "./logger.js";
  export function applyConversationMemoryDefaults(userConfig) {
  const defaults = getConversationMemoryDefaults();
  return {
- enabled: userConfig?.enabled ?? defaults.enabled,
- maxSessions: userConfig?.maxSessions ?? defaults.maxSessions,
- maxTurnsPerSession: userConfig?.maxTurnsPerSession ?? defaults.maxTurnsPerSession,
+ ...defaults,
+ ...userConfig,
  };
  }
  /**
- * Get conversation history as message array (PREFERRED METHOD)
- * Returns proper message array format for AI providers
+ * Get conversation history as message array, summarizing if needed.
  */
  export async function getConversationMessages(conversationMemory, options) {
  if (!conversationMemory || !options.context) {
@@ -29,6 +27,7 @@ export async function getConversationMessages(conversationMemory, options) {
  return [];
  }
  try {
+ // Remove duplicate summarization logic - it should be handled in ConversationMemoryManager
  const messages = conversationMemory.buildContextMessages(sessionId);
  logger.debug("Conversation messages retrieved", {
  sessionId,
@@ -59,7 +58,7 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
  return;
  }
  try {
- await conversationMemory.storeConversationTurn(sessionId, userId, originalOptions.prompt || "", result.content);
+ await conversationMemory.storeConversationTurn(sessionId, userId, originalOptions.originalPrompt || originalOptions.prompt || "", result.content);
  logger.debug("Conversation turn stored", {
  sessionId,
  userId,
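
One behavioral nuance of the new spread-based merge in applyConversationMemoryDefaults is worth keeping in mind: unlike the old per-field ?? fallbacks, { ...defaults, ...userConfig } lets a key that is explicitly present with the value undefined override the default. A standalone illustration (plain TypeScript, independent of the package; the field names mirror the diff, the values are arbitrary):

    interface Cfg {
      enabled?: boolean;
      maxSessions?: number;
      maxTurnsPerSession?: number;
    }

    const defaults: Required<Cfg> = { enabled: true, maxSessions: 50, maxTurnsPerSession: 20 };
    const userConfig: Cfg = { enabled: undefined, maxSessions: 10 };

    // Old behaviour (per-field ??): an explicit undefined falls back to the default.
    const oldMerge: Cfg = {
      enabled: userConfig.enabled ?? defaults.enabled,                                   // true
      maxSessions: userConfig.maxSessions ?? defaults.maxSessions,                       // 10
      maxTurnsPerSession: userConfig.maxTurnsPerSession ?? defaults.maxTurnsPerSession,  // 20
    };

    // New behaviour (spread): the explicit undefined wins over the default.
    const newMerge: Cfg = { ...defaults, ...userConfig };                                // enabled: undefined

    console.log(oldMerge, newMerge);
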
@@ -39,11 +39,9 @@ export interface MCPStatus {
  error?: string;
  [key: string]: unknown;
  }
- import type { ContextManagerConfig } from "./context/types.js";
  export declare class NeuroLink {
  private mcpInitialized;
  private emitter;
- private contextManager;
  private autoDiscoveredServerInfos;
  private externalServerManager;
  private toolCircuitBreakers;
@@ -167,13 +165,6 @@
  * @returns The original prompt text as a string.
  */
  private _extractOriginalPrompt;
- /**
- * Enables automatic context summarization for the NeuroLink instance.
- * Once enabled, the instance will maintain conversation history and
- * automatically summarize it when it exceeds token limits.
- * @param config Optional configuration to override default summarization settings.
- */
- enableContextSummarization(config?: Partial<ContextManagerConfig>): void;
  /**
  * Generate AI content using the best available provider with MCP tool integration.
  * This is the primary method for text generation with full feature support.
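
With enableContextSummarization() gone from the declaration file, summarization is opted into through the conversation-memory configuration rather than a dedicated call. A hedged before/after sketch; how the config reaches a NeuroLink instance (constructor option, setter, or otherwise) is not shown in this diff, only the option names are:

    // Before (7.29.x): runtime opt-in on an existing instance.
    // neurolink.enableContextSummarization({ highWaterMarkWords: 3000, lowWaterMarkWords: 800 });

    // After (7.30.0): summarization options live on ConversationMemoryConfig.
    const summarizingMemory = {
      enableSummarization: true,
      summarizationThresholdTurns: 20,
      summarizationTargetTurns: 8,
    };
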
package/dist/neurolink.js CHANGED
@@ -35,14 +35,11 @@ import { applyConversationMemoryDefaults, getConversationMessages, storeConversa
  import { ExternalServerManager } from "./mcp/externalServerManager.js";
  // Import direct tools server for automatic registration
  import { directToolsServer } from "./mcp/servers/agent/directToolsServer.js";
- import { ContextManager } from "./context/ContextManager.js";
- import { defaultContextConfig } from "./context/config.js";
  import { isNonNullObject } from "./utils/typeUtils.js";
  // Core types imported from core/types.js
  export class NeuroLink {
  mcpInitialized = false;
  emitter = new EventEmitter();
- contextManager = null;
  autoDiscoveredServerInfos = [];
  // External MCP server management
  externalServerManager;
@@ -796,21 +793,6 @@ export class NeuroLink {
  ? optionsOrPrompt
  : optionsOrPrompt.input.text;
  }
- /**
- * Enables automatic context summarization for the NeuroLink instance.
- * Once enabled, the instance will maintain conversation history and
- * automatically summarize it when it exceeds token limits.
- * @param config Optional configuration to override default summarization settings.
- */
- enableContextSummarization(config) {
- const contextConfig = {
- ...defaultContextConfig,
- ...config,
- };
- // Pass the internal generator function directly, bound to the correct `this` context.
- this.contextManager = new ContextManager(this.generateTextInternal.bind(this), contextConfig);
- logger.info("[NeuroLink] Automatic context summarization enabled.");
- }
  /**
  * Generate AI content using the best available provider with MCP tool integration.
  * This is the primary method for text generation with full feature support.
@@ -869,11 +851,6 @@ export class NeuroLink {
  if (!options.input?.text || typeof options.input.text !== "string") {
  throw new Error("Input text is required and must be a non-empty string");
  }
- // Handle Context Management if enabled
- if (this.contextManager) {
- // Get the full context for the prompt without permanently adding the user's turn yet
- options.input.text = this.contextManager.getContextForPrompt("user", options.input.text);
- }
  const startTime = Date.now();
  // Emit generation start event (NeuroLink format - keep existing)
  this.emitter.emit("generation:start", {
@@ -913,6 +890,12 @@ export class NeuroLink {
  };
  // Apply factory enhancement using centralized utilities
  const textOptions = enhanceTextGenerationOptions(baseOptions, factoryResult);
+ // Pass conversation memory config if available
+ if (this.conversationMemory) {
+ textOptions.conversationMemoryConfig = this.conversationMemory.config;
+ // Include original prompt for context summarization
+ textOptions.originalPrompt = originalPrompt;
+ }
  // Detect and execute domain-specific tools
  const { toolResults, enhancedPrompt } = await this.detectAndExecuteTools(textOptions.prompt || options.input.text, factoryResult.domainType);
  // Update prompt with tool results if available
@@ -978,11 +961,6 @@ export class NeuroLink {
  }
  : undefined,
  };
- // Add both the user's turn and the AI's response to the permanent history
- if (this.contextManager) {
- await this.contextManager.addTurn("user", originalPrompt);
- await this.contextManager.addTurn("assistant", generateResult.content);
- }
  return generateResult;
  }
  /**
@@ -1081,7 +1059,6 @@ export class NeuroLink {
  hasToolRegistry: !!toolRegistry,
  toolRegistrySize: 0,
  hasExternalServerManager: !!this.externalServerManager,
- hasContextManager: !!this.contextManager,
  },
  environmentContext: {
  nodeVersion: process.version,
@@ -1669,10 +1646,7 @@ export class NeuroLink {
  let factoryResult;
  try {
  await this.initializeMCP();
- const originalPrompt = options.input.text;
- if (this.contextManager) {
- options.input.text = this.contextManager.getContextForPrompt("user", options.input.text);
- }
+ const _originalPrompt = options.input.text;
  factoryResult = processStreamingFactoryOptions(options);
  enhancedOptions = createCleanStreamOptions(options);
  const { toolResults: _toolResults, enhancedPrompt } = await this.detectAndExecuteTools(options.input.text, undefined);
@@ -1682,12 +1656,6 @@ export class NeuroLink {
  const { stream: mcpStream, provider: providerName } = await this.createMCPStream(enhancedOptions);
  const streamResult = await this.processStreamResult(mcpStream, enhancedOptions, factoryResult);
  const responseTime = Date.now() - startTime;
- if (this.contextManager) {
- await this.contextManager.addTurn("user", originalPrompt);
- if (streamResult.content) {
- await this.contextManager.addTurn("assistant", streamResult.content);
- }
- }
  this.emitStreamEndEvents(streamResult);
  return this.createStreamResponse(streamResult, mcpStream, {
  providerName,
@@ -12,6 +12,16 @@ export interface ConversationMemoryConfig {
  maxSessions?: number;
  /** Maximum number of conversation turns to keep per session (default: 20) */
  maxTurnsPerSession?: number;
+ /** Enable automatic summarization */
+ enableSummarization?: boolean;
+ /** Turn count to trigger summarization */
+ summarizationThresholdTurns?: number;
+ /** Target turn count for the summary */
+ summarizationTargetTurns?: number;
+ /** Provider to use for summarization */
+ summarizationProvider?: string;
+ /** Model to use for summarization */
+ summarizationModel?: string;
  }
  /**
  * Complete memory for a conversation session
@@ -1,7 +1,6 @@
  import type { Tool } from "ai";
  import type { ValidationSchema, StandardRecord } from "./typeAliases.js";
- import type { AIProviderName, AnalyticsData } from "../core/types.js";
- import type { EvaluationData } from "../index.js";
+ import type { AIProviderName, AnalyticsData, EvaluationData } from "../core/types.js";
  import type { TokenUsage } from "./providers.js";
  /**
  * Generate function options interface - Primary method for content generation
@@ -11,8 +11,7 @@ import type { TextGenerationOptions, TextGenerationResult } from "../core/types.
  */
  export declare function applyConversationMemoryDefaults(userConfig?: Partial<ConversationMemoryConfig>): ConversationMemoryConfig;
  /**
- * Get conversation history as message array (PREFERRED METHOD)
- * Returns proper message array format for AI providers
+ * Get conversation history as message array, summarizing if needed.
  */
  export declare function getConversationMessages(conversationMemory: ConversationMemoryManager | undefined, options: TextGenerationOptions): Promise<ChatMessage[]>;
  /**
@@ -2,7 +2,7 @@
  * Conversation Memory Utilities
  * Handles configuration merging and conversation memory operations
  */
- import { getConversationMemoryDefaults } from "../config/conversationMemoryConfig.js";
+ import { getConversationMemoryDefaults, } from "../config/conversationMemoryConfig.js";
  import { logger } from "./logger.js";
  /**
  * Apply conversation memory defaults to user configuration
@@ -11,14 +11,12 @@ import { logger } from "./logger.js";
  export function applyConversationMemoryDefaults(userConfig) {
  const defaults = getConversationMemoryDefaults();
  return {
- enabled: userConfig?.enabled ?? defaults.enabled,
- maxSessions: userConfig?.maxSessions ?? defaults.maxSessions,
- maxTurnsPerSession: userConfig?.maxTurnsPerSession ?? defaults.maxTurnsPerSession,
+ ...defaults,
+ ...userConfig,
  };
  }
  /**
- * Get conversation history as message array (PREFERRED METHOD)
- * Returns proper message array format for AI providers
+ * Get conversation history as message array, summarizing if needed.
  */
  export async function getConversationMessages(conversationMemory, options) {
  if (!conversationMemory || !options.context) {
@@ -29,6 +27,7 @@ export async function getConversationMessages(conversationMemory, options) {
  return [];
  }
  try {
+ // Remove duplicate summarization logic - it should be handled in ConversationMemoryManager
  const messages = conversationMemory.buildContextMessages(sessionId);
  logger.debug("Conversation messages retrieved", {
  sessionId,
@@ -59,7 +58,7 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
  return;
  }
  try {
- await conversationMemory.storeConversationTurn(sessionId, userId, originalOptions.prompt || "", result.content);
+ await conversationMemory.storeConversationTurn(sessionId, userId, originalOptions.originalPrompt || originalOptions.prompt || "", result.content);
  logger.debug("Conversation turn stored", {
  sessionId,
  userId,
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@juspay/neurolink",
- "version": "7.29.3",
+ "version": "7.30.0",
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
  "author": {
  "name": "Juspay Technologies",
@@ -1,28 +0,0 @@
- import type { TextGenerationOptions, TextGenerationResult } from "../core/types.js";
- import type { ContextManagerConfig } from "./types.js";
- type InternalGenerator = (options: TextGenerationOptions) => Promise<TextGenerationResult>;
- /**
- * Manages conversation context, automatically summarizing it when it
- * exceeds a specified word count limit.
- */
- export declare class ContextManager {
- private static readonly SUMMARIZATION_FAILED_WARNING;
- private static readonly SUMMARIZATION_EMPTY_WARNING;
- private history;
- private wordCount;
- private readonly internalGenerator;
- private readonly config;
- constructor(generatorFunction: InternalGenerator, config: ContextManagerConfig, initialContext?: string);
- addTurn(role: "user" | "assistant", message: string): Promise<void>;
- /**
- * Formats the history including the latest user turn for the prompt, without modifying the permanent history.
- */
- getContextForPrompt(role: "user", message: string): string;
- getCurrentContext(): string;
- private _summarize;
- /**
- * Truncates the history to a specific word count, preserving the most recent messages.
- */
- private _truncateHistory;
- }
- export {};
@@ -1,113 +0,0 @@
- import { logger } from "../utils/logger.js";
- import { formatHistoryToString } from "./utils.js";
- /**
- * Manages conversation context, automatically summarizing it when it
- * exceeds a specified word count limit.
- */
- export class ContextManager {
- static SUMMARIZATION_FAILED_WARNING = "[System Warning: Context summarization failed. Conversation history has been truncated.]";
- static SUMMARIZATION_EMPTY_WARNING = "[System Warning: Context summarization failed to return valid content. Conversation history has been truncated.]";
- history;
- wordCount;
- internalGenerator;
- config;
- constructor(generatorFunction, config, initialContext = "This is the start of the conversation.") {
- this.internalGenerator = generatorFunction;
- this.config = config;
- const initialMessage = {
- role: "system",
- content: initialContext,
- };
- initialMessage.wordCount = this.config.estimateWordCount([initialMessage]);
- this.history = [initialMessage];
- this.wordCount = initialMessage.wordCount;
- }
- async addTurn(role, message) {
- const newMessage = { role, content: message };
- newMessage.wordCount = this.config.estimateWordCount([newMessage]);
- this.history.push(newMessage);
- this.wordCount += newMessage.wordCount;
- logger.info(`[ContextManager] Current word count: ${this.wordCount} / ${this.config.highWaterMarkWords}`);
- if (this.wordCount > this.config.highWaterMarkWords) {
- await this._summarize();
- }
- }
- /**
- * Formats the history including the latest user turn for the prompt, without modifying the permanent history.
- */
- getContextForPrompt(role, message) {
- const tempHistory = [...this.history, { role, content: message }];
- return formatHistoryToString(tempHistory);
- }
- getCurrentContext() {
- // Format the history into a single string for the provider prompt
- return formatHistoryToString(this.history);
- }
- async _summarize() {
- try {
- const prompt = this.config.getSummarizationPrompt(this.history, this.config.lowWaterMarkWords);
- // Construct options for the internal method, bypassing the main 'generate' entry point
- const textOptions = {
- prompt,
- provider: this.config.summarizationProvider,
- model: this.config.summarizationModel,
- // Ensure summarization does not trigger more context management or tools
- disableTools: true,
- };
- // Call the internal generation function directly to avoid recursion
- const result = await this.internalGenerator(textOptions);
- if (typeof result.content === "string" && result.content.length > 0) {
- // Replace the history with a single system message containing the summary
- const newHistory = [
- { role: "system", content: result.content },
- ];
- this.history = newHistory;
- this.wordCount = this.config.estimateWordCount(this.history);
- logger.info(`[ContextManager] Summarization complete. New history length: ${this.wordCount} words.`);
- }
- else {
- logger.warn("[ContextManager] Summarization returned empty or non-string content; truncating history as a fallback.");
- this._truncateHistory(this.config.lowWaterMarkWords);
- this.history.unshift({
- role: "system",
- content: ContextManager.SUMMARIZATION_EMPTY_WARNING,
- });
- this.wordCount = this.config.estimateWordCount(this.history);
- }
- logger.debug(`[ContextManager] New history: ${JSON.stringify(this.history)}`);
- }
- catch (error) {
- logger.error("Context summarization failed:", { error });
- // Fallback strategy: truncate the history to the target word count.
- this._truncateHistory(this.config.lowWaterMarkWords);
- this.history.unshift({
- role: "system",
- content: ContextManager.SUMMARIZATION_FAILED_WARNING,
- });
- this.wordCount = this.config.estimateWordCount(this.history);
- }
- }
- /**
- * Truncates the history to a specific word count, preserving the most recent messages.
- */
- _truncateHistory(wordLimit) {
- if (this.wordCount <= wordLimit) {
- return;
- }
- let runningCount = 0;
- let sliceIndex = this.history.length;
- for (let i = this.history.length - 1; i >= 0; i--) {
- let wordCount = this.history[i].wordCount;
- if (wordCount === undefined) {
- logger.warn(`[ContextManager] Word count cache missing for message at index ${i}. Recalculating.`);
- wordCount = this.config.estimateWordCount([this.history[i]]);
- }
- runningCount += wordCount;
- if (runningCount > wordLimit) {
- sliceIndex = i + 1;
- break;
- }
- }
- this.history = this.history.slice(sliceIndex);
- }
- }
@@ -1,5 +0,0 @@
- import type { ContextManagerConfig } from "./types.js";
- /**
- * Default configuration for the ContextManager.
- */
- export declare const defaultContextConfig: ContextManagerConfig;
@@ -1,42 +0,0 @@
- import { formatHistoryToString } from "./utils.js";
- /**
- * Estimates the word count of a conversation history.
- */
- function estimateWordCount(history) {
- if (!history || history.length === 0) {
- return 0;
- }
- return history.reduce((acc, msg) => acc +
- (msg.content
- .trim()
- .split(/\s+/)
- .filter((word) => word.length > 0).length || 0), 0);
- }
- /**
- * Generates the default prompt for summarization.
- */
- function getDefaultSummarizationPrompt(history, wordLimit) {
- const formattedHistory = formatHistoryToString(history);
- return `
- You are a context summarization AI. Your task is to condense the following conversation history for another AI assistant.
- The summary must be a concise, third-person narrative that retains all critical information. Pay special attention to retaining key entities, technical details, decisions made, and any specific dates or times mentioned.
- Ensure the summary flows logically and is ready to be used as context for the next turn in the conversation.
- Please keep the summary under ${wordLimit} words.
-
- Conversation History to Summarize:
- ---
- ${formattedHistory}
- ---
- `.trim();
- }
- /**
- * Default configuration for the ContextManager.
- */
- export const defaultContextConfig = {
- highWaterMarkWords: 3000,
- lowWaterMarkWords: 800,
- summarizationModel: "gemini-2.5-flash",
- summarizationProvider: "googlevertex",
- getSummarizationPrompt: getDefaultSummarizationPrompt,
- estimateWordCount: estimateWordCount,
- };
@@ -1,20 +0,0 @@
- export interface ChatMessage {
- /** Role of the message sender */
- role: "user" | "assistant" | "system";
- /** Content of the message */
- content: string;
- /** Cached word count for performance */
- wordCount?: number;
- }
- /**
- * Defines the configuration for the ContextManager.
- * This allows for easy customization of the summarization behavior.
- */
- export interface ContextManagerConfig {
- highWaterMarkWords: number;
- lowWaterMarkWords: number;
- summarizationModel: string;
- summarizationProvider: string;
- getSummarizationPrompt: (history: ChatMessage[], wordLimit: number) => string;
- estimateWordCount: (history: ChatMessage[]) => number;
- }
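
The deleted ContextManagerConfig and the new summarization fields on ConversationMemoryConfig cover similar ground, but the bookkeeping unit changes from words to turns. A rough correspondence, read off the two interfaces in this diff rather than stated anywhere by the package:

    // Old ContextManagerConfig (deleted)   ->  ConversationMemoryConfig (7.30.0), nearest counterpart
    // highWaterMarkWords: number           ->  summarizationThresholdTurns?: number  (trigger; words -> turns)
    // lowWaterMarkWords: number            ->  summarizationTargetTurns?: number     (target size; words -> turns)
    // summarizationProvider: string        ->  summarizationProvider?: string
    // summarizationModel: string           ->  summarizationModel?: string
    // getSummarizationPrompt(...)          ->  no public counterpart in this diff
    // estimateWordCount(...)               ->  no public counterpart in this diff
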
@@ -1 +0,0 @@
- export {};
@@ -1,7 +0,0 @@
- import type { ChatMessage } from "./types.js";
- /**
- * Formats a chat history array into a single string for use in a prompt.
- * @param history The array of ChatMessage objects.
- * @returns A formatted string representing the conversation.
- */
- export declare function formatHistoryToString(history: ChatMessage[]): string;
@@ -1,8 +0,0 @@
- /**
- * Formats a chat history array into a single string for use in a prompt.
- * @param history The array of ChatMessage objects.
- * @returns A formatted string representing the conversation.
- */
- export function formatHistoryToString(history) {
- return history.map((msg) => `${msg.role}: ${msg.content}`).join("\n\n");
- }