@juspay/neurolink 7.43.0 → 7.44.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
+ ## [7.44.0](https://github.com/juspay/neurolink/compare/v7.43.0...v7.44.0) (2025-09-24)
+
+ ### Features
+
+ - **(sdk):** Integrate mem0 for better context ([78edf08](https://github.com/juspay/neurolink/commit/78edf08467432988c968eb06f510f0198b253665))
+
  ## [7.43.0](https://github.com/juspay/neurolink/compare/v7.42.0...v7.43.0) (2025-09-23)

  ### Features
@@ -851,21 +857,18 @@ Co-authored-by: sachin.sharma <sachin.sharma@juspay.in>
  ### 🎯 Major Feature: Dynamic Model Configuration System

  - **⚡ Revolutionary Model Management**: Introduced dynamic model configuration system replacing static enums
-
  - **Self-Updating Models**: New models automatically available without code updates
  - **Cost Optimization**: Automatic selection of cheapest models for tasks
  - **Smart Resolution**: Fuzzy matching, aliases, and capability-based search
  - **Multi-Source Loading**: Configuration from API → GitHub → local with fallback

  - **💰 Cost Intelligence**: Built-in cost optimization and model selection algorithms
-
  - **Current Leader**: Gemini 2.0 Flash at $0.000075/1K input tokens
  - **Capability Mapping**: Find models by features (functionCalling, vision, code-execution)
  - **Real-Time Pricing**: Always current model costs and performance data
  - **Budget Controls**: Maximum price filtering and cost-aware selection

  - **🔧 Production-Ready Infrastructure**: Complete system with validation and monitoring
-
  - **Model Configuration Server**: REST API with search capabilities (`scripts/model-server.js`)
  - **Zod Schema Validation**: Type-safe runtime configuration validation
  - **Comprehensive Testing**: Full test suite for all dynamic model functionality
@@ -917,19 +920,16 @@ Co-authored-by: sachin.sharma <sachin.sharma@juspay.in>
  ### Bug Fixes - MCP System Restoration

  - **🔧 Fixed Built-in Tool Loading**: Resolved critical circular dependency issues preventing default tools from loading
-
  - **Root Cause**: Circular dependency between `config.ts` and `unified-registry.ts` preventing proper initialization
  - **Solution**: Implemented dynamic imports and restructured initialization chain
  - **Result**: Built-in tools restored from 0 → 3 tools (100% recovery rate)

  - **⏰ Fixed Time Tool Functionality**: Time tool now properly available and returns accurate real-time data
-
  - Fixed tool registration and execution pathway
  - Proper timezone handling and formatting
  - Verified accuracy against system time

  - **🔍 Enhanced External Tool Discovery**: 58+ external MCP tools now discoverable via comprehensive auto-discovery
-
  - Auto-discovery across VS Code, Claude Desktop, Cursor, Windsurf
  - Proper placeholder system for lazy activation
  - Unified registry integration
@@ -984,7 +984,6 @@ Co-authored-by: sachin.sharma <sachin.sharma@juspay.in>
  ### Features

  - **🛠️ Enhanced CLI with Ollama Commands**: New Ollama-specific management commands
-
  - `neurolink ollama list-models` - List installed local models
  - `neurolink ollama pull <model>` - Download models locally
  - `neurolink ollama remove <model>` - Remove installed models
@@ -1065,14 +1064,12 @@ neurolink generate-text "test" --debug
  ### Patch Changes

  - **🔧 Production-Ready CLI Logging System**: Fixed critical logging system for clean production output
-
  - **Issue**: CLI showed excessive debug output during normal operation, breaking demo presentations
  - **Root Cause**: Mixed console.log statements bypassed conditional logger system
  - **Solution**: Systematic replacement of all console.log with logger.debug across codebase
  - **Impact**: **Clean CLI output by default** with conditional debug available via `NEUROLINK_DEBUG=true`

  - **🔄 Enhanced Provider Fallback Logic**: Fixed incomplete provider fallback coverage
-
  - **Issue**: Provider fallback only attempted 4 of 6 providers (missing Anthropic & Azure)
  - **Root Cause**: Incomplete provider array in NeuroLink class fallback logic
  - **Solution**: Updated to include all 6 providers: `['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai']`
@@ -1116,7 +1113,6 @@ NEUROLINK_DEBUG=true node dist/cli/cli/index.js generate-text "test" --max-token
  ### Patch Changes

  - **🔧 Critical CLI Dependency Fix**: Removed peer dependencies to ensure zero-friction CLI usage
-
  - **Issue**: CLI commands failed when provider-specific SDK packages were peer dependencies
  - **Root Cause**: `npx` doesn't install peer dependencies, causing missing module errors
  - **Solution**: Moved ALL AI provider SDKs to regular dependencies
@@ -0,0 +1,44 @@
+ /**
+ * Mem0 Memory Initializer
+ * Simple initialization logic for mem0ai/oss integration
+ */
+ import type { MemoryConfig } from "mem0ai/oss";
+ /**
+ * Interface for mem0 Memory instance methods based on actual mem0ai/oss API
+ */
+ export interface Mem0Memory {
+ search(query: string, config: {
+ userId?: string;
+ limit?: number;
+ }): Promise<{
+ results: Array<{
+ memory: string;
+ id: string;
+ }>;
+ }>;
+ add(messages: string, config: {
+ userId?: string;
+ metadata?: Record<string, unknown>;
+ }): Promise<{
+ results: Array<{
+ id: string;
+ memory: string;
+ }>;
+ }>;
+ get(memoryId: string): Promise<{
+ id: string;
+ memory: string;
+ } | null>;
+ update(memoryId: string, data: string): Promise<{
+ message: string;
+ }>;
+ delete(memoryId: string): Promise<{
+ message: string;
+ }>;
+ history(memoryId: string): Promise<unknown[]>;
+ reset(): Promise<void>;
+ }
+ /**
+ * Initialize mem0 memory instance with configuration
+ */
+ export declare function initializeMem0(mem0Config: MemoryConfig): Promise<Mem0Memory | null>;
@@ -0,0 +1,42 @@
+ /**
+ * Mem0 Memory Initializer
+ * Simple initialization logic for mem0ai/oss integration
+ */
+ import { Memory } from "mem0ai/oss";
+ import { logger } from "../utils/logger.js";
+ /**
+ * Initialize mem0 memory instance with configuration
+ */
+ export async function initializeMem0(mem0Config) {
+ logger.debug("[mem0Initializer] Starting mem0 initialization");
+ try {
+ // Create Memory instance
+ const memory = new Memory(mem0Config);
+ logger.info("[mem0Initializer] Mem0 initialized successfully");
+ return memory;
+ }
+ catch (error) {
+ logger.warn("[mem0Initializer] Failed to initialize mem0, using fallback", {
+ error: error instanceof Error ? error.message : String(error),
+ });
+ return createFallbackMemory();
+ }
+ }
+ /**
+ * Create fallback memory implementation
+ */
+ function createFallbackMemory() {
+ return {
+ search: async () => ({ results: [] }),
+ add: async () => ({ results: [] }),
+ get: async () => null,
+ update: async () => ({
+ message: "Fallback memory does not support updates",
+ }),
+ delete: async () => ({
+ message: "Fallback memory does not support deletion",
+ }),
+ history: async () => [],
+ reset: async () => { },
+ };
+ }
@@ -70,6 +70,16 @@ export declare class NeuroLink {
  private conversationMemoryConfig?;
  private enableOrchestration;
  private hitlManager?;
+ private mem0Instance?;
+ private mem0Config?;
+ /**
+ * Simple sync config setup for mem0
+ */
+ private initializeMem0Config;
+ /**
+ * Async initialization called during generate/stream
+ */
+ private ensureMem0Ready;
  /**
  * Context storage for tool execution
  * This context will be merged with any runtime context passed by the AI model
@@ -142,6 +152,8 @@ export declare class NeuroLink {
  * Initialize HITL (Human-in-the-Loop) if enabled
  */
  private initializeHITL;
+ /** Format memory context for prompt inclusion */
+ private formatMemoryContext;
  /**
  * Set up HITL event forwarding to main emitter
  */
@@ -87,6 +87,40 @@ export class NeuroLink {
  enableOrchestration;
  // HITL (Human-in-the-Loop) support
  hitlManager;
+ // Mem0 memory instance and config for conversation context
+ mem0Instance;
+ mem0Config;
+ /**
+ * Simple sync config setup for mem0
+ */
+ initializeMem0Config() {
+ const config = this.conversationMemoryConfig?.conversationMemory;
+ if (!config?.mem0Enabled) {
+ return false;
+ }
+ this.mem0Config = config.mem0Config;
+ return true;
+ }
+ /**
+ * Async initialization called during generate/stream
+ */
+ async ensureMem0Ready() {
+ if (this.mem0Instance !== undefined) {
+ return this.mem0Instance;
+ }
+ if (!this.initializeMem0Config()) {
+ this.mem0Instance = null;
+ return null;
+ }
+ // Import and initialize from separate file
+ const { initializeMem0 } = await import("./memory/mem0Initializer.js");
+ if (!this.mem0Config) {
+ this.mem0Instance = null;
+ return null;
+ }
+ this.mem0Instance = await initializeMem0(this.mem0Config);
+ return this.mem0Instance;
+ }
  /**
  * Context storage for tool execution
  * This context will be merged with any runtime context passed by the AI model
@@ -309,6 +343,13 @@ export class NeuroLink {
  });
  }
  }
+ /** Format memory context for prompt inclusion */
+ formatMemoryContext(memoryContext, currentInput) {
+ return `Context from previous conversations:
+ ${memoryContext}
+
+ Current user's request: ${currentInput}`;
+ }
  /**
  * Set up HITL event forwarding to main emitter
  */
@@ -958,6 +999,31 @@ export class NeuroLink {
  if (!options.input?.text || typeof options.input.text !== "string") {
  throw new Error("Input text is required and must be a non-empty string");
  }
+ if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+ options.context?.userId) {
+ try {
+ const mem0 = await this.ensureMem0Ready();
+ if (!mem0) {
+ logger.debug("Mem0 not available, continuing without memory retrieval");
+ }
+ else {
+ const memories = await mem0.search(options.input.text, {
+ userId: options.context.userId,
+ limit: 5,
+ });
+ if (memories?.results?.length > 0) {
+ // Enhance the input with memory context
+ const memoryContext = memories.results
+ .map((m) => m.memory)
+ .join("\n");
+ options.input.text = this.formatMemoryContext(memoryContext, options.input.text);
+ }
+ }
+ }
+ catch (error) {
+ logger.warn("Mem0 memory retrieval failed:", error);
+ }
+ }
  const startTime = Date.now();
  // Apply orchestration if enabled and no specific provider/model requested
  if (this.enableOrchestration && !options.provider && !options.model) {
@@ -1090,6 +1156,37 @@ export class NeuroLink {
  }
  : undefined,
  };
+ if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+ options.context?.userId &&
+ generateResult.content) {
+ // Non-blocking memory storage - run in background
+ setImmediate(async () => {
+ try {
+ const mem0 = await this.ensureMem0Ready();
+ if (mem0) {
+ // Store complete conversation turn (user + AI messages)
+ const conversationTurn = [
+ { role: "user", content: options.input.text },
+ { role: "system", content: generateResult.content },
+ ];
+ await mem0.add(JSON.stringify(conversationTurn), {
+ userId: options.context?.userId,
+ metadata: {
+ timestamp: new Date().toISOString(),
+ provider: generateResult.provider,
+ model: generateResult.model,
+ type: "conversation_turn",
+ async_mode: true,
+ },
+ });
+ }
+ }
+ catch (error) {
+ // Non-blocking: Log error but don't fail the generation
+ logger.warn("Mem0 memory storage failed:", error);
+ }
+ });
+ }
  return generateResult;
  }
  /**
@@ -1622,6 +1719,33 @@ export class NeuroLink {
  // Initialize MCP
  await this.initializeMCP();
  const _originalPrompt = options.input.text;
+ if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+ options.context?.userId) {
+ try {
+ const mem0 = await this.ensureMem0Ready();
+ if (!mem0) {
+ // Continue without memories if mem0 is not available
+ logger.debug("Mem0 not available, continuing without memory retrieval");
+ }
+ else {
+ const memories = await mem0.search(options.input.text, {
+ userId: options.context.userId,
+ limit: 5,
+ });
+ if (memories?.results?.length > 0) {
+ // Enhance the input with memory context
+ const memoryContext = memories.results
+ .map((m) => m.memory)
+ .join("\n");
+ options.input.text = this.formatMemoryContext(memoryContext, options.input.text);
+ }
+ }
+ }
+ catch (error) {
+ // Non-blocking: Log error but continue with streaming
+ logger.warn("Mem0 memory retrieval failed:", error);
+ }
+ }
  // Apply orchestration if enabled and no specific provider/model requested
  if (this.enableOrchestration && !options.provider && !options.model) {
  try {
@@ -1687,6 +1811,36 @@ export class NeuroLink {
  });
  }
  }
+ if (self.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+ enhancedOptions.context?.userId &&
+ accumulatedContent.trim()) {
+ // Non-blocking memory storage - run in background
+ setImmediate(async () => {
+ try {
+ const mem0 = await self.ensureMem0Ready();
+ if (mem0) {
+ // Store complete conversation turn (user + AI messages)
+ const conversationTurn = [
+ { role: "user", content: originalPrompt },
+ { role: "system", content: accumulatedContent.trim() },
+ ];
+ await mem0.add(JSON.stringify(conversationTurn), {
+ userId: enhancedOptions.context?.userId,
+ metadata: {
+ timestamp: new Date().toISOString(),
+ type: "conversation_turn_stream",
+ userMessage: originalPrompt,
+ async_mode: true,
+ aiResponse: accumulatedContent.trim(),
+ },
+ });
+ }
+ }
+ catch (error) {
+ logger.warn("Mem0 memory storage failed:", error);
+ }
+ });
+ }
  }
  })(this);
  const streamResult = await this.processStreamResult(mcpStream, enhancedOptions, factoryResult);
@@ -2,6 +2,10 @@
  * Conversation Memory Types for NeuroLink
  * Provides type-safe conversation storage and context management
  */
+ import type { MemoryConfig } from "mem0ai/oss";
+ /**
+ * Mem0 configuration interface matching mem0ai/oss MemoryConfig structure
+ */
  /**
  * Configuration for conversation memory feature
  */
@@ -22,6 +26,10 @@ export interface ConversationMemoryConfig {
  summarizationProvider?: string;
  /** Model to use for summarization */
  summarizationModel?: string;
+ /** Enable mem0 integration for conversation memory */
+ mem0Enabled?: boolean;
+ /** Configuration for mem0 integration */
+ mem0Config?: MemoryConfig;
  }
  /**
  * Complete memory for a conversation session
@@ -0,0 +1,44 @@
+ /**
+ * Mem0 Memory Initializer
+ * Simple initialization logic for mem0ai/oss integration
+ */
+ import type { MemoryConfig } from "mem0ai/oss";
+ /**
+ * Interface for mem0 Memory instance methods based on actual mem0ai/oss API
+ */
+ export interface Mem0Memory {
+ search(query: string, config: {
+ userId?: string;
+ limit?: number;
+ }): Promise<{
+ results: Array<{
+ memory: string;
+ id: string;
+ }>;
+ }>;
+ add(messages: string, config: {
+ userId?: string;
+ metadata?: Record<string, unknown>;
+ }): Promise<{
+ results: Array<{
+ id: string;
+ memory: string;
+ }>;
+ }>;
+ get(memoryId: string): Promise<{
+ id: string;
+ memory: string;
+ } | null>;
+ update(memoryId: string, data: string): Promise<{
+ message: string;
+ }>;
+ delete(memoryId: string): Promise<{
+ message: string;
+ }>;
+ history(memoryId: string): Promise<unknown[]>;
+ reset(): Promise<void>;
+ }
+ /**
+ * Initialize mem0 memory instance with configuration
+ */
+ export declare function initializeMem0(mem0Config: MemoryConfig): Promise<Mem0Memory | null>;
@@ -0,0 +1,42 @@
+ /**
+ * Mem0 Memory Initializer
+ * Simple initialization logic for mem0ai/oss integration
+ */
+ import { Memory } from "mem0ai/oss";
+ import { logger } from "../utils/logger.js";
+ /**
+ * Initialize mem0 memory instance with configuration
+ */
+ export async function initializeMem0(mem0Config) {
+ logger.debug("[mem0Initializer] Starting mem0 initialization");
+ try {
+ // Create Memory instance
+ const memory = new Memory(mem0Config);
+ logger.info("[mem0Initializer] Mem0 initialized successfully");
+ return memory;
+ }
+ catch (error) {
+ logger.warn("[mem0Initializer] Failed to initialize mem0, using fallback", {
+ error: error instanceof Error ? error.message : String(error),
+ });
+ return createFallbackMemory();
+ }
+ }
+ /**
+ * Create fallback memory implementation
+ */
+ function createFallbackMemory() {
+ return {
+ search: async () => ({ results: [] }),
+ add: async () => ({ results: [] }),
+ get: async () => null,
+ update: async () => ({
+ message: "Fallback memory does not support updates",
+ }),
+ delete: async () => ({
+ message: "Fallback memory does not support deletion",
+ }),
+ history: async () => [],
+ reset: async () => { },
+ };
+ }
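
Note: a minimal usage sketch for the new initializer, based only on the `Mem0Memory` interface and `initializeMem0` signature declared in the new files above. The deep-import path, the `MemoryConfig` value, and the `userId` are illustrative assumptions, not part of the documented API surface of this release.

```ts
import type { MemoryConfig } from "mem0ai/oss";
// Assumed deep-import path; inside the package the module lives at dist/memory/mem0Initializer.js.
import {
  initializeMem0,
  type Mem0Memory,
} from "@juspay/neurolink/dist/memory/mem0Initializer.js";

async function rememberAndRecall(config: MemoryConfig): Promise<string> {
  // Returns a mem0 Memory on success, or the silent no-op fallback if mem0 cannot start.
  const memory: Mem0Memory | null = await initializeMem0(config);
  if (!memory) {
    return "";
  }

  // Store one conversation turn for a hypothetical user id.
  await memory.add(
    JSON.stringify([
      { role: "user", content: "My favourite provider is Bedrock." },
      { role: "system", content: "Noted - you prefer AWS Bedrock." },
    ]),
    { userId: "user-123", metadata: { type: "conversation_turn" } },
  );

  // Later, retrieve the most relevant memories for a new prompt.
  const { results } = await memory.search("Which provider do I prefer?", {
    userId: "user-123",
    limit: 5,
  });
  return results.map((m) => m.memory).join("\n");
}
```

Because initialization failures fall back to the no-op implementation, callers can treat the returned instance uniformly; the fallback simply returns empty results and logs nothing further.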
@@ -70,6 +70,16 @@ export declare class NeuroLink {
  private conversationMemoryConfig?;
  private enableOrchestration;
  private hitlManager?;
+ private mem0Instance?;
+ private mem0Config?;
+ /**
+ * Simple sync config setup for mem0
+ */
+ private initializeMem0Config;
+ /**
+ * Async initialization called during generate/stream
+ */
+ private ensureMem0Ready;
  /**
  * Context storage for tool execution
  * This context will be merged with any runtime context passed by the AI model
@@ -142,6 +152,8 @@ export declare class NeuroLink {
  * Initialize HITL (Human-in-the-Loop) if enabled
  */
  private initializeHITL;
+ /** Format memory context for prompt inclusion */
+ private formatMemoryContext;
  /**
  * Set up HITL event forwarding to main emitter
  */
package/dist/neurolink.js CHANGED
@@ -87,6 +87,40 @@ export class NeuroLink {
  enableOrchestration;
  // HITL (Human-in-the-Loop) support
  hitlManager;
+ // Mem0 memory instance and config for conversation context
+ mem0Instance;
+ mem0Config;
+ /**
+ * Simple sync config setup for mem0
+ */
+ initializeMem0Config() {
+ const config = this.conversationMemoryConfig?.conversationMemory;
+ if (!config?.mem0Enabled) {
+ return false;
+ }
+ this.mem0Config = config.mem0Config;
+ return true;
+ }
+ /**
+ * Async initialization called during generate/stream
+ */
+ async ensureMem0Ready() {
+ if (this.mem0Instance !== undefined) {
+ return this.mem0Instance;
+ }
+ if (!this.initializeMem0Config()) {
+ this.mem0Instance = null;
+ return null;
+ }
+ // Import and initialize from separate file
+ const { initializeMem0 } = await import("./memory/mem0Initializer.js");
+ if (!this.mem0Config) {
+ this.mem0Instance = null;
+ return null;
+ }
+ this.mem0Instance = await initializeMem0(this.mem0Config);
+ return this.mem0Instance;
+ }
  /**
  * Context storage for tool execution
  * This context will be merged with any runtime context passed by the AI model
@@ -309,6 +343,13 @@ export class NeuroLink {
  });
  }
  }
+ /** Format memory context for prompt inclusion */
+ formatMemoryContext(memoryContext, currentInput) {
+ return `Context from previous conversations:
+ ${memoryContext}
+
+ Current user's request: ${currentInput}`;
+ }
  /**
  * Set up HITL event forwarding to main emitter
  */
@@ -958,6 +999,31 @@ export class NeuroLink {
  if (!options.input?.text || typeof options.input.text !== "string") {
  throw new Error("Input text is required and must be a non-empty string");
  }
+ if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+ options.context?.userId) {
+ try {
+ const mem0 = await this.ensureMem0Ready();
+ if (!mem0) {
+ logger.debug("Mem0 not available, continuing without memory retrieval");
+ }
+ else {
+ const memories = await mem0.search(options.input.text, {
+ userId: options.context.userId,
+ limit: 5,
+ });
+ if (memories?.results?.length > 0) {
+ // Enhance the input with memory context
+ const memoryContext = memories.results
+ .map((m) => m.memory)
+ .join("\n");
+ options.input.text = this.formatMemoryContext(memoryContext, options.input.text);
+ }
+ }
+ }
+ catch (error) {
+ logger.warn("Mem0 memory retrieval failed:", error);
+ }
+ }
  const startTime = Date.now();
  // Apply orchestration if enabled and no specific provider/model requested
  if (this.enableOrchestration && !options.provider && !options.model) {
@@ -1090,6 +1156,37 @@ export class NeuroLink {
  }
  : undefined,
  };
+ if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+ options.context?.userId &&
+ generateResult.content) {
+ // Non-blocking memory storage - run in background
+ setImmediate(async () => {
+ try {
+ const mem0 = await this.ensureMem0Ready();
+ if (mem0) {
+ // Store complete conversation turn (user + AI messages)
+ const conversationTurn = [
+ { role: "user", content: options.input.text },
+ { role: "system", content: generateResult.content },
+ ];
+ await mem0.add(JSON.stringify(conversationTurn), {
+ userId: options.context?.userId,
+ metadata: {
+ timestamp: new Date().toISOString(),
+ provider: generateResult.provider,
+ model: generateResult.model,
+ type: "conversation_turn",
+ async_mode: true,
+ },
+ });
+ }
+ }
+ catch (error) {
+ // Non-blocking: Log error but don't fail the generation
+ logger.warn("Mem0 memory storage failed:", error);
+ }
+ });
+ }
  return generateResult;
  }
  /**
@@ -1622,6 +1719,33 @@ export class NeuroLink {
  // Initialize MCP
  await this.initializeMCP();
  const _originalPrompt = options.input.text;
+ if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+ options.context?.userId) {
+ try {
+ const mem0 = await this.ensureMem0Ready();
+ if (!mem0) {
+ // Continue without memories if mem0 is not available
+ logger.debug("Mem0 not available, continuing without memory retrieval");
+ }
+ else {
+ const memories = await mem0.search(options.input.text, {
+ userId: options.context.userId,
+ limit: 5,
+ });
+ if (memories?.results?.length > 0) {
+ // Enhance the input with memory context
+ const memoryContext = memories.results
+ .map((m) => m.memory)
+ .join("\n");
+ options.input.text = this.formatMemoryContext(memoryContext, options.input.text);
+ }
+ }
+ }
+ catch (error) {
+ // Non-blocking: Log error but continue with streaming
+ logger.warn("Mem0 memory retrieval failed:", error);
+ }
+ }
  // Apply orchestration if enabled and no specific provider/model requested
  if (this.enableOrchestration && !options.provider && !options.model) {
  try {
@@ -1687,6 +1811,36 @@ export class NeuroLink {
  });
  }
  }
+ if (self.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+ enhancedOptions.context?.userId &&
+ accumulatedContent.trim()) {
+ // Non-blocking memory storage - run in background
+ setImmediate(async () => {
+ try {
+ const mem0 = await self.ensureMem0Ready();
+ if (mem0) {
+ // Store complete conversation turn (user + AI messages)
+ const conversationTurn = [
+ { role: "user", content: originalPrompt },
+ { role: "system", content: accumulatedContent.trim() },
+ ];
+ await mem0.add(JSON.stringify(conversationTurn), {
+ userId: enhancedOptions.context?.userId,
+ metadata: {
+ timestamp: new Date().toISOString(),
+ type: "conversation_turn_stream",
+ userMessage: originalPrompt,
+ async_mode: true,
+ aiResponse: accumulatedContent.trim(),
+ },
+ });
+ }
+ }
+ catch (error) {
+ logger.warn("Mem0 memory storage failed:", error);
+ }
+ });
+ }
  }
  })(this);
  const streamResult = await this.processStreamResult(mcpStream, enhancedOptions, factoryResult);
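
Note: the retrieval blocks above rewrite `options.input.text` before a generate or stream call whenever mem0 is enabled and `options.context.userId` is present. A standalone sketch of that rewrite, with made-up memory strings and request text:

```ts
// Illustrative values only; in the class above these come from mem0.search(...).
const memories = {
  results: [
    { id: "m1", memory: "User prefers concise answers." },
    { id: "m2", memory: "User is wiring NeuroLink up to AWS Bedrock." },
  ],
};

// Mirrors the .map(...).join("\n") step in the retrieval block.
const memoryContext = memories.results.map((m) => m.memory).join("\n");

// Mirrors formatMemoryContext(memoryContext, currentInput) defined earlier in the class.
const enhancedPrompt = `Context from previous conversations:
${memoryContext}

Current user's request: How do I enable streaming?`;

console.log(enhancedPrompt);
```

The enriched prompt is what reaches the selected provider; the conversation turn is then written back to mem0 inside `setImmediate`, so storage never blocks the response and failures are only surfaced through `logger.warn`.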
@@ -2,6 +2,10 @@
  * Conversation Memory Types for NeuroLink
  * Provides type-safe conversation storage and context management
  */
+ import type { MemoryConfig } from "mem0ai/oss";
+ /**
+ * Mem0 configuration interface matching mem0ai/oss MemoryConfig structure
+ */
  /**
  * Configuration for conversation memory feature
  */
@@ -22,6 +26,10 @@ export interface ConversationMemoryConfig {
  summarizationProvider?: string;
  /** Model to use for summarization */
  summarizationModel?: string;
+ /** Enable mem0 integration for conversation memory */
+ mem0Enabled?: boolean;
+ /** Configuration for mem0 integration */
+ mem0Config?: MemoryConfig;
  }
  /**
  * Complete memory for a conversation session
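
Note: the two new optional fields slot into the existing `ConversationMemoryConfig`. A hedged sketch of how a caller might populate them; the `MemoryConfig` contents and the `ConversationMemoryConfig` import path are assumptions, and only the fields shown in this hunk are taken from the diff.

```ts
import type { MemoryConfig } from "mem0ai/oss";
// Assumed export location for the interface; adjust to the package's actual type exports.
import type { ConversationMemoryConfig } from "@juspay/neurolink";

// Hypothetical mem0 backend settings; see mem0ai/oss for the real MemoryConfig fields
// (vector store, embedder, LLM, history storage, ...). Kept empty here on purpose.
const mem0Config = {} as MemoryConfig;

// Only the fields added in 7.44.0 are shown; the rest of the interface is unchanged.
const conversationMemory: Partial<ConversationMemoryConfig> = {
  mem0Enabled: true,
  mem0Config,
};
```

In the class diff above this block is read as `this.conversationMemoryConfig?.conversationMemory?.mem0Enabled`, so it is expected to sit under a `conversationMemory` key of whatever options the `NeuroLink` constructor receives; memory retrieval and storage then only run for calls that also pass `context.userId`. The exact constructor shape is not part of this diff.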
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@juspay/neurolink",
- "version": "7.43.0",
+ "version": "7.44.0",
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
  "author": {
  "name": "Juspay Technologies",
@@ -143,24 +143,24 @@
  },
  "dependencies": {
  "@ai-sdk/anthropic": "^1.2.12",
- "@ai-sdk/azure": "^1.3.24",
- "@ai-sdk/google": "^1.2.19",
- "@ai-sdk/google-vertex": "^2.2.0",
- "@ai-sdk/mistral": "^1.0.0",
- "@ai-sdk/openai": "^1.0.0",
+ "@ai-sdk/azure": "^1.3.25",
+ "@ai-sdk/google": "^1.2.22",
+ "@ai-sdk/google-vertex": "^2.2.27",
+ "@ai-sdk/mistral": "^1.2.8",
+ "@ai-sdk/openai": "^1.3.24",
  "@ai-sdk/provider": "^1.1.3",
  "@ai-sdk/provider-utils": "^2.2.8",
- "@aws-sdk/client-bedrock": "^3.876.0",
- "@aws-sdk/client-bedrock-runtime": "^3.876.0",
- "@aws-sdk/client-sagemaker": "^3.862.0",
- "@aws-sdk/client-sagemaker-runtime": "^3.862.0",
- "@aws-sdk/credential-provider-node": "^3.876.0",
+ "@aws-sdk/client-bedrock": "^3.886.0",
+ "@aws-sdk/client-bedrock-runtime": "^3.886.0",
+ "@aws-sdk/client-sagemaker": "^3.886.0",
+ "@aws-sdk/client-sagemaker-runtime": "^3.886.0",
+ "@aws-sdk/credential-provider-node": "^3.886.0",
  "@aws-sdk/types": "^3.862.0",
  "@google-cloud/vertexai": "^1.10.0",
- "@google/genai": "^1.16.0",
+ "@google/genai": "^1.19.0",
  "@google/generative-ai": "^0.24.1",
- "@huggingface/inference": "^2.8.0",
- "@modelcontextprotocol/sdk": "^1.13.0",
+ "@huggingface/inference": "^2.8.1",
+ "@modelcontextprotocol/sdk": "^1.17.5",
  "@opentelemetry/api": "^1.9.0",
  "@opentelemetry/auto-instrumentations-node": "^0.52.1",
  "@opentelemetry/exporter-logs-otlp-http": "^0.54.2",
@@ -174,20 +174,21 @@
  "@opentelemetry/sdk-logs": "^0.54.2",
  "@opentelemetry/sdk-metrics": "^1.30.1",
  "@opentelemetry/sdk-node": "^0.54.2",
- "@opentelemetry/semantic-conventions": "^1.34.0",
+ "@opentelemetry/semantic-conventions": "^1.37.0",
  "ai": "4.3.16",
- "chalk": "^5.3.0",
- "dotenv": "^16.5.0",
- "inquirer": "^9.2.15",
+ "chalk": "^5.6.2",
+ "dotenv": "^16.6.1",
+ "inquirer": "^9.3.7",
  "json-schema-to-zod": "^2.6.1",
- "mathjs": "^14.5.3",
+ "mathjs": "^14.7.0",
+ "mem0ai": "^2.1.38",
  "nanoid": "^5.1.5",
  "ollama-ai-provider": "^1.2.0",
  "ora": "^7.0.1",
  "p-limit": "^6.2.0",
  "reconnecting-eventsource": "^1.6.4",
  "redis": "^5.8.2",
- "undici": "^6.6.2",
+ "undici": "^6.21.3",
  "uuid": "^11.1.0",
  "ws": "^8.18.3",
  "yargs": "^17.7.2",
@@ -195,41 +196,41 @@
  "zod-to-json-schema": "^3.24.6"
  },
  "devDependencies": {
- "@biomejs/biome": "^2.1.4",
+ "@biomejs/biome": "^2.2.4",
  "@changesets/changelog-github": "^0.5.1",
- "@changesets/cli": "^2.26.2",
- "@eslint/js": "^9.0.0",
+ "@changesets/cli": "^2.29.7",
+ "@eslint/js": "^9.35.0",
  "@semantic-release/changelog": "^6.0.3",
- "@semantic-release/commit-analyzer": "^13.0.0",
+ "@semantic-release/commit-analyzer": "^13.0.1",
  "@semantic-release/git": "^10.0.1",
- "@semantic-release/github": "^11.0.0",
- "@semantic-release/npm": "^12.0.1",
- "@semantic-release/release-notes-generator": "^14.0.1",
- "@smithy/types": "^4.3.2",
- "@sveltejs/adapter-auto": "^6.0.0",
- "@sveltejs/kit": "^2.16.0",
- "@sveltejs/package": "^2.0.0",
- "@sveltejs/vite-plugin-svelte": "^5.0.0",
+ "@semantic-release/github": "^11.0.6",
+ "@semantic-release/npm": "^12.0.2",
+ "@semantic-release/release-notes-generator": "^14.1.0",
+ "@smithy/types": "^4.5.0",
+ "@sveltejs/adapter-auto": "^6.1.0",
+ "@sveltejs/kit": "^2.38.1",
+ "@sveltejs/package": "^2.5.0",
+ "@sveltejs/vite-plugin-svelte": "^5.1.1",
  "@types/cors": "^2.8.19",
  "@types/express": "^5.0.3",
- "@types/inquirer": "^9.0.7",
- "@types/node": "^20.0.0",
+ "@types/inquirer": "^9.0.9",
+ "@types/node": "^20.19.13",
  "@types/ws": "^8.18.1",
  "@types/yargs": "^17.0.33",
- "@typescript-eslint/eslint-plugin": "^8.0.0",
- "@typescript-eslint/parser": "^8.0.0",
+ "@typescript-eslint/eslint-plugin": "^8.43.0",
+ "@typescript-eslint/parser": "^8.43.0",
  "@vitest/coverage-v8": "^2.1.9",
  "conventional-changelog-conventionalcommits": "^9.1.0",
  "cors": "^2.8.5",
- "eslint": "^9.0.0",
+ "eslint": "^9.35.0",
  "express": "^5.1.0",
  "husky": "^9.1.7",
- "lint-staged": "^16.1.5",
- "playwright": "^1.52.0",
- "prettier": "^3.0.0",
- "publint": "^0.3.2",
- "puppeteer": "^24.10.0",
- "semantic-release": "^24.0.0",
+ "lint-staged": "^16.1.6",
+ "playwright": "^1.55.0",
+ "prettier": "^3.6.2",
+ "publint": "^0.3.12",
+ "puppeteer": "^24.20.0",
+ "semantic-release": "^24.2.8",
  "shell-quote": "^1.8.3",
  "svelte": "^5.0.0",
  "svelte-check": "^4.0.0",
@@ -280,13 +281,15 @@
  "onlyBuiltDependencies": [
  "esbuild",
  "protobufjs",
- "puppeteer"
+ "puppeteer",
+ "sqlite3"
  ],
  "overrides": {
  "esbuild@<=0.24.2": ">=0.25.0",
  "cookie@<0.7.0": ">=0.7.0",
  "@eslint/plugin-kit@<0.3.4": ">=0.3.4",
- "tmp@<=0.2.3": ">=0.2.4"
+ "tmp@<=0.2.3": ">=0.2.4",
+ "axios@<1.8.2": ">=1.8.2"
  }
  },
  "os": [