@juspay/neurolink 7.43.0 → 7.45.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/CHANGELOG.md +12 -10
  2. package/dist/cli/loop/optionsSchema.d.ts +1 -1
  3. package/dist/core/factory.d.ts +3 -1
  4. package/dist/core/factory.js +5 -3
  5. package/dist/factories/providerFactory.d.ts +3 -3
  6. package/dist/factories/providerFactory.js +3 -3
  7. package/dist/factories/providerRegistry.js +6 -6
  8. package/dist/lib/core/factory.d.ts +3 -1
  9. package/dist/lib/core/factory.js +5 -3
  10. package/dist/lib/factories/providerFactory.d.ts +3 -3
  11. package/dist/lib/factories/providerFactory.js +3 -3
  12. package/dist/lib/factories/providerRegistry.js +6 -6
  13. package/dist/lib/memory/mem0Initializer.d.ts +44 -0
  14. package/dist/lib/memory/mem0Initializer.js +42 -0
  15. package/dist/lib/neurolink.d.ts +12 -0
  16. package/dist/lib/neurolink.js +161 -3
  17. package/dist/lib/providers/amazonBedrock.d.ts +2 -1
  18. package/dist/lib/providers/amazonBedrock.js +6 -4
  19. package/dist/lib/providers/amazonSagemaker.d.ts +1 -1
  20. package/dist/lib/providers/amazonSagemaker.js +2 -2
  21. package/dist/lib/providers/googleVertex.d.ts +1 -1
  22. package/dist/lib/providers/googleVertex.js +9 -10
  23. package/dist/lib/providers/sagemaker/config.d.ts +7 -5
  24. package/dist/lib/providers/sagemaker/config.js +11 -6
  25. package/dist/lib/types/conversation.d.ts +8 -0
  26. package/dist/lib/types/generateTypes.d.ts +2 -0
  27. package/dist/lib/types/streamTypes.d.ts +1 -0
  28. package/dist/memory/mem0Initializer.d.ts +44 -0
  29. package/dist/memory/mem0Initializer.js +42 -0
  30. package/dist/neurolink.d.ts +12 -0
  31. package/dist/neurolink.js +161 -3
  32. package/dist/providers/amazonBedrock.d.ts +2 -1
  33. package/dist/providers/amazonBedrock.js +6 -4
  34. package/dist/providers/amazonSagemaker.d.ts +1 -1
  35. package/dist/providers/amazonSagemaker.js +2 -2
  36. package/dist/providers/googleVertex.d.ts +1 -1
  37. package/dist/providers/googleVertex.js +9 -10
  38. package/dist/providers/sagemaker/config.d.ts +7 -5
  39. package/dist/providers/sagemaker/config.js +11 -6
  40. package/dist/types/conversation.d.ts +8 -0
  41. package/dist/types/generateTypes.d.ts +2 -0
  42. package/dist/types/streamTypes.d.ts +1 -0
  43. package/package.json +48 -45
package/dist/neurolink.js CHANGED
@@ -87,6 +87,40 @@ export class NeuroLink {
     enableOrchestration;
     // HITL (Human-in-the-Loop) support
     hitlManager;
+    // Mem0 memory instance and config for conversation context
+    mem0Instance;
+    mem0Config;
+    /**
+     * Simple sync config setup for mem0
+     */
+    initializeMem0Config() {
+        const config = this.conversationMemoryConfig?.conversationMemory;
+        if (!config?.mem0Enabled) {
+            return false;
+        }
+        this.mem0Config = config.mem0Config;
+        return true;
+    }
+    /**
+     * Async initialization called during generate/stream
+     */
+    async ensureMem0Ready() {
+        if (this.mem0Instance !== undefined) {
+            return this.mem0Instance;
+        }
+        if (!this.initializeMem0Config()) {
+            this.mem0Instance = null;
+            return null;
+        }
+        // Import and initialize from separate file
+        const { initializeMem0 } = await import("./memory/mem0Initializer.js");
+        if (!this.mem0Config) {
+            this.mem0Instance = null;
+            return null;
+        }
+        this.mem0Instance = await initializeMem0(this.mem0Config);
+        return this.mem0Instance;
+    }
     /**
      * Context storage for tool execution
      * This context will be merged with any runtime context passed by the AI model
@@ -309,6 +343,13 @@ export class NeuroLink {
             });
         }
     }
+    /** Format memory context for prompt inclusion */
+    formatMemoryContext(memoryContext, currentInput) {
+        return `Context from previous conversations:
+${memoryContext}
+
+Current user's request: ${currentInput}`;
+    }
     /**
      * Set up HITL event forwarding to main emitter
      */
@@ -958,6 +999,31 @@ export class NeuroLink {
         if (!options.input?.text || typeof options.input.text !== "string") {
             throw new Error("Input text is required and must be a non-empty string");
         }
+        if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+            options.context?.userId) {
+            try {
+                const mem0 = await this.ensureMem0Ready();
+                if (!mem0) {
+                    logger.debug("Mem0 not available, continuing without memory retrieval");
+                }
+                else {
+                    const memories = await mem0.search(options.input.text, {
+                        userId: options.context.userId,
+                        limit: 5,
+                    });
+                    if (memories?.results?.length > 0) {
+                        // Enhance the input with memory context
+                        const memoryContext = memories.results
+                            .map((m) => m.memory)
+                            .join("\n");
+                        options.input.text = this.formatMemoryContext(memoryContext, options.input.text);
+                    }
+                }
+            }
+            catch (error) {
+                logger.warn("Mem0 memory retrieval failed:", error);
+            }
+        }
         const startTime = Date.now();
         // Apply orchestration if enabled and no specific provider/model requested
         if (this.enableOrchestration && !options.provider && !options.model) {
@@ -1016,6 +1082,7 @@
             evaluationDomain: options.evaluationDomain,
             toolUsageContext: options.toolUsageContext,
             input: options.input, // This includes text, images, and content arrays
+            region: options.region,
         };
         // Apply factory enhancement using centralized utilities
         const textOptions = enhanceTextGenerationOptions(baseOptions, factoryResult);
@@ -1090,6 +1157,37 @@
                 }
                 : undefined,
         };
+        if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+            options.context?.userId &&
+            generateResult.content) {
+            // Non-blocking memory storage - run in background
+            setImmediate(async () => {
+                try {
+                    const mem0 = await this.ensureMem0Ready();
+                    if (mem0) {
+                        // Store complete conversation turn (user + AI messages)
+                        const conversationTurn = [
+                            { role: "user", content: options.input.text },
+                            { role: "system", content: generateResult.content },
+                        ];
+                        await mem0.add(JSON.stringify(conversationTurn), {
+                            userId: options.context?.userId,
+                            metadata: {
+                                timestamp: new Date().toISOString(),
+                                provider: generateResult.provider,
+                                model: generateResult.model,
+                                type: "conversation_turn",
+                                async_mode: true,
+                            },
+                        });
+                    }
+                }
+                catch (error) {
+                    // Non-blocking: Log error but don't fail the generation
+                    logger.warn("Mem0 memory storage failed:", error);
+                }
+            });
+        }
         return generateResult;
     }
     /**
@@ -1313,7 +1411,8 @@
         const conversationMessages = await getConversationMessages(this.conversationMemory, options);
         // Create provider and generate
         const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
-        this);
+        this, // Pass SDK instance
+        options.region);
         // ADD: Emit connection events for all providers (Bedrock-compatible)
         this.emitter.emit("connected");
         this.emitter.emit("message", `${providerName} provider initialized successfully`);
@@ -1423,7 +1522,8 @@
         // Get conversation messages for context
         const conversationMessages = await getConversationMessages(this.conversationMemory, options);
         const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
-        this);
+        this, // Pass SDK instance
+        options.region);
         // ADD: Emit connection events for successful provider creation (Bedrock-compatible)
         this.emitter.emit("connected");
         this.emitter.emit("message", `${providerName} provider initialized successfully`);
@@ -1622,6 +1722,33 @@
         // Initialize MCP
         await this.initializeMCP();
         const _originalPrompt = options.input.text;
+        if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+            options.context?.userId) {
+            try {
+                const mem0 = await this.ensureMem0Ready();
+                if (!mem0) {
+                    // Continue without memories if mem0 is not available
+                    logger.debug("Mem0 not available, continuing without memory retrieval");
+                }
+                else {
+                    const memories = await mem0.search(options.input.text, {
+                        userId: options.context.userId,
+                        limit: 5,
+                    });
+                    if (memories?.results?.length > 0) {
+                        // Enhance the input with memory context
+                        const memoryContext = memories.results
+                            .map((m) => m.memory)
+                            .join("\n");
+                        options.input.text = this.formatMemoryContext(memoryContext, options.input.text);
+                    }
+                }
+            }
+            catch (error) {
+                // Non-blocking: Log error but continue with streaming
+                logger.warn("Mem0 memory retrieval failed:", error);
+            }
+        }
         // Apply orchestration if enabled and no specific provider/model requested
         if (this.enableOrchestration && !options.provider && !options.model) {
             try {
@@ -1687,6 +1814,36 @@
                     });
                 }
             }
+            if (self.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+                enhancedOptions.context?.userId &&
+                accumulatedContent.trim()) {
+                // Non-blocking memory storage - run in background
+                setImmediate(async () => {
+                    try {
+                        const mem0 = await self.ensureMem0Ready();
+                        if (mem0) {
+                            // Store complete conversation turn (user + AI messages)
+                            const conversationTurn = [
+                                { role: "user", content: originalPrompt },
+                                { role: "system", content: accumulatedContent.trim() },
+                            ];
+                            await mem0.add(JSON.stringify(conversationTurn), {
+                                userId: enhancedOptions.context?.userId,
+                                metadata: {
+                                    timestamp: new Date().toISOString(),
+                                    type: "conversation_turn_stream",
+                                    userMessage: originalPrompt,
+                                    async_mode: true,
+                                    aiResponse: accumulatedContent.trim(),
+                                },
+                            });
+                        }
+                    }
+                    catch (error) {
+                        logger.warn("Mem0 memory storage failed:", error);
+                    }
+                });
+            }
         }
         })(this);
         const streamResult = await this.processStreamResult(mcpStream, enhancedOptions, factoryResult);
@@ -1743,7 +1900,8 @@
         // Simplified placeholder - in the actual implementation this would contain the complex MCP stream logic
         const providerName = await getBestProvider(options.provider);
         const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
-        this);
+        this, // Pass SDK instance
+        options.region);
         // Enable tool execution for the provider using BaseProvider method
         provider.setupToolExecutor({
             customTools: this.getCustomTools(),
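
The changes above wire mem0 into the generate and stream paths: up to five memories are retrieved and prepended via formatMemoryContext() before the model call, and the completed turn is written back with setImmediate() so storage never blocks the response. A minimal usage sketch follows; it assumes the NeuroLink constructor accepts a conversationMemory block shaped like ConversationMemoryConfig and that generate() is the public entry point for this code path. Neither the constructor option name nor the mem0Config values below are confirmed by this diff; they are illustrative only.

    import { NeuroLink } from "@juspay/neurolink";

    // Hypothetical wiring: the diff only shows the config being read from
    // this.conversationMemoryConfig?.conversationMemory, not how it is supplied.
    const neurolink = new NeuroLink({
      conversationMemory: {
        enabled: true,
        mem0Enabled: true, // new flag in 7.45.0
        mem0Config: {
          // mem0ai/oss MemoryConfig shape; values are placeholders
          vectorStore: { provider: "memory", config: { collectionName: "neurolink" } },
        },
      },
    });

    // With context.userId set, generate() searches mem0 (limit 5), prepends any
    // matches via formatMemoryContext(), and stores the finished turn in the
    // background via setImmediate().
    const result = await neurolink.generate({
      input: { text: "What did we decide about the deployment region?" },
      context: { userId: "user-123" },
    });
    console.log(result.content);
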
@@ -6,7 +6,8 @@ import type { NeuroLink } from "../neurolink.js";
 export declare class AmazonBedrockProvider extends BaseProvider {
     private bedrockClient;
     private conversationHistory;
-    constructor(modelName?: string, neurolink?: NeuroLink);
+    private region;
+    constructor(modelName?: string, neurolink?: NeuroLink, region?: string);
     /**
      * Perform initial health check to catch credential/connectivity issues early
      * This prevents the health check failure we saw in production logs
@@ -7,8 +7,10 @@ import { convertZodToJsonSchema } from "../utils/schemaConversion.js";
 export class AmazonBedrockProvider extends BaseProvider {
     bedrockClient;
     conversationHistory = [];
-    constructor(modelName, neurolink) {
+    region;
+    constructor(modelName, neurolink, region) {
         super(modelName, "bedrock", neurolink);
+        this.region = region || process.env.AWS_REGION || "us-east-1";
         logger.debug("[AmazonBedrockProvider] Starting constructor with extensive logging for debugging");
         // Log environment variables for debugging
         logger.debug(`[AmazonBedrockProvider] Environment check: AWS_REGION=${process.env.AWS_REGION || "undefined"}, AWS_ACCESS_KEY_ID=${process.env.AWS_ACCESS_KEY_ID ? "SET" : "undefined"}, AWS_SECRET_ACCESS_KEY=${process.env.AWS_SECRET_ACCESS_KEY ? "SET" : "undefined"}`);
@@ -17,14 +19,14 @@ export class AmazonBedrockProvider extends BaseProvider {
         // Absolutely no proxy interference - let AWS SDK handle everything natively
         logger.debug("[AmazonBedrockProvider] Creating BedrockRuntimeClient with clean configuration");
         this.bedrockClient = new BedrockRuntimeClient({
-            region: process.env.AWS_REGION || "us-east-1",
+            region: this.region,
            // Clean configuration - AWS SDK will handle credentials via:
            // 1. IAM roles (preferred in production)
            // 2. Environment variables
            // 3. AWS config files
            // 4. Instance metadata
         });
-        logger.debug(`[AmazonBedrockProvider] Successfully created BedrockRuntimeClient with model: ${this.modelName}, region: ${process.env.AWS_REGION || "us-east-1"}`);
+        logger.debug(`[AmazonBedrockProvider] Successfully created BedrockRuntimeClient with model: ${this.modelName}, region: ${this.region}`);
         // Immediate health check to catch credential issues early
         this.performInitialHealthCheck();
     }
@@ -39,7 +41,7 @@ export class AmazonBedrockProvider extends BaseProvider {
      */
     async performInitialHealthCheck() {
         const bedrockClient = new BedrockClient({
-            region: process.env.AWS_REGION || "us-east-1",
+            region: this.region,
         });
         try {
             logger.debug("[AmazonBedrockProvider] Starting initial health check to validate credentials and connectivity");
@@ -16,7 +16,7 @@ export declare class AmazonSageMakerProvider extends BaseProvider {
     private sagemakerModel;
     private sagemakerConfig;
     private modelConfig;
-    constructor(modelName?: string, endpointName?: string);
+    constructor(modelName?: string, endpointName?: string, region?: string);
     protected getProviderName(): AIProviderName;
     protected getDefaultModel(): string;
     protected getAISDKModel(): LanguageModelV1;
@@ -17,11 +17,11 @@ export class AmazonSageMakerProvider extends BaseProvider {
     sagemakerModel;
     sagemakerConfig;
     modelConfig;
-    constructor(modelName, endpointName) {
+    constructor(modelName, endpointName, region) {
         super(modelName, "sagemaker");
         try {
             // Load and validate configuration
-            this.sagemakerConfig = getSageMakerConfig();
+            this.sagemakerConfig = getSageMakerConfig(region);
             this.modelConfig = getSageMakerModelConfig(endpointName || getDefaultSageMakerEndpoint());
             // Create the proper LanguageModel (v2) implementation
             this.sagemakerModel = new SageMakerLanguageModel(this.modelName, this.sagemakerConfig, this.modelConfig);
@@ -25,7 +25,7 @@ export declare class GoogleVertexProvider extends BaseProvider {
     private static readonly MAX_CACHE_SIZE;
     private static maxTokensCache;
     private static maxTokensCacheTime;
-    constructor(modelName?: string, _providerName?: string, sdk?: unknown);
+    constructor(modelName?: string, _providerName?: string, sdk?: unknown, region?: string);
     protected getProviderName(): AIProviderName;
     protected getDefaultModel(): string;
     /**
@@ -46,10 +46,10 @@ const hasGoogleCredentials = () => {
         process.env.GOOGLE_AUTH_PRIVATE_KEY));
 };
 // Enhanced Vertex settings creation with authentication fallback and proxy support
-const createVertexSettings = async () => {
+const createVertexSettings = async (region) => {
     const baseSettings = {
         project: getVertexProjectId(),
-        location: getVertexLocation(),
+        location: region || getVertexLocation(),
         fetch: createProxyFetch(),
     };
     // 🎯 OPTION 2: Create credentials file from environment variables at runtime
@@ -157,8 +157,7 @@ const createVertexSettings = async () => {
             private_key: requiredEnvVars.private_key.replace(/\\n/g, "\n"),
             client_email: requiredEnvVars.client_email,
             client_id: requiredEnvVars.client_id || "",
-            auth_uri: requiredEnvVars.auth_uri ||
-                "https://accounts.google.com/o/oauth2/auth",
+            auth_uri: requiredEnvVars.auth_uri || "https://accounts.google.com/o/oauth2/auth",
             token_uri: requiredEnvVars.token_uri || "https://oauth2.googleapis.com/token",
             auth_provider_x509_cert_url: requiredEnvVars.auth_provider_x509_cert_url ||
                 "https://www.googleapis.com/oauth2/v1/certs",
@@ -199,8 +198,8 @@ const createVertexSettings = async () => {
     return baseSettings;
 };
 // Create Anthropic-specific Vertex settings with the same authentication and proxy support
-const createVertexAnthropicSettings = async () => {
-    const baseVertexSettings = await createVertexSettings();
+const createVertexAnthropicSettings = async (region) => {
+    const baseVertexSettings = await createVertexSettings(region);
     // GoogleVertexAnthropicProviderSettings extends GoogleVertexProviderSettings
     // so we can use the same settings with proper typing
     return {
@@ -241,7 +240,7 @@ export class GoogleVertexProvider extends BaseProvider {
     // Memory-managed cache for maxTokens handling decisions to optimize streaming performance
     static maxTokensCache = new Map();
     static maxTokensCacheTime = 0;
-    constructor(modelName, _providerName, sdk) {
+    constructor(modelName, _providerName, sdk, region) {
         super(modelName, "vertex", sdk);
         // Validate Google Cloud credentials - now using consolidated utility
         if (!hasGoogleCredentials()) {
@@ -249,7 +248,7 @@ export class GoogleVertexProvider extends BaseProvider {
         }
         // Initialize Google Cloud configuration
         this.projectId = getVertexProjectId();
-        this.location = getVertexLocation();
+        this.location = region || getVertexLocation();
         logger.debug("Google Vertex AI BaseProvider v2 initialized", {
             modelName: this.modelName,
             projectId: this.projectId,
@@ -360,7 +359,7 @@ export class GoogleVertexProvider extends BaseProvider {
             message: "Starting Vertex settings creation with network configuration analysis",
         });
         try {
-            const vertexSettings = await createVertexSettings();
+            const vertexSettings = await createVertexSettings(this.location);
             const vertexSettingsEndTime = process.hrtime.bigint();
             const vertexSettingsDurationNs = vertexSettingsEndTime - vertexSettingsStartTime;
             logger.debug(`[GoogleVertexProvider] ✅ LOG_POINT_V009_VERTEX_SETTINGS_SUCCESS`, {
@@ -920,7 +919,7 @@ export class GoogleVertexProvider extends BaseProvider {
             projectId: projectValidation.projectId,
             region: projectValidation.region,
         });
-        const vertexAnthropicSettings = await createVertexAnthropicSettings();
+        const vertexAnthropicSettings = await createVertexAnthropicSettings(this.location);
         // 7. Settings Validation
         if (!vertexAnthropicSettings.project ||
             !vertexAnthropicSettings.location) {
@@ -8,15 +8,17 @@ import type { SageMakerConfig, SageMakerModelConfig } from "./types.js";
 /**
  * Load and validate SageMaker configuration from environment variables
  *
- * Environment variable priority:
- * 1. SAGEMAKER_* variables (highest priority)
- * 2. AWS_* variables (standard AWS SDK variables)
- * 3. Default values (lowest priority)
+ * Region priority:
+ * 1. region parameter (highest priority)
+ * 2. SAGEMAKER_REGION environment variable
+ * 3. AWS_REGION environment variable
+ * 4. Default value "us-east-1" (lowest priority)
  *
+ * @param region - Optional region parameter override
  * @returns Validated SageMaker configuration
  * @throws {Error} When required configuration is missing or invalid
  */
-export declare function getSageMakerConfig(): SageMakerConfig;
+export declare function getSageMakerConfig(region?: string): SageMakerConfig;
 /**
  * Load and validate SageMaker model configuration
  *
@@ -45,21 +45,26 @@ const modelConfigCache = new Map();
 /**
  * Load and validate SageMaker configuration from environment variables
  *
- * Environment variable priority:
- * 1. SAGEMAKER_* variables (highest priority)
- * 2. AWS_* variables (standard AWS SDK variables)
- * 3. Default values (lowest priority)
+ * Region priority:
+ * 1. region parameter (highest priority)
+ * 2. SAGEMAKER_REGION environment variable
+ * 3. AWS_REGION environment variable
+ * 4. Default value "us-east-1" (lowest priority)
 *
+ * @param region - Optional region parameter override
 * @returns Validated SageMaker configuration
 * @throws {Error} When required configuration is missing or invalid
 */
-export function getSageMakerConfig() {
+export function getSageMakerConfig(region) {
     // Return cached config if available
     if (configCache) {
         return configCache;
     }
     const config = {
-        region: process.env.SAGEMAKER_REGION || process.env.AWS_REGION || "us-east-1",
+        region: region ||
+            process.env.SAGEMAKER_REGION ||
+            process.env.AWS_REGION ||
+            "us-east-1",
         accessKeyId: process.env.AWS_ACCESS_KEY_ID || "",
         secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY || "",
         sessionToken: process.env.AWS_SESSION_TOKEN,
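
The rewritten doc comment spells out the new region resolution order; a standalone sketch of the same fallback chain (not code from the package) is shown below for reference. Note also that, as the diff stands, the cached configCache is returned before the parameter is applied, so an explicit region only takes effect on the first, uncached call.

    // Mirrors the region priority documented for getSageMakerConfig() in 7.45.0:
    // explicit parameter > SAGEMAKER_REGION > AWS_REGION > "us-east-1".
    function resolveSageMakerRegion(region?: string): string {
      return (
        region ||
        process.env.SAGEMAKER_REGION ||
        process.env.AWS_REGION ||
        "us-east-1"
      );
    }

    // resolveSageMakerRegion("ap-south-1") returns "ap-south-1" even when
    // SAGEMAKER_REGION or AWS_REGION is set.
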
@@ -2,6 +2,10 @@
  * Conversation Memory Types for NeuroLink
  * Provides type-safe conversation storage and context management
  */
+import type { MemoryConfig } from "mem0ai/oss";
+/**
+ * Mem0 configuration interface matching mem0ai/oss MemoryConfig structure
+ */
 /**
  * Configuration for conversation memory feature
  */
@@ -22,6 +26,10 @@ export interface ConversationMemoryConfig {
     summarizationProvider?: string;
     /** Model to use for summarization */
     summarizationModel?: string;
+    /** Enable mem0 integration for conversation memory */
+    mem0Enabled?: boolean;
+    /** Configuration for mem0 integration */
+    mem0Config?: MemoryConfig;
 }
 /**
  * Complete memory for a conversation session
@@ -22,6 +22,7 @@ export type GenerateOptions = {
     };
     provider?: AIProviderName | string;
     model?: string;
+    region?: string;
     temperature?: number;
     maxTokens?: number;
     systemPrompt?: string;
@@ -143,6 +144,7 @@ export type TextGenerationOptions = {
     };
     provider?: AIProviderName;
     model?: string;
+    region?: string;
     temperature?: number;
     maxTokens?: number;
     systemPrompt?: string;
@@ -137,6 +137,7 @@ export interface StreamOptions {
     };
     provider?: AIProviderName | string;
     model?: string;
+    region?: string;
     temperature?: number;
     maxTokens?: number;
     systemPrompt?: string;
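
These type additions expose region as a per-call option on GenerateOptions, TextGenerationOptions, and StreamOptions, matching the region parameter threaded through AIProviderFactory.createProvider and the Bedrock, SageMaker, and Vertex constructors above. A hedged call sketch, reusing the neurolink instance from the earlier example; it assumes GenerateOptions is exported from the package entry point and that context is part of the options type, as the runtime code suggests.

    import type { GenerateOptions } from "@juspay/neurolink";

    // region overrides the provider default (AWS_REGION for Bedrock/SageMaker,
    // getVertexLocation() for Vertex) for this call only.
    const options: GenerateOptions = {
      input: { text: "Summarise the incident report" },
      provider: "bedrock",
      region: "eu-west-1",
      context: { userId: "user-123" }, // still enables mem0 retrieval/storage
    };

    const result = await neurolink.generate(options);
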
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "7.43.0",
+  "version": "7.45.0",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {
     "name": "Juspay Technologies",
@@ -143,24 +143,24 @@
   },
   "dependencies": {
     "@ai-sdk/anthropic": "^1.2.12",
-    "@ai-sdk/azure": "^1.3.24",
-    "@ai-sdk/google": "^1.2.19",
-    "@ai-sdk/google-vertex": "^2.2.0",
-    "@ai-sdk/mistral": "^1.0.0",
-    "@ai-sdk/openai": "^1.0.0",
+    "@ai-sdk/azure": "^1.3.25",
+    "@ai-sdk/google": "^1.2.22",
+    "@ai-sdk/google-vertex": "^2.2.27",
+    "@ai-sdk/mistral": "^1.2.8",
+    "@ai-sdk/openai": "^1.3.24",
     "@ai-sdk/provider": "^1.1.3",
     "@ai-sdk/provider-utils": "^2.2.8",
-    "@aws-sdk/client-bedrock": "^3.876.0",
-    "@aws-sdk/client-bedrock-runtime": "^3.876.0",
-    "@aws-sdk/client-sagemaker": "^3.862.0",
-    "@aws-sdk/client-sagemaker-runtime": "^3.862.0",
-    "@aws-sdk/credential-provider-node": "^3.876.0",
+    "@aws-sdk/client-bedrock": "^3.886.0",
+    "@aws-sdk/client-bedrock-runtime": "^3.886.0",
+    "@aws-sdk/client-sagemaker": "^3.886.0",
+    "@aws-sdk/client-sagemaker-runtime": "^3.886.0",
+    "@aws-sdk/credential-provider-node": "^3.886.0",
     "@aws-sdk/types": "^3.862.0",
     "@google-cloud/vertexai": "^1.10.0",
-    "@google/genai": "^1.16.0",
+    "@google/genai": "^1.19.0",
     "@google/generative-ai": "^0.24.1",
-    "@huggingface/inference": "^2.8.0",
-    "@modelcontextprotocol/sdk": "^1.13.0",
+    "@huggingface/inference": "^2.8.1",
+    "@modelcontextprotocol/sdk": "^1.17.5",
     "@opentelemetry/api": "^1.9.0",
     "@opentelemetry/auto-instrumentations-node": "^0.52.1",
     "@opentelemetry/exporter-logs-otlp-http": "^0.54.2",
@@ -174,20 +174,21 @@
     "@opentelemetry/sdk-logs": "^0.54.2",
     "@opentelemetry/sdk-metrics": "^1.30.1",
     "@opentelemetry/sdk-node": "^0.54.2",
-    "@opentelemetry/semantic-conventions": "^1.34.0",
+    "@opentelemetry/semantic-conventions": "^1.37.0",
     "ai": "4.3.16",
-    "chalk": "^5.3.0",
-    "dotenv": "^16.5.0",
-    "inquirer": "^9.2.15",
+    "chalk": "^5.6.2",
+    "dotenv": "^16.6.1",
+    "inquirer": "^9.3.7",
     "json-schema-to-zod": "^2.6.1",
-    "mathjs": "^14.5.3",
+    "mathjs": "^14.7.0",
+    "mem0ai": "^2.1.38",
     "nanoid": "^5.1.5",
     "ollama-ai-provider": "^1.2.0",
     "ora": "^7.0.1",
     "p-limit": "^6.2.0",
     "reconnecting-eventsource": "^1.6.4",
     "redis": "^5.8.2",
-    "undici": "^6.6.2",
+    "undici": "^6.21.3",
     "uuid": "^11.1.0",
     "ws": "^8.18.3",
     "yargs": "^17.7.2",
@@ -195,41 +196,41 @@
     "zod-to-json-schema": "^3.24.6"
   },
   "devDependencies": {
-    "@biomejs/biome": "^2.1.4",
+    "@biomejs/biome": "^2.2.4",
     "@changesets/changelog-github": "^0.5.1",
-    "@changesets/cli": "^2.26.2",
-    "@eslint/js": "^9.0.0",
+    "@changesets/cli": "^2.29.7",
+    "@eslint/js": "^9.35.0",
     "@semantic-release/changelog": "^6.0.3",
-    "@semantic-release/commit-analyzer": "^13.0.0",
+    "@semantic-release/commit-analyzer": "^13.0.1",
     "@semantic-release/git": "^10.0.1",
-    "@semantic-release/github": "^11.0.0",
-    "@semantic-release/npm": "^12.0.1",
-    "@semantic-release/release-notes-generator": "^14.0.1",
-    "@smithy/types": "^4.3.2",
-    "@sveltejs/adapter-auto": "^6.0.0",
-    "@sveltejs/kit": "^2.16.0",
-    "@sveltejs/package": "^2.0.0",
-    "@sveltejs/vite-plugin-svelte": "^5.0.0",
+    "@semantic-release/github": "^11.0.6",
+    "@semantic-release/npm": "^12.0.2",
+    "@semantic-release/release-notes-generator": "^14.1.0",
+    "@smithy/types": "^4.5.0",
+    "@sveltejs/adapter-auto": "^6.1.0",
+    "@sveltejs/kit": "^2.38.1",
+    "@sveltejs/package": "^2.5.0",
+    "@sveltejs/vite-plugin-svelte": "^5.1.1",
     "@types/cors": "^2.8.19",
     "@types/express": "^5.0.3",
-    "@types/inquirer": "^9.0.7",
-    "@types/node": "^20.0.0",
+    "@types/inquirer": "^9.0.9",
+    "@types/node": "^20.19.13",
     "@types/ws": "^8.18.1",
     "@types/yargs": "^17.0.33",
-    "@typescript-eslint/eslint-plugin": "^8.0.0",
-    "@typescript-eslint/parser": "^8.0.0",
+    "@typescript-eslint/eslint-plugin": "^8.43.0",
+    "@typescript-eslint/parser": "^8.43.0",
     "@vitest/coverage-v8": "^2.1.9",
     "conventional-changelog-conventionalcommits": "^9.1.0",
     "cors": "^2.8.5",
-    "eslint": "^9.0.0",
+    "eslint": "^9.35.0",
     "express": "^5.1.0",
     "husky": "^9.1.7",
-    "lint-staged": "^16.1.5",
-    "playwright": "^1.52.0",
-    "prettier": "^3.0.0",
-    "publint": "^0.3.2",
-    "puppeteer": "^24.10.0",
-    "semantic-release": "^24.0.0",
+    "lint-staged": "^16.1.6",
+    "playwright": "^1.55.0",
+    "prettier": "^3.6.2",
+    "publint": "^0.3.12",
+    "puppeteer": "^24.20.0",
+    "semantic-release": "^24.2.8",
     "shell-quote": "^1.8.3",
     "svelte": "^5.0.0",
     "svelte-check": "^4.0.0",
@@ -280,13 +281,15 @@
     "onlyBuiltDependencies": [
       "esbuild",
       "protobufjs",
-      "puppeteer"
+      "puppeteer",
+      "sqlite3"
     ],
     "overrides": {
       "esbuild@<=0.24.2": ">=0.25.0",
       "cookie@<0.7.0": ">=0.7.0",
       "@eslint/plugin-kit@<0.3.4": ">=0.3.4",
-      "tmp@<=0.2.3": ">=0.2.4"
+      "tmp@<=0.2.3": ">=0.2.4",
+      "axios@<1.8.2": ">=1.8.2"
     }
   },
   "os": [