@juspay/neurolink 8.3.0 → 8.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/CHANGELOG.md +12 -0
  2. package/README.md +1 -0
  3. package/dist/adapters/providerImageAdapter.d.ts +1 -1
  4. package/dist/adapters/providerImageAdapter.js +62 -0
  5. package/dist/agent/directTools.d.ts +0 -72
  6. package/dist/agent/directTools.js +3 -74
  7. package/dist/cli/commands/config.d.ts +18 -18
  8. package/dist/cli/factories/commandFactory.js +1 -0
  9. package/dist/constants/enums.d.ts +1 -0
  10. package/dist/constants/enums.js +3 -1
  11. package/dist/constants/tokens.d.ts +3 -0
  12. package/dist/constants/tokens.js +3 -0
  13. package/dist/core/baseProvider.d.ts +56 -53
  14. package/dist/core/baseProvider.js +107 -1095
  15. package/dist/core/constants.d.ts +3 -0
  16. package/dist/core/constants.js +6 -3
  17. package/dist/core/modelConfiguration.js +10 -0
  18. package/dist/core/modules/GenerationHandler.d.ts +63 -0
  19. package/dist/core/modules/GenerationHandler.js +230 -0
  20. package/dist/core/modules/MessageBuilder.d.ts +39 -0
  21. package/dist/core/modules/MessageBuilder.js +179 -0
  22. package/dist/core/modules/StreamHandler.d.ts +52 -0
  23. package/dist/core/modules/StreamHandler.js +103 -0
  24. package/dist/core/modules/TelemetryHandler.d.ts +64 -0
  25. package/dist/core/modules/TelemetryHandler.js +170 -0
  26. package/dist/core/modules/ToolsManager.d.ts +98 -0
  27. package/dist/core/modules/ToolsManager.js +521 -0
  28. package/dist/core/modules/Utilities.d.ts +88 -0
  29. package/dist/core/modules/Utilities.js +329 -0
  30. package/dist/factories/providerRegistry.js +1 -1
  31. package/dist/lib/adapters/providerImageAdapter.d.ts +1 -1
  32. package/dist/lib/adapters/providerImageAdapter.js +62 -0
  33. package/dist/lib/agent/directTools.d.ts +0 -72
  34. package/dist/lib/agent/directTools.js +3 -74
  35. package/dist/lib/constants/enums.d.ts +1 -0
  36. package/dist/lib/constants/enums.js +3 -1
  37. package/dist/lib/constants/tokens.d.ts +3 -0
  38. package/dist/lib/constants/tokens.js +3 -0
  39. package/dist/lib/core/baseProvider.d.ts +56 -53
  40. package/dist/lib/core/baseProvider.js +107 -1095
  41. package/dist/lib/core/constants.d.ts +3 -0
  42. package/dist/lib/core/constants.js +6 -3
  43. package/dist/lib/core/modelConfiguration.js +10 -0
  44. package/dist/lib/core/modules/GenerationHandler.d.ts +63 -0
  45. package/dist/lib/core/modules/GenerationHandler.js +231 -0
  46. package/dist/lib/core/modules/MessageBuilder.d.ts +39 -0
  47. package/dist/lib/core/modules/MessageBuilder.js +180 -0
  48. package/dist/lib/core/modules/StreamHandler.d.ts +52 -0
  49. package/dist/lib/core/modules/StreamHandler.js +104 -0
  50. package/dist/lib/core/modules/TelemetryHandler.d.ts +64 -0
  51. package/dist/lib/core/modules/TelemetryHandler.js +171 -0
  52. package/dist/lib/core/modules/ToolsManager.d.ts +98 -0
  53. package/dist/lib/core/modules/ToolsManager.js +522 -0
  54. package/dist/lib/core/modules/Utilities.d.ts +88 -0
  55. package/dist/lib/core/modules/Utilities.js +330 -0
  56. package/dist/lib/factories/providerRegistry.js +1 -1
  57. package/dist/lib/mcp/servers/agent/directToolsServer.js +0 -1
  58. package/dist/lib/memory/mem0Initializer.d.ts +32 -1
  59. package/dist/lib/memory/mem0Initializer.js +55 -2
  60. package/dist/lib/models/modelRegistry.js +44 -0
  61. package/dist/lib/neurolink.d.ts +1 -1
  62. package/dist/lib/neurolink.js +43 -10
  63. package/dist/lib/providers/amazonBedrock.js +59 -10
  64. package/dist/lib/providers/anthropic.js +2 -30
  65. package/dist/lib/providers/azureOpenai.js +2 -24
  66. package/dist/lib/providers/googleAiStudio.js +2 -24
  67. package/dist/lib/providers/googleVertex.js +2 -45
  68. package/dist/lib/providers/huggingFace.js +3 -31
  69. package/dist/lib/providers/litellm.d.ts +1 -1
  70. package/dist/lib/providers/litellm.js +110 -44
  71. package/dist/lib/providers/mistral.js +5 -32
  72. package/dist/lib/providers/ollama.d.ts +1 -0
  73. package/dist/lib/providers/ollama.js +476 -129
  74. package/dist/lib/providers/openAI.js +2 -28
  75. package/dist/lib/providers/openaiCompatible.js +3 -31
  76. package/dist/lib/types/content.d.ts +16 -113
  77. package/dist/lib/types/content.js +16 -2
  78. package/dist/lib/types/conversation.d.ts +3 -17
  79. package/dist/lib/types/generateTypes.d.ts +2 -2
  80. package/dist/lib/types/index.d.ts +2 -0
  81. package/dist/lib/types/index.js +2 -0
  82. package/dist/lib/types/multimodal.d.ts +282 -0
  83. package/dist/lib/types/multimodal.js +101 -0
  84. package/dist/lib/types/streamTypes.d.ts +2 -2
  85. package/dist/lib/utils/imageProcessor.d.ts +1 -1
  86. package/dist/lib/utils/messageBuilder.js +25 -2
  87. package/dist/lib/utils/multimodalOptionsBuilder.d.ts +1 -1
  88. package/dist/lib/utils/pdfProcessor.d.ts +9 -0
  89. package/dist/lib/utils/pdfProcessor.js +67 -9
  90. package/dist/mcp/servers/agent/directToolsServer.js +0 -1
  91. package/dist/memory/mem0Initializer.d.ts +32 -1
  92. package/dist/memory/mem0Initializer.js +55 -2
  93. package/dist/models/modelRegistry.js +44 -0
  94. package/dist/neurolink.d.ts +1 -1
  95. package/dist/neurolink.js +43 -10
  96. package/dist/providers/amazonBedrock.js +59 -10
  97. package/dist/providers/anthropic.js +2 -30
  98. package/dist/providers/azureOpenai.js +2 -24
  99. package/dist/providers/googleAiStudio.js +2 -24
  100. package/dist/providers/googleVertex.js +2 -45
  101. package/dist/providers/huggingFace.js +3 -31
  102. package/dist/providers/litellm.d.ts +1 -1
  103. package/dist/providers/litellm.js +110 -44
  104. package/dist/providers/mistral.js +5 -32
  105. package/dist/providers/ollama.d.ts +1 -0
  106. package/dist/providers/ollama.js +476 -129
  107. package/dist/providers/openAI.js +2 -28
  108. package/dist/providers/openaiCompatible.js +3 -31
  109. package/dist/types/content.d.ts +16 -113
  110. package/dist/types/content.js +16 -2
  111. package/dist/types/conversation.d.ts +3 -17
  112. package/dist/types/generateTypes.d.ts +2 -2
  113. package/dist/types/index.d.ts +2 -0
  114. package/dist/types/index.js +2 -0
  115. package/dist/types/multimodal.d.ts +282 -0
  116. package/dist/types/multimodal.js +100 -0
  117. package/dist/types/streamTypes.d.ts +2 -2
  118. package/dist/utils/imageProcessor.d.ts +1 -1
  119. package/dist/utils/messageBuilder.js +25 -2
  120. package/dist/utils/multimodalOptionsBuilder.d.ts +1 -1
  121. package/dist/utils/pdfProcessor.d.ts +9 -0
  122. package/dist/utils/pdfProcessor.js +67 -9
  123. package/package.json +5 -2
@@ -0,0 +1,101 @@
+ /**
+  * Multimodal Content Types for NeuroLink
+  *
+  * Central registry for all multimodal input/output types.
+  * This file consolidates types from content.ts and conversation.ts
+  * to provide a single source of truth for multimodal functionality.
+  *
+  * @module types/multimodal
+  *
+  * @example Basic Multimodal Input
+  * ```typescript
+  * import type { MultimodalInput } from './types/multimodal.js';
+  *
+  * const input: MultimodalInput = {
+  *   text: "What's in this image?",
+  *   images: [imageBuffer, "https://example.com/image.jpg"],
+  *   pdfFiles: [pdfBuffer]
+  * };
+  * ```
+  *
+  * @example Audio/Video Input (Future)
+  * ```typescript
+  * const avInput: MultimodalInput = {
+  *   text: "Transcribe this audio and analyze this video",
+  *   audioFiles: [audioBuffer],
+  *   videoFiles: ["path/to/video.mp4"]
+  * };
+  * ```
+  *
+  * @example Advanced Content Array
+  * ```typescript
+  * const advanced: MultimodalInput = {
+  *   text: "irrelevant", // ignored when content[] is provided
+  *   content: [
+  *     { type: "text", text: "Analyze these items:" },
+  *     { type: "image", data: imageBuffer, mediaType: "image/jpeg" },
+  *     { type: "pdf", data: pdfBuffer, metadata: { filename: "report.pdf" } }
+  *   ]
+  * };
+  * ```
+  */
+ // ============================================
+ // TYPE GUARDS
+ // ============================================
+ /**
+  * Type guard to check if content is TextContent
+  */
+ export function isTextContent(content) {
+     return content.type === "text";
+ }
+ /**
+  * Type guard to check if content is ImageContent
+  */
+ export function isImageContent(content) {
+     return content.type === "image";
+ }
+ /**
+  * Type guard to check if content is CSVContent
+  */
+ export function isCSVContent(content) {
+     return content.type === "csv";
+ }
+ /**
+  * Type guard to check if content is PDFContent
+  */
+ export function isPDFContent(content) {
+     return content.type === "pdf";
+ }
+ /**
+  * Type guard to check if content is AudioContent
+  */
+ export function isAudioContent(content) {
+     return content.type === "audio";
+ }
+ /**
+  * Type guard to check if content is VideoContent
+  */
+ export function isVideoContent(content) {
+     return content.type === "video";
+ }
+ /**
+  * Type guard to check if input contains multimodal content
+  * Now includes audio and video detection
+  */
+ export function isMultimodalInput(input) {
+     const maybeInput = input;
+     return !!(maybeInput?.images?.length ||
+         maybeInput?.csvFiles?.length ||
+         maybeInput?.pdfFiles?.length ||
+         maybeInput?.files?.length ||
+         maybeInput?.content?.length ||
+         maybeInput?.audioFiles?.length ||
+         maybeInput?.videoFiles?.length);
+ }
+ /**
+  * Type guard to check if message content is multimodal (array)
+  */
+ export function isMultimodalMessageContent(content) {
+     return Array.isArray(content);
+ }
+ //# sourceMappingURL=multimodal.js.map
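
The new type guards are plain discriminated-union checks on `content.type`. A minimal usage sketch (not part of the diff; it assumes `Content` and `MultimodalInput` are exported from `types/multimodal`, that the guards accept an arbitrary value, and that `ImageContent.mediaType` and `PDFContent.metadata.filename` exist as in the JSDoc examples above):

```typescript
import { isImageContent, isPDFContent, isMultimodalInput } from "./types/multimodal.js";
import type { Content, MultimodalInput } from "./types/multimodal.js";

// Narrow a Content part with the exported guards before touching type-specific fields.
function describePart(part: Content): string {
  if (isImageContent(part)) {
    return `image (${part.mediaType ?? "unknown media type"})`;
  }
  if (isPDFContent(part)) {
    return `pdf (${part.metadata?.filename ?? "unnamed"})`;
  }
  return part.type;
}

// isMultimodalInput() returns true as soon as any multimodal array is non-empty.
const input: MultimodalInput = { text: "Summarize this report", pdfFiles: [Buffer.from("%PDF-1.7")] };
console.log(isMultimodalInput(input));            // true — pdfFiles is non-empty
console.log(isMultimodalInput({ text: "plain" })); // false — text alone is not multimodal
```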
@@ -1,7 +1,7 @@
  import type { Tool } from "ai";
  import type { ValidationSchema, StandardRecord } from "./typeAliases.js";
  import type { AIModelProviderConfig } from "./providers.js";
- import type { TextContent, ImageContent } from "./content.js";
+ import type { Content } from "./content.js";
  import type { AnalyticsData, ToolExecutionEvent, ToolExecutionSummary } from "../types/index.js";
  import { AIProviderName } from "../constants/enums.js";
  import type { TokenUsage } from "./analytics.js";
@@ -129,7 +129,7 @@ export type StreamOptions = {
          csvFiles?: Array<Buffer | string>;
          pdfFiles?: Array<Buffer | string>;
          files?: Array<Buffer | string>;
-         content?: Array<TextContent | ImageContent>;
+         content?: Content[];
      };
      output?: {
          format?: "text" | "structured" | "json";
@@ -2,7 +2,7 @@
   * Image processing utilities for multimodal support
   * Handles format conversion for different AI providers
   */
- import type { ProcessedImage } from "../types/content.js";
+ import type { ProcessedImage } from "../types/multimodal.js";
  import type { FileProcessingResult } from "../types/fileTypes.js";
  /**
   * Image processor class for handling provider-specific image formatting
@@ -473,6 +473,26 @@ export async function buildMultimodalMessagesArray(options, provider, model) {
      if (hasConversationHistory) {
          systemPrompt = `${systemPrompt.trim()}${CONVERSATION_INSTRUCTIONS}`;
      }
+     // Add file handling guidance when multimodal files are present
+     const hasCSVFiles = (options.input.csvFiles && options.input.csvFiles.length > 0) ||
+         (options.input.files &&
+             options.input.files.some((f) => typeof f === "string" ? f.toLowerCase().endsWith(".csv") : false));
+     const hasPDFFiles = pdfFiles.length > 0;
+     if (hasCSVFiles || hasPDFFiles) {
+         const fileTypes = [];
+         if (hasPDFFiles) {
+             fileTypes.push("PDFs");
+         }
+         if (hasCSVFiles) {
+             fileTypes.push("CSVs");
+         }
+         systemPrompt += `\n\nIMPORTANT FILE HANDLING INSTRUCTIONS:
+ - File content (${fileTypes.join(", ")}, images) is already processed and included in this message
+ - DO NOT use GitHub tools (get_file_contents, search_code, etc.) for local files - they only work for remote repository files
+ - Analyze the provided file content directly without attempting to fetch or read files using tools
+ - GitHub MCP tools are ONLY for remote repository operations, not local filesystem access
+ - Use the file content shown in this message for your analysis`;
+     }
      // Add system message if we have one
      if (systemPrompt.trim()) {
          messages.push({
@@ -739,7 +759,10 @@ async function convertMultimodalToProviderFormat(text, images, pdfFiles, provide
              });
          }
      }
-     // Add PDFs using Vercel AI SDK standard format (works for all providers)
+     // Add PDFs using Vercel AI SDK standard format (works for all providers except Mistral)
+     // NOTE: Mistral API has a fundamental limitation - it does NOT support PDFs in any form.
+     // The API strictly requires image content to start with data:image/, rejecting data:application/pdf
+     // See: MISTRAL_PDF_FIX_SUMMARY.md for full investigation details
      content.push(...pdfFiles.map((pdf) => {
          logger.info(`[PDF] ✅ Added to content (Vercel AI SDK format): ${pdf.filename}`);
          return {
@@ -769,6 +792,6 @@ function extractFilename(file, index = 0) {
      return `file-${index + 1}`;
  }
  function buildCSVToolInstructions(filePath) {
-     return `\n**IMPORTANT**: For counting, aggregation, or statistical operations, use the analyzeCSV tool with filePath="${filePath}". The tool reads the file directly - do NOT pass CSV content.\n\nExample: analyzeCSV(filePath="${filePath}", operation="count_by_column", column="merchant_id")\n\n`;
+     return `\n**NOTE**: You can perform calculations directly on the CSV data shown above. For advanced operations on the full file (counting by column, grouping, etc.), you may optionally use the analyzeCSV tool with filePath="${filePath}".\n\nExample: analyzeCSV(filePath="${filePath}", operation="count_by_column", column="merchant_id")\n\n`;
  }
  //# sourceMappingURL=messageBuilder.js.map
@@ -45,7 +45,7 @@ export declare function buildMultimodalOptions(options: StreamOptions, providerN
      input: {
          text: string;
          images: (string | Buffer<ArrayBufferLike>)[] | undefined;
-         content: (import("../types/content.js").TextContent | import("../types/content.js").ImageContent)[] | undefined;
+         content: import("../types/multimodal.js").Content[] | undefined;
          files: (string | Buffer<ArrayBufferLike>)[] | undefined;
          csvFiles: (string | Buffer<ArrayBufferLike>)[] | undefined;
          pdfFiles: (string | Buffer<ArrayBufferLike>)[] | undefined;
@@ -7,4 +7,13 @@ export declare class PDFProcessor {
      private static isValidPDF;
      private static extractBasicMetadata;
      static estimateTokens(pageCount: number, mode?: "text-only" | "visual"): number;
+     static convertPDFToImages(pdfBuffer: Buffer, options?: {
+         maxPages?: number;
+         scale?: number;
+         format?: "png" | "jpeg";
+         quality?: number;
+     }): Promise<Array<{
+         buffer: Buffer;
+         pageNumber: number;
+     }>>;
  }
@@ -1,4 +1,6 @@
  import { logger } from "./logger.js";
+ import * as pdfjs from "pdfjs-dist/legacy/build/pdf.mjs";
+ import { createCanvas } from "canvas";
  const PDF_PROVIDER_CONFIGS = {
      anthropic: {
          maxSizeMB: 5,
@@ -87,7 +89,7 @@ const PDF_PROVIDER_CONFIGS = {
      mistral: {
          maxSizeMB: 10,
          maxPages: 100,
-         supportsNative: true,
+         supportsNative: false,
          requiresCitations: false,
          apiType: "files-api",
      },
@@ -115,16 +117,14 @@ export class PDFProcessor {
          if (!this.isValidPDF(content)) {
              throw new Error("Invalid PDF file format. File must start with %PDF- header.");
          }
-         if (!config || !config.supportsNative) {
-             const supportedProviders = Object.keys(PDF_PROVIDER_CONFIGS)
-                 .filter((p) => PDF_PROVIDER_CONFIGS[p].supportsNative)
-                 .join(", ");
-             throw new Error(`PDF files are not currently supported with ${provider} provider.\n` +
-                 `Supported providers: ${supportedProviders}\n` +
+         if (!config) {
+             const supportedProviders = Object.keys(PDF_PROVIDER_CONFIGS).join(", ");
+             throw new Error(`PDF files are not configured for ${provider} provider.\n` +
+                 `Configured providers: ${supportedProviders}\n` +
                  `Current provider: ${provider}\n\n` +
                  `Options:\n` +
-                 `1. Switch to a supported provider (--provider openai or --provider vertex)\n` +
-                 `2. Convert your PDF to text manually`);
+                 `1. Switch to a configured provider (--provider openai or --provider vertex)\n` +
+                 `2. Contact support to add ${provider} PDF configuration`);
          }
          const sizeMB = content.length / (1024 * 1024);
          if (sizeMB > config.maxSizeMB) {
@@ -195,5 +195,63 @@
              return Math.ceil((pageCount / 3) * 7000);
          }
      }
+     static async convertPDFToImages(pdfBuffer, options) {
+         const maxPages = options?.maxPages || 10;
+         const scale = options?.scale || 2.0;
+         const format = options?.format || "png";
+         const quality = options?.quality || 0.9;
+         let pdfDocument = null;
+         try {
+             const loadingTask = pdfjs.getDocument({
+                 data: new Uint8Array(pdfBuffer),
+                 useSystemFonts: true,
+                 standardFontDataUrl: `https://cdn.jsdelivr.net/npm/pdfjs-dist@${pdfjs.version}/standard_fonts/`,
+             });
+             pdfDocument = await loadingTask.promise;
+             const numPages = Math.min(pdfDocument.numPages, maxPages);
+             const images = [];
+             logger.info(`[PDF→Image] Converting ${numPages} page(s) from PDF (total: ${pdfDocument.numPages})`);
+             for (let pageNum = 1; pageNum <= numPages; pageNum++) {
+                 const page = await pdfDocument.getPage(pageNum);
+                 const viewport = page.getViewport({ scale });
+                 const canvas = createCanvas(viewport.width, viewport.height);
+                 const context = canvas.getContext("2d");
+                 await page.render({
+                     canvasContext: context,
+                     viewport,
+                     // @ts-expect-error - canvas type mismatch between node-canvas and pdfjs-dist
+                     canvas: canvas,
+                 }).promise;
+                 const imageBuffer = format === "png"
+                     ? canvas.toBuffer("image/png")
+                     : canvas.toBuffer("image/jpeg", { quality });
+                 images.push({ buffer: imageBuffer, pageNumber: pageNum });
+                 logger.debug(`[PDF→Image] ✅ Converted page ${pageNum}/${numPages} (${(imageBuffer.length / 1024).toFixed(1)}KB)`);
+             }
+             if (pdfDocument.numPages > maxPages) {
+                 logger.warn(`[PDF→Image] PDF has ${pdfDocument.numPages} pages, converted only first ${maxPages} pages`);
+             }
+             logger.info(`[PDF→Image] ✅ Successfully converted ${images.length} page(s) to images`);
+             return images;
+         }
+         catch (error) {
+             logger.error(`[PDF→Image] ❌ Failed to convert PDF to images:`, error instanceof Error ? error.message : String(error));
+             throw new Error(`PDF to image conversion failed: ${error instanceof Error ? error.message : String(error)}`);
+         }
+         finally {
+             // Ensure pdfDocument is destroyed regardless of success or failure
+             if (pdfDocument) {
+                 try {
+                     pdfDocument.destroy();
+                     logger.debug("[PDF→Image] PDF document resources cleaned up");
+                 }
+                 catch (destroyError) {
+                     logger.warn("[PDF→Image] Error destroying PDF document:", destroyError instanceof Error
+                         ? destroyError.message
+                         : String(destroyError));
+                 }
+             }
+         }
+     }
  }
  //# sourceMappingURL=pdfProcessor.js.map
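
The new `convertPDFToImages` helper rasterizes pages with pdfjs-dist and node-canvas and resolves to `{ buffer, pageNumber }` pairs, matching the declaration added to `pdfProcessor.d.ts`. A minimal usage sketch (the import path is illustrative):

```typescript
import { readFile, writeFile } from "node:fs/promises";
import { PDFProcessor } from "@juspay/neurolink/dist/utils/pdfProcessor.js"; // assumed path into the built package

const pdfBuffer = await readFile("report.pdf");

// Rasterize at most the first 3 pages at 1.5x scale as JPEGs (defaults: 10 pages, 2.0x, PNG, quality 0.9).
const pages = await PDFProcessor.convertPDFToImages(pdfBuffer, {
  maxPages: 3,
  scale: 1.5,
  format: "jpeg",
  quality: 0.8,
});

for (const { buffer, pageNumber } of pages) {
  await writeFile(`report-page-${pageNumber}.jpg`, buffer);
}
```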
@@ -135,7 +135,6 @@ function getToolCategory(toolName) {
          case "readFile":
          case "writeFile":
          case "listDirectory":
-         case "searchFiles":
              return "filesystem";
          case "websearchGrounding":
              return "search";
@@ -8,8 +8,39 @@ import { MemoryClient } from "mem0ai";
   */
  export interface Mem0Config {
      apiKey: string;
+     /**
+      * Optional organization ID - if not provided, will be auto-populated from ping() response
+      */
+     organizationId?: string;
+     /**
+      * Optional project ID - if not provided, will be auto-populated from ping() response
+      */
+     projectId?: string;
+     /**
+      * Whether to update project-level custom instructions during initialization
+      * Default: false (don't update project settings)
+      *
+      * Note: organizationId and projectId are NOT required - they will be auto-populated
+      * from the mem0 API via ping() if not provided
+      */
+     updateProjectSettings?: boolean;
+     /**
+      * Custom instructions and categories for mem0 extraction behavior
+      * Only used if updateProjectSettings is true
+      */
+     customPrompts?: {
+         /**
+          * Custom instructions for how mem0 should extract and store memories
+          * This applies to ALL memories added to the project
+          */
+         custom_instructions?: string;
+         /**
+          * Custom categories for organizing memories
+          */
+         custom_categories?: Array<Record<string, unknown>>;
+     };
  }
  /**
-  * Initialize mem0 memory instance with cloud API
+  * Initialize mem0 memory instance with cloud API and optional project settings
   */
  export declare function initializeMem0(mem0Config: Mem0Config): Promise<MemoryClient | null>;
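
A sketch of how the extended `Mem0Config` might be used (not from the diff; the import path, instruction text, and category names are illustrative):

```typescript
import { initializeMem0, type Mem0Config } from "@juspay/neurolink/dist/memory/mem0Initializer.js"; // assumed path

const config: Mem0Config = {
  apiKey: process.env.MEM0_API_KEY ?? "",
  // organizationId / projectId can be omitted: ping() auto-populates them per the new docs.
  updateProjectSettings: true,
  customPrompts: {
    custom_instructions: "Only store durable user preferences, not transient chit-chat.",
    custom_categories: [{ name: "preferences" }, { name: "integrations" }],
  },
};

// Resolves to a MemoryClient, or null when the API key is missing or initialization fails.
const client = await initializeMem0(config);
```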
@@ -5,7 +5,7 @@
  import { MemoryClient } from "mem0ai";
  import { logger } from "../utils/logger.js";
  /**
-  * Initialize mem0 memory instance with cloud API
+  * Initialize mem0 memory instance with cloud API and optional project settings
   */
  export async function initializeMem0(mem0Config) {
      // Guard: skip initialization if API key is missing
@@ -18,8 +18,61 @@ export async function initializeMem0(mem0Config) {
          // Create MemoryClient instance with cloud API
          const client = new MemoryClient({
              apiKey: mem0Config.apiKey,
+             organizationId: mem0Config.organizationId,
+             projectId: mem0Config.projectId,
+         });
+         // Track whether project settings were actually updated (not just requested)
+         let projectSettingsUpdated = false;
+         // Update project-level settings if requested
+         if (mem0Config.updateProjectSettings && mem0Config.customPrompts) {
+             // Build update payload - only include fields that are actually provided
+             const updatePayload = {};
+             if (mem0Config.customPrompts.custom_instructions &&
+                 mem0Config.customPrompts.custom_instructions.trim() !== "") {
+                 updatePayload.custom_instructions =
+                     mem0Config.customPrompts.custom_instructions;
+             }
+             if (Array.isArray(mem0Config.customPrompts.custom_categories) &&
+                 mem0Config.customPrompts.custom_categories.length > 0) {
+                 updatePayload.custom_categories =
+                     mem0Config.customPrompts.custom_categories;
+             }
+             // Only proceed if there's something to update
+             if (Object.keys(updatePayload).length > 0) {
+                 try {
+                     // Note: updateProject() internally calls ping() first, which auto-populates
+                     // organizationId and projectId from the server, so they're not required
+                     await client.updateProject(updatePayload);
+                     projectSettingsUpdated = true; // Only set to true on successful update
+                     logger.info("[mem0Initializer] Project settings updated successfully", {
+                         hasInstructions: !!updatePayload.custom_instructions,
+                         hasCategories: !!updatePayload.custom_categories,
+                         // Note: These IDs are auto-populated by ping() inside updateProject()
+                         organizationId: client.organizationId,
+                         projectId: client.projectId,
+                     });
+                 }
+                 catch (error) {
+                     logger.warn("[mem0Initializer] Failed to update project settings", {
+                         error: error instanceof Error ? error.message : String(error),
+                         hint: "Ensure your MEM0_API_KEY has permission to update project settings",
+                     });
+                     // Continue initialization even if project update fails
+                     // projectSettingsUpdated remains false
+                 }
+             }
+             else {
+                 logger.warn("[mem0Initializer] updateProjectSettings=true but no custom instructions or categories provided - nothing to update");
+             }
+         }
+         else if (mem0Config.updateProjectSettings && !mem0Config.customPrompts) {
+             logger.warn("[mem0Initializer] updateProjectSettings=true but customPrompts not provided - nothing to update");
+         }
+         logger.info("[mem0Initializer] Mem0 cloud API initialized successfully", {
+             hasOrgId: !!client.organizationId,
+             hasProjectId: !!client.projectId,
+             projectSettingsUpdated,
          });
-         logger.info("[mem0Initializer] Mem0 cloud API initialized successfully");
          return client;
      }
      catch (error) {
@@ -188,6 +188,50 @@ export const MODEL_REGISTRY = {
          category: "general",
      },
      // Anthropic Models
+     [AnthropicModels.CLAUDE_4_5_HAIKU]: {
+         id: AnthropicModels.CLAUDE_4_5_HAIKU,
+         name: "Claude 4.5 Haiku",
+         provider: AIProviderName.ANTHROPIC,
+         description: "Latest fast and efficient Claude model with vision support",
+         capabilities: {
+             vision: true,
+             functionCalling: true,
+             codeGeneration: true,
+             reasoning: true,
+             multimodal: true,
+             streaming: true,
+             jsonMode: false,
+         },
+         pricing: {
+             inputCostPer1K: 0.001,
+             outputCostPer1K: 0.005,
+             currency: "USD",
+         },
+         performance: {
+             speed: "fast",
+             quality: "high",
+             accuracy: "high",
+         },
+         limits: {
+             maxContextTokens: 200000,
+             maxOutputTokens: 64000,
+             maxRequestsPerMinute: 100,
+         },
+         useCases: {
+             coding: 8,
+             creative: 8,
+             analysis: 8,
+             conversation: 9,
+             reasoning: 8,
+             translation: 8,
+             summarization: 9,
+         },
+         aliases: ["claude-4.5-haiku", "claude-haiku-latest", "haiku-4.5"],
+         deprecated: false,
+         isLocal: false,
+         releaseDate: "2025-10-15",
+         category: "general",
+     },
      [AnthropicModels.CLAUDE_3_5_SONNET]: {
          id: AnthropicModels.CLAUDE_3_5_SONNET,
          name: "Claude 3.5 Sonnet",
@@ -137,7 +137,7 @@ export declare class NeuroLink {
      /** Extract memory context from search results */
      private extractMemoryContext;
      /** Store conversation turn in mem0 */
-     private storeConversationTurn;
+     private storeMem0ConversationTurn;
      /**
       * Set up HITL event forwarding to main emitter
       */
package/dist/neurolink.js CHANGED
@@ -402,9 +402,12 @@ Current user's request: ${currentInput}`;
              .join("\n");
      }
      /** Store conversation turn in mem0 */
-     async storeConversationTurn(mem0, userContent, userId, metadata) {
-         // Store user message only, reducing latency in mem0
-         const conversationTurn = [{ role: "user", content: userContent }];
+     async storeMem0ConversationTurn(mem0, userContent, aiResponse, userId, metadata) {
+         // Store both user message and AI response for better context extraction
+         const conversationTurn = [
+             { role: "user", content: userContent },
+             { role: "assistant", content: aiResponse },
+         ];
          await mem0.add(conversationTurn, {
              user_id: userId,
              metadata,
@@ -455,7 +458,7 @@
          try {
              this.externalServerManager = new ExternalServerManager({
                  maxServers: 20,
-                 defaultTimeout: 15000,
+                 defaultTimeout: 30000, // Increased from 15s to 30s for proxy latency (e.g., LiteLLM)
                  enableAutoRestart: true,
                  enablePerformanceMonitoring: true,
              }, {
@@ -1366,7 +1369,7 @@ Current user's request: ${currentInput}`;
          try {
              const mem0 = await this.ensureMem0Ready();
              if (mem0) {
-                 await this.storeConversationTurn(mem0, originalPrompt, options.context?.userId, {
+                 await this.storeMem0ConversationTurn(mem0, originalPrompt, generateResult.content, options.context?.userId, {
                      timestamp: new Date().toISOString(),
                      provider: generateResult.provider,
                      model: generateResult.model,
@@ -1963,6 +1966,29 @@
              // Continue with original options if orchestration fails
          }
      }
+     // 🔧 AUTO-DISABLE TOOLS: For Ollama models that don't support tools (same logic as generate())
+     // This prevents overwhelming smaller models with massive tool descriptions in the system message
+     if ((options.provider === "ollama" ||
+         options.provider?.toLowerCase().includes("ollama")) &&
+         !options.disableTools) {
+         const { ModelConfigurationManager } = await import("./core/modelConfiguration.js");
+         const modelConfig = ModelConfigurationManager.getInstance();
+         const ollamaConfig = modelConfig.getProviderConfiguration("ollama");
+         const toolCapableModels = ollamaConfig?.modelBehavior?.toolCapableModels || [];
+         // Only disable tools if we have explicit evidence the model doesn't support them
+         // If toolCapableModels is empty or model is not specified, don't make assumptions
+         const modelName = options.model;
+         if (toolCapableModels.length > 0 && modelName) {
+             const modelSupportsTools = toolCapableModels.some((capableModel) => modelName.toLowerCase().includes(capableModel.toLowerCase()));
+             if (!modelSupportsTools) {
+                 options.disableTools = true;
+                 logger.debug("Auto-disabled tools for Ollama model that doesn't support them (stream)", {
+                     model: options.model,
+                     toolCapableModels: toolCapableModels.slice(0, 3), // Show first 3 for brevity
+                 });
+             }
+         }
+     }
      factoryResult = processStreamingFactoryOptions(options);
      enhancedOptions = createCleanStreamOptions(options);
      if (options.input?.text) {
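
The stream path now mirrors `generate()`'s capability check: tools are only auto-disabled when an explicit `toolCapableModels` allow-list exists and the requested model is not on it. A standalone sketch of that check (names mirror the diff; the config shape follows `getProviderConfiguration("ollama")`):

```typescript
// Substring match against the configured allow-list, as in the diff above.
function ollamaModelSupportsTools(modelName: string, toolCapableModels: string[]): boolean {
  // An empty allow-list means "no evidence either way" — don't disable anything.
  if (toolCapableModels.length === 0) {
    return true;
  }
  return toolCapableModels.some((capableModel) =>
    modelName.toLowerCase().includes(capableModel.toLowerCase()),
  );
}

console.log(ollamaModelSupportsTools("llama3.1:8b-instruct", ["llama3.1", "qwen2.5"])); // true
console.log(ollamaModelSupportsTools("tinyllama:1.1b", ["llama3.1", "qwen2.5"]));       // false → disableTools = true
```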
@@ -2014,11 +2040,9 @@
              try {
                  const mem0 = await self.ensureMem0Ready();
                  if (mem0) {
-                     await self.storeConversationTurn(mem0, originalPrompt, enhancedOptions.context?.userId, {
+                     await self.storeMem0ConversationTurn(mem0, originalPrompt, accumulatedContent.trim(), enhancedOptions.context?.userId, {
                          timestamp: new Date().toISOString(),
                          type: "conversation_turn_stream",
-                         userMessage: originalPrompt,
-                         aiResponse: accumulatedContent.trim(),
                      });
                  }
              }
@@ -2091,17 +2115,26 @@
              customTools: this.getCustomTools(),
              executeTool: this.executeTool.bind(this),
          }, "NeuroLink.createMCPStream");
+         // 🔧 FIX: Get available tools and create tool-aware system prompt
+         // Use SAME pattern as tryMCPGeneration (generate mode)
+         const availableTools = await this.getAllAvailableTools();
+         const enhancedSystemPrompt = this.createToolAwareSystemPrompt(options.systemPrompt, availableTools);
          // Get conversation messages for context
          const conversationMessages = await getConversationMessages(this.conversationMemory, {
              prompt: options.input.text,
              context: options.context,
          });
-         // Let provider handle tools and system prompt automatically via Vercel AI SDK
-         // This ensures proper tool integration in stream mode
+         // 🔧 FIX: Pass enhanced system prompt to real streaming
+         // Tools will be accessed through the streamText call in executeStream
          const streamResult = await provider.stream({
              ...options,
+             systemPrompt: enhancedSystemPrompt, // Use enhanced prompt with tool descriptions
              conversationMessages,
          });
+         logger.debug("[createMCPStream] Stream created successfully", {
+             provider: providerName,
+             systemPromptPassedLength: enhancedSystemPrompt.length,
+         });
          return { stream: streamResult.stream, provider: providerName };
      }
      /**