@juspay/neurolink 9.55.11 → 9.56.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -631,6 +631,11 @@ export class RedisConversationMemoryManager {
      * Applies sendToolPreview toggle and hydrates result.result for backward compat
      */
     async buildContextMessages(sessionId, userId, enableSummarization, requestId) {
+        logger.debug("[RedisConversationMemoryManager] Building context messages", {
+            sessionId,
+            userId,
+            enableSummarization,
+        });
         await this.ensureInitialized();
         if (!this.redisClient) {
             logger.warn("[RedisConversationMemoryManager] Redis client not available in buildContextMessages");
@@ -654,6 +659,11 @@ export class RedisConversationMemoryManager {
         const redisKey = getSessionKey(this.redisConfig, sessionId, userId);
         const conversationData = await withTimeout(redisClient.get(redisKey), REDIS_TIMEOUT_MS);
         const conversation = deserializeConversation(conversationData || null);
+        logger.debug("[RedisConversationMemoryManager] Retrieved conversation for context building", {
+            sessionId,
+            userId,
+            conversationFound: !!conversation,
+        });
         if (!conversation) {
             span.setAttribute("session.found", false);
             span.setStatus({ code: SpanStatusCode.OK });
@@ -672,6 +682,12 @@ export class RedisConversationMemoryManager {
             lastActivity: new Date(conversation.updatedAt).getTime(),
         };
         const contextMessages = buildContextFromPointer(session, requestId);
+        logger.debug("[RedisConversationMemoryManager] Built context messages from pointer", {
+            sessionId,
+            userId,
+            contextMessageCount: contextMessages.length,
+            pointerMessageId: session.summarizedUpToMessageId || "none",
+        });
         const sendToolPreview = this.config?.contextCompaction?.sendToolPreview === true;
         // Map tool_result messages: apply preview toggle + hydrate result.result
         const finalMessages = contextMessages.map((msg) => {
@@ -695,6 +711,15 @@ export class RedisConversationMemoryManager {
                 }
                 hydratedResult = { ...msg.result, result: parsedResult };
             }
+            logger.debug("[RedisConversationMemoryManager] Processing tool_result message for context", {
+                sessionId,
+                userId,
+                messageId: msg.id,
+                sendToolPreview,
+                hasPreview: !!msg.metadata?.toolOutputPreview,
+                contentLength: content ? String(content).length : 0,
+                resultHydrated: hydratedResult !== msg.result,
+            });
             return { ...msg, content, result: hydratedResult };
         });
         // Tool messages now have real content and participate in context properly.
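The hunk above only adds debug logging around the existing tool_result mapping; the toggle it records is read from `config.contextCompaction.sendToolPreview` (see the context line in the previous hunk). A minimal sketch of enabling that toggle; where exactly this config object is handed to `RedisConversationMemoryManager` is an assumption, since the diff only shows the read side:

```js
// Sketch only: the config path (contextCompaction.sendToolPreview) is taken from
// the hunk above; how this object reaches the memory manager is assumed, not shown.
const memoryManagerConfig = {
    contextCompaction: {
        // When true, tool_result messages are mapped with the preview toggle applied
        // before they are included in the rebuilt context.
        sendToolPreview: true,
    },
};
```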
@@ -911,12 +936,16 @@ export class RedisConversationMemoryManager {
         const titleGenerator = new NeuroLink({
             conversationMemory: { enabled: false },
         });
-        const titlePrompt = `Generate a clear, concise, and descriptive title (5–8 words maximum) for a conversation based on the following user message.
+        const defaultTitlePrompt = `Generate a clear, concise, and descriptive title (20-25 letters maximum) for a conversation based on the following user message.
 The title must meaningfully reflect the topic or intent of the message.
 Do not output anything unrelated, vague, or generic.
 Do not say you cannot create a title. Always return a valid title.
 
 User message: "${userMessage}"`;
+        const customPrompt = process.env.NEUROLINK_TITLE_PROMPT;
+        const titlePrompt = customPrompt
+            ? customPrompt.replace(/\$\{userMessage\}/g, userMessage)
+            : defaultTitlePrompt;
         const result = await titleGenerator.generate({
             input: { text: titlePrompt },
             provider: this.config.summarizationProvider || "vertex",
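The title-generation hunk above keeps the previous prompt as `defaultTitlePrompt` and lets it be overridden through the `NEUROLINK_TITLE_PROMPT` environment variable, with the literal `${userMessage}` placeholder substituted by the `replace` call shown. A minimal usage sketch, assuming only that the variable is set before the title path runs:

```js
// Sketch only: override the default title prompt introduced in this hunk.
// "${userMessage}" here is literal text inside an ordinary string (not a template
// literal), so it survives until the runtime replace shown above substitutes it.
process.env.NEUROLINK_TITLE_PROMPT =
    'Write a short, specific title for this conversation: "${userMessage}"';
// Later title generation uses this prompt instead of defaultTitlePrompt.
```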
package/dist/neurolink.js CHANGED
@@ -4486,6 +4486,16 @@ Current user's request: ${currentInput}`;
      * @throws {Error} When conversation memory operations fail (if enabled)
      */
     async stream(options) {
+        logger.debug("[NeuroLink] stream() called with options", {
+            provider: options.provider,
+            model: options.model,
+            inputLength: options.input?.text?.length || 0,
+            disableTools: options.disableTools,
+            enableAnalytics: options.enableAnalytics,
+            enableEvaluation: options.enableEvaluation,
+            contextKeys: options.context ? Object.keys(options.context) : [],
+            optionKeys: Object.keys(options),
+        });
         return metricsTraceContextStorage.run(this.createMetricsTraceContext(), () => this.executeStreamRequest({ ...options }));
     }
     async executeStreamRequest(options) {
@@ -4578,8 +4588,26 @@ Current user's request: ${currentInput}`;
     }
     async runStandardStreamRequest(params) {
         const { options, streamSpan, spanStartTime, startTime, hrTimeStart, streamId, originalPrompt, } = params;
+        logger.debug("[NeuroLink] Running standard stream request", {
+            streamId,
+            provider: options.provider,
+            model: options.model,
+            inputLength: options.input?.text?.length || 0,
+            disableTools: options.disableTools,
+            enableAnalytics: options.enableAnalytics,
+            enableEvaluation: options.enableEvaluation,
+            contextKeys: options.context ? Object.keys(options.context) : [],
+            optionKeys: Object.keys(options),
+            sessionId: options.context?.sessionId,
+        });
         try {
             const { enhancedOptions, factoryResult } = await this.prepareStreamOptions(options, streamId, startTime, hrTimeStart);
+            logger.debug("[NeuroLink] Stream options prepared", {
+                streamId,
+                options: enhancedOptions,
+                factoryResult,
+                sessionId: enhancedOptions.context?.sessionId,
+            });
             const { stream: mcpStream, provider: providerName, usage: streamUsage, model: streamModel, finishReason: streamFinishReason, toolCalls: streamToolCalls, toolResults: streamToolResults, analytics: streamAnalytics, } = await this.createMCPStream(enhancedOptions);
             const streamState = {
                 finishReason: streamFinishReason ?? "stop",
@@ -4669,6 +4697,16 @@ Current user's request: ${currentInput}`;
            });
        }
        catch (error) {
+            logger.debug("[NeuroLink.stream] Stream error occurred", {
+                error: error instanceof Error ? error.message : String(error),
+                name: error instanceof Error ? error.name : "UnknownError",
+                provider: providerName,
+                model: enhancedOptions.model,
+                chunkCount,
+                totalLength: accumulatedContent.length,
+                durationMs: Date.now() - streamStartTime,
+                sessionId,
+            });
            streamError = error;
            self.emitter.emit("stream:error", {
                type: "stream:error",
@@ -4687,6 +4725,16 @@ Current user's request: ${currentInput}`;
            throw error;
        }
        finally {
+            logger.debug("[NeuroLink.stream] Stream finished, performing cleanup", {
+                provider: providerName,
+                model: enhancedOptions.model,
+                totalChunks: chunkCount,
+                totalLength: accumulatedContent.length,
+                durationMs: Date.now() - streamStartTime,
+                fallbackAttempted: metadata.fallbackAttempted,
+                guardrailsBlocked: metadata.guardrailsBlocked,
+                error: metadata.error,
+            });
            self._disableToolCacheForCurrentRequest = false;
            cleanupListeners();
            streamSpan.setAttribute("neurolink.response_time_ms", Date.now() - spanStartTime);
@@ -5084,6 +5132,11 @@ Current user's request: ${currentInput}`;
      */
     async storeStreamConversationMemory(params) {
         const { enhancedOptions, providerName, originalPrompt, accumulatedContent, startTime, eventSequence, } = params;
+        logger.debug("[NeuroLink.stream] Preparing to store conversation turn in memory", {
+            options: JSON.stringify(enhancedOptions),
+            sessionId: enhancedOptions.context
+                ?.sessionId,
+        });
         // Guard: skip storing if no meaningful content was produced (no text AND no tool activity)
         const hasToolEvents = eventSequence.some((e) => e.type === "tool:start" || e.type === "tool:end");
         if (!accumulatedContent.trim() && !hasToolEvents) {
@@ -5093,6 +5146,12 @@ Current user's request: ${currentInput}`;
            });
            return;
        }
+        logger.debug("[NeuroLink.stream] Storing conversation turn in memory", {
+            options: JSON.stringify(enhancedOptions),
+            sessionId: enhancedOptions.context
+                ?.sessionId,
+            conversationMemoryExists: this.conversationMemory ? true : false,
+        });
         // Store memory after stream consumption is complete
         if (this.conversationMemory && enhancedOptions.context?.sessionId) {
             const sessionId = enhancedOptions.context
@@ -27,8 +27,21 @@ export function applyConversationMemoryDefaults(userConfig) {
  * Get conversation history as message array, summarizing if needed.
  */
 export async function getConversationMessages(conversationMemory, options) {
+    logger.debug("[conversationMemoryUtils] getConversationMessages called", {
+        hasMemory: !!conversationMemory,
+        memoryType: conversationMemory?.constructor?.name || "NONE",
+        hasContext: !!options.context,
+        enableSummarization: options.enableSummarization ?? false,
+        options: JSON.stringify(options, null, 2),
+    });
     if (!conversationMemory || !options.context) {
-        logger.warn("[conversationMemoryUtils] No memory or context, returning empty messages");
+        logger.warn("[conversationMemoryUtils] No memory or context, returning empty messages", {
+            hasMemory: !!conversationMemory,
+            memoryType: conversationMemory?.constructor?.name || "NONE",
+            hasContext: !!options.context,
+            enableSummarization: options.enableSummarization ?? false,
+            options: JSON.stringify(options, null, 2),
+        });
         return [];
     }
     const sessionId = options.context?.sessionId;
@@ -397,6 +397,9 @@ function toModelMessage(message) {
     if (message.role === "user" ||
         message.role === "assistant" ||
         message.role === "system") {
+        if (message.content.trim() === "") {
+            return null;
+        }
         return {
             role: message.role,
             content: message.content,
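The hunk above makes `toModelMessage` return `null` for user, assistant, or system messages whose content is empty or whitespace-only, so blank turns are dropped before reaching the model. A minimal sketch of how a caller would be expected to consume it under that assumption (the filtering step itself is not shown in this diff):

```js
// Sketch only: toModelMessage can now return null for blank messages, so a
// hypothetical caller drops those entries before assembling the model input.
const modelMessages = storedMessages
    .map((message) => toModelMessage(message))
    .filter((modelMessage) => modelMessage !== null);
```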
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "9.55.11",
+  "version": "9.56.1",
   "packageManager": "pnpm@10.15.1",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 13 providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {