@peopl-health/nexus 3.0.2 → 3.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -43,9 +43,14 @@ class NexusMessaging {
     };
     // Message processing with check-after strategy
     this.processingLocks = new Map(); // Per-chat locks to prevent parallel processing
+    this.activeRequests = new Map(); // Track active AI requests per chat
+    this.abandonedRuns = new Set(); // Track runs that should be ignored
     this.batchingConfig = {
       enabled: config.messageBatching?.enabled ?? true, // Enabled by default with check-after
-      checkDelayMs: config.messageBatching?.checkDelayMs ?? 100 // Delay before checking for new messages
+      abortOnNewMessage: config.messageBatching?.abortOnNewMessage ?? true, // Abort ongoing AI calls when new messages arrive
+      immediateRestart: config.messageBatching?.immediateRestart ?? true, // Start new processing immediately without waiting
+      batchWindowMs: config.messageBatching?.batchWindowMs ?? 2000, // Wait up to 2s for message bursts
+      maxBatchWait: config.messageBatching?.maxBatchWait ?? 5000 // Maximum time to wait for batching
     };
   }
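Note: every new `messageBatching` key is optional and falls back to the defaults shown above. A minimal consumer-side sketch of overriding them; the `NexusMessaging` export name and the omission of any other required config keys are assumptions, not confirmed by this diff:

```js
// Hypothetical wiring; only the messageBatching keys are documented by this diff.
const { NexusMessaging } = require('@peopl-health/nexus');

const messaging = new NexusMessaging({
  messageBatching: {
    enabled: true,            // keep check-after batching on (the default)
    abortOnNewMessage: true,  // abandon an in-flight AI run when a newer message arrives
    immediateRestart: true,   // restart processing right away instead of waiting
    batchWindowMs: 3000,      // wait up to 3s for a burst of messages before replying
    maxBatchWait: 8000        // hard cap on the total batching wait
  }
});
```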
 
@@ -348,6 +353,7 @@ class NexusMessaging {
     if (this.messageStorage) {
       await this.messageStorage.saveMessage({
         ...messageData,
+        messageId: messageData.id,
         timestamp: new Date(),
         fromMe: false,
         origin: 'patient'
@@ -651,49 +657,78 @@ class NexusMessaging {
    * Handle message with check-after strategy - process immediately, check for new messages after
    */
   async _handleWithCheckAfter(chatId) {
-    // If already processing this chat, just return (message is saved, will be picked up)
+    const typingInterval = await this._startTypingRefresh(chatId);
+
     if (this.processingLocks.has(chatId)) {
       logger.info(`[CheckAfter] Already processing ${chatId}, new message will be included`);
+
+      if (this.batchingConfig.abortOnNewMessage && this.activeRequests.has(chatId)) {
+        const runId = this.activeRequests.get(chatId);
+        this.abandonedRuns.add(runId);
+        logger.info(`[CheckAfter] Marked run ${runId} as abandoned for ${chatId}`);
+
+        if (this.batchingConfig.immediateRestart) {
+          this.processingLocks.delete(chatId);
+          this.activeRequests.delete(chatId);
+
+          logger.info(`[CheckAfter] Starting immediate reprocessing for ${chatId}`);
+          await this._processWithLock(chatId, null);
+        }
+      }
       return;
     }

-    await this._processWithLock(chatId);
+    await this._processWithLock(chatId, typingInterval);
   }

   /**
    * Process messages with per-chat lock and check-after logic
    */
-  async _processWithLock(chatId) {
+  async _processWithLock(chatId, existingTypingInterval = null) {
     this.processingLocks.set(chatId, true);
-    let typingInterval = null;
+    let typingInterval = existingTypingInterval;
+    let runId = null;

     try {
-      typingInterval = await this._startTypingRefresh(chatId);
-      logger.info(`[CheckAfter] Processing messages for ${chatId}`);
-
-      // Process with assistant
-      const result = await replyAssistant(chatId);
-      const botResponse = typeof result === 'string' ? result : result?.output;
-      const tools_executed = typeof result === 'object' ? result?.tools_executed : undefined;
-
-      // Small delay to catch very recent DB writes
-      await new Promise(resolve => setTimeout(resolve, this.batchingConfig.checkDelayMs));
+      if (!typingInterval) {
+        typingInterval = await this._startTypingRefresh(chatId);
+      }
+
+      const startTime = Date.now();
+      let messageCount = await this._getUnprocessedMessageCount(chatId);
+      let lastCount = messageCount;
+
+      while (Date.now() - startTime < this.batchingConfig.batchWindowMs) {
+        await new Promise(resolve => setTimeout(resolve, 500));
+        const newCount = await this._getUnprocessedMessageCount(chatId);
+
+        if (newCount > lastCount) {
+          lastCount = newCount;
+          logger.info(`[Batching] New message detected for ${chatId}, extending wait`);
+        }
+
+        if (Date.now() - startTime >= this.batchingConfig.maxBatchWait) {
+          logger.info(`[Batching] Max wait reached for ${chatId}`);
+          break;
+        }
+      }
+
+      logger.info(`[CheckAfter] Processing ${lastCount} messages for ${chatId} after batching`);

-      // Check for new unprocessed messages
-      const hasNewMessages = await Message.exists({
-        numero: chatId,
-        processed: false,
-        from_me: false
-      });
+      runId = `run_${Date.now()}_${Math.random().toString(36).substring(7)}`;
+      this.activeRequests.set(chatId, runId);

-      if (hasNewMessages) {
-        logger.info(`[CheckAfter] New messages detected for ${chatId}, discarding response and reprocessing`);
-        if (typingInterval) clearInterval(typingInterval);
-        // Recursively process with new messages
-        return await this._processWithLock(chatId);
+      const result = await replyAssistant(chatId, null, null, { runId });
+
+      if (this.abandonedRuns.has(runId)) {
+        logger.info(`[CheckAfter] Discarding abandoned run ${runId} for ${chatId}`);
+        this.abandonedRuns.delete(runId);
+        return;
       }
+
+      const botResponse = typeof result === 'string' ? result : result?.output;
+      const tools_executed = typeof result === 'object' ? result?.tools_executed : undefined;

-      // No new messages - send response and mark processed
       if (botResponse) {
         await this.sendMessage({
           code: chatId,
@@ -711,9 +746,25 @@ class NexusMessaging {
     } finally {
       if (typingInterval) clearInterval(typingInterval);
       this.processingLocks.delete(chatId);
+      this.activeRequests.delete(chatId);
+      if (this.abandonedRuns.size > 100) {
+        this.abandonedRuns.clear();
+      }
     }
   }

+  /**
+   * Get count of unprocessed messages for a chat
+   */
+  async _getUnprocessedMessageCount(chatId) {
+    const { Message } = require('../models/messageModel');
+    return await Message.countDocuments({
+      numero: chatId,
+      processed: false,
+      from_me: false
+    });
+  }
+
   /**
    * Start typing indicator refresh interval
    */
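Note: `activeRequests` and `abandonedRuns` work together so that a message arriving mid-run invalidates the chat's in-flight assistant call: the run is marked abandoned, and when its promise finally resolves the stale reply is discarded instead of sent. A stripped-down sketch of that pattern; `askAssistant` is a hypothetical stand-in for the library's `replyAssistant`:

```js
// Minimal illustration of the abandon-and-discard pattern, outside the class.
const activeRequests = new Map(); // chatId -> runId of the in-flight run
const abandonedRuns = new Set();  // runIds whose results must be ignored

async function processChat(chatId, askAssistant) {
  const runId = `run_${Date.now()}_${Math.random().toString(36).substring(7)}`;
  activeRequests.set(chatId, runId);

  const reply = await askAssistant(chatId); // may take several seconds

  if (abandonedRuns.has(runId)) {
    abandonedRuns.delete(runId);
    return null; // a newer message superseded this run; drop the stale reply
  }
  activeRequests.delete(chatId);
  return reply;
}

function onNewMessage(chatId) {
  const runId = activeRequests.get(chatId);
  if (runId) abandonedRuns.add(runId); // the pending result for this chat is now stale
}
```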
@@ -725,10 +776,16 @@
     const lastMessage = await Message.findOne({
       numero: chatId,
       from_me: false,
-      message_id: { $exists: true, $ne: null }
+      processed: false,
+      message_id: { $exists: true, $ne: null, $not: /^pending-/ }
     }).sort({ createdAt: -1 });

-    if (!lastMessage?.message_id) return null;
+    if (!lastMessage?.message_id) {
+      logger.debug(`[_startTypingRefresh] No valid message for typing indicator: ${chatId}`);
+      return null;
+    }
+
+    logger.debug(`[_startTypingRefresh] Starting typing indicator for message: ${lastMessage.message_id}`);

     return setInterval(() =>
       this.provider.sendTypingIndicator(lastMessage.message_id).catch(err =>
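Note: the tightened `findOne` filter only considers messages whose provider-assigned `message_id` already exists, skipping locally created placeholders. A plain-JavaScript check of which values would pass `{ $exists: true, $ne: null, $not: /^pending-/ }`; the sample ids are invented for illustration:

```js
// Rough equivalent of the Mongo filter, applied to made-up ids.
const candidates = ['wamid.EXAMPLE123', 'pending-1718300000000', null, undefined];
const usable = candidates.filter((id) => id != null && !/^pending-/.test(id));
console.log(usable); // ['wamid.EXAMPLE123'] (only a real provider-assigned id qualifies)
```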
@@ -80,6 +80,12 @@ messageSchema.index({ numero: 1, createdAt: -1 });
 messageSchema.index({ numero: 1, processed: 1, origin: 1 }, { name: 'numero_processed_origin_idx' });
 messageSchema.index({ numero: 1, createdAt: -1, processed: 1 }, { name: 'numero_created_processed_idx' });

+// Indexes for conversation aggregation queries
+messageSchema.index({ group_id: 1, createdAt: 1 }, { name: 'conversation_sort_idx' });
+messageSchema.index({ group_id: 1, from_me: 1, read: 1 }, { name: 'unread_filter_idx' });
+messageSchema.index({ group_id: 1, numero: 1, createdAt: -1 }, { name: 'conversation_lookup_idx' });
+messageSchema.index({ createdAt: -1 }, { name: 'global_sort_idx' });
+
 messageSchema.pre('save', function (next) {
   if (this.timestamp) {
     this.timestamp = moment.tz(this.timestamp, 'America/Mexico_City').toDate();
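Note: the four new indexes line up with the conversation-list aggregations changed later in this diff. The query shapes below are inferred from the index definitions rather than copied from the package, so treat them as illustrative only:

```js
// Inferred query shapes; field values are placeholders.
const { Message } = require('../models/messageModel'); // same path used elsewhere in this diff

// conversation_lookup_idx: newest messages of one chat within a group
const latestMessages = (groupId, chatId, limit = 50) =>
  Message.find({ group_id: groupId, numero: chatId }).sort({ createdAt: -1 }).limit(limit);

// unread_filter_idx: unread inbound messages in a group
const unreadCount = (groupId) =>
  Message.countDocuments({ group_id: groupId, from_me: false, read: false });

// global_sort_idx: most recent activity across all chats
const recentActivity = (limit = 20) =>
  Message.find({}).sort({ createdAt: -1 }).limit(limit);
```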
@@ -6,6 +6,7 @@ const {
 } = require('./OpenAIResponsesProviderTools');
 const { DefaultConversationManager } = require('../services/DefaultConversationManager');
 const { logger } = require('../utils/logger');
+const { getCurrentMexicoDateTime } = require('../utils/dateUtils');

 const CONVERSATION_PREFIX = 'conv_';
 const RESPONSE_PREFIX = 'resp_';
@@ -202,7 +203,8 @@ class OpenAIResponsesProvider {
     const clinicalData = await this.conversationManager.getClinicalData(thread.code);
     const promptVariables = clinicalData ? {
       clinical_context: clinicalData.clinicalContext || '',
-      last_symptoms: clinicalData.lastSymptoms || ''
+      last_symptoms: clinicalData.lastSymptoms || '',
+      current_date: getCurrentMexicoDateTime(),
     } : null;

     // Execute with built context
@@ -287,10 +289,7 @@
     const makeAPICall = (inputData) => retryWithBackoff(() =>
       this.client.responses.create({
         prompt: promptConfig,
-        model: model || this.defaults.responseModel,
-        instructions: additionalInstructions || instructions,
         input: inputData,
-        metadata, top_p: topP, temperature, max_output_tokens: maxOutputTokens,
         truncation: truncationStrategy,
       }), { providerName: PROVIDER_NAME });
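Note: the per-call `model`, `instructions`, and sampling parameters are gone; the request now leans entirely on the stored `prompt` configuration, which presumably carries the model and instructions itself. A hedged sketch of what such a Responses API call can look like; `promptConfig`'s construction is not shown in this diff, so its shape here (a reusable prompt id plus variables) is an assumption:

```js
const OpenAI = require('openai');
const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

// Assumed shape of promptConfig: a reusable prompt created in the OpenAI dashboard,
// referenced by id, with the variables built earlier in this diff (placeholder values).
const promptConfig = {
  id: 'pmpt_example_id',
  variables: {
    clinical_context: 'resumen clínico del paciente',
    last_symptoms: 'dolor de cabeza',
    current_date: 'jueves, 13 de junio de 2025 a las 3:45 PM'
  }
};

async function ask(question) {
  const response = await client.responses.create({
    prompt: promptConfig,
    input: [{ role: 'user', content: question }],
    truncation: 'auto'
  });
  return response.output_text;
}
```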
 
@@ -1,5 +1,5 @@
 const { ConversationManager } = require('./ConversationManager');
-const { getLastNMessages } = require('../helpers/messageHelper');
+const { getLastNMessages, formatMessage } = require('../helpers/messageHelper');
 const { handlePendingFunctionCalls: handlePendingFunctionCallsUtil } = require('../providers/OpenAIResponsesProviderTools');
 const { getRecordByFilter } = require('./airtableService');
 const { Follow_Up_ID } = require('../config/airtableConfig');
@@ -23,10 +23,13 @@ class DefaultConversationManager extends ConversationManager {
       return additionalMessages;
     }

-    const messageContext = allMessages.reverse().map(msg => ({
-      role: msg.origin === 'patient' ? 'user' : 'assistant',
-      content: msg.body || msg.content || ''
-    }));
+    const messageContext = allMessages.reverse().map(msg => {
+      const formattedContent = formatMessage(msg);
+      return {
+        role: msg.origin === 'patient' ? 'user' : 'assistant',
+        content: formattedContent || msg.body || msg.content || ''
+      };
+    });

     return [...additionalMessages, ...messageContext];
   } catch (error) {
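Note: the context handed to the assistant is still a flat list of `{ role, content }` turns; only the content now runs through `formatMessage` first, with the old `body`/`content` fields kept as fallbacks. The fallback chain in isolation; the sample message object and the stubbed `formatMessage` are invented:

```js
// formatMessage is the package's helper; its real output is not shown in this diff,
// so here it is stubbed to return undefined and force the fallback.
const toTurn = (msg, formatMessage) => ({
  role: msg.origin === 'patient' ? 'user' : 'assistant',
  content: formatMessage(msg) || msg.body || msg.content || ''
});

console.log(toTurn({ origin: 'patient', body: 'Hola doctora' }, () => undefined));
// { role: 'user', content: 'Hola doctora' }
```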
@@ -41,22 +41,23 @@ const fetchConversationData = async (filter, skip, limit) => {
     },
     { $project: { threadInfo: 0 } },
     ...(filter === 'pending-review' ? [{ $match: { $or: [{ review: false }, { review: null }] } }] : []),
-    { $sort: { 'latestMessage.createdAt': -1, 'latestMessage.timestamp': -1 } },
+    { $sort: { 'latestMessage.createdAt': -1 } },
     { $skip: skip },
     { $limit: limit }
   ];

   const startTime = Date.now();
   const [conversations, contactNames, unreadCounts, totalResult] = await Promise.all([
-    Message.aggregate(pipeline),
-    Message.aggregate([{ $match: { ...baseMatch, from_me: false } }, { $sort: { createdAt: -1 } }, { $group: { _id: '$numero', name: { $first: '$nombre_whatsapp' } } }]),
-    Message.aggregate([{ $match: { ...baseMatch, ...unreadMatch } }, { $group: { _id: '$numero', unreadCount: { $sum: 1 } } }]),
+    Message.aggregate(pipeline, { allowDiskUse: true }),
+    Message.aggregate([{ $match: { ...baseMatch, from_me: false } }, { $sort: { createdAt: -1 } }, { $group: { _id: '$numero', name: { $first: '$nombre_whatsapp' } } }], { allowDiskUse: true }),
+    Message.aggregate([{ $match: { ...baseMatch, ...unreadMatch } }, { $group: { _id: '$numero', unreadCount: { $sum: 1 } } }], { allowDiskUse: true }),
     Message.aggregate(
       filter === 'no-response'
-        ? [{ $match: baseMatch }, { $project: { numero: 1, from_me: 1, createdAt: 1, timestamp: 1 } }, { $sort: { createdAt: -1 } }, { $group: { _id: '$numero', latestMessage: { $first: '$$ROOT' } } }, { $match: { 'latestMessage.from_me': false } }, { $count: 'total' }]
+        ? [{ $match: baseMatch }, { $project: { numero: 1, from_me: 1, createdAt: 1 } }, { $sort: { createdAt: -1 } }, { $group: { _id: '$numero', latestMessage: { $first: '$$ROOT' } } }, { $match: { 'latestMessage.from_me': false } }, { $count: 'total' }]
         : filter === 'pending-review'
         ? [{ $match: baseMatch }, { $group: { _id: '$numero' } }, { $lookup: { from: 'threads', localField: '_id', foreignField: 'code', as: 'threadInfo' } }, { $addFields: { review: { $arrayElemAt: ['$threadInfo.review', 0] } } }, { $match: { $or: [{ review: false }, { review: null }] } }, { $count: 'total' }]
-        : [{ $match: filterConditions }, { $group: { _id: '$numero' } }, { $count: 'total' }]
+        : [{ $match: filterConditions }, { $group: { _id: '$numero' } }, { $count: 'total' }],
+      { allowDiskUse: true }
     )
   ]);
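Note: `{ allowDiskUse: true }` is passed as the second argument to Mongoose's `Model.aggregate()`, letting MongoDB spill large `$sort`/`$group` stages to temporary files instead of failing once the 100 MB in-memory limit is hit. The chained form does the same thing; the pipeline below is illustrative only:

```js
const { Message } = require('../models/messageModel'); // path as used elsewhere in this diff

// Equivalent chained form of the option used above (illustrative pipeline).
const unreadPerChat = () =>
  Message.aggregate([
    { $match: { processed: false, from_me: false } },
    { $group: { _id: '$numero', count: { $sum: 1 } } },
    { $sort: { count: -1 } }
  ]).allowDiskUse(true);
```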
 
@@ -18,9 +18,17 @@ const dateAndTimeFromStart = (startTime) => {
   };
 };

+const getCurrentMexicoDateTime = () => {
+  return moment()
+    .tz('America/Mexico_City')
+    .locale('es')
+    .format('dddd, D [de] MMMM [de] YYYY [a las] h:mm A');
+};
+
 module.exports = {
   ISO_DATE,
   parseStartTime,
   addDays,
-  dateAndTimeFromStart
+  dateAndTimeFromStart,
+  getCurrentMexicoDateTime
 };
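Note: with moment-timezone and its Spanish locale available, `getCurrentMexicoDateTime()` returns a human-readable Mexico City timestamp. A small usage sketch; the require path and the sample output are illustrative:

```js
const { getCurrentMexicoDateTime } = require('../utils/dateUtils'); // path as required elsewhere in this diff

console.log(getCurrentMexicoDateTime());
// e.g. "jueves, 13 de junio de 2025 a las 3:45 PM"
```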
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@peopl-health/nexus",
-  "version": "3.0.2",
+  "version": "3.0.4",
   "description": "Core messaging and assistant library for WhatsApp communication platforms",
   "keywords": [
     "whatsapp",
@@ -105,7 +105,7 @@
   "peerDependencies": {
     "@anthropic-ai/sdk": "^0.32.0",
     "baileys": "^6.4.0",
-    "express": "4.22.1",
+    "express": "^4.22.1",
     "openai": "6.7.0",
     "twilio": "5.6.0"
   },