@peopl-health/nexus 3.3.2 → 3.3.4

This diff shows the contents of package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
@@ -314,31 +314,48 @@ const searchConversationsController = async (req, res) => {
314
314
 
315
315
  const escapedQuery = query.replace(/\+/g, '\\+');
316
316
 
317
- // Search through all conversations in the database
317
+ // Prioritize matches on numero/nombre_whatsapp; body search is a last resort.
318
318
  const conversations = await Message.aggregate([
319
- { $match: {
320
- group_id: null,
321
- $or: [
322
- { numero: { $regex: escapedQuery, $options: 'i' } },
323
- { nombre_whatsapp: { $regex: escapedQuery, $options: 'i' } },
324
- { body: { $regex: escapedQuery, $options: 'i' } }
325
- ]
326
- }},
327
- { $project: {
328
- numero: 1,
329
- body: 1,
330
- createdAt: 1,
331
- timestamp: 1,
332
- media: 1,
333
- nombre_whatsapp: 1,
334
- from_me: 1
335
- }},
336
- { $group: {
337
- _id: '$numero',
338
- latestMessage: { $first: '$$ROOT' },
339
- messageCount: { $sum: 1 }
340
- }},
341
- { $sort: { 'latestMessage.createdAt': -1 } },
319
+ {
320
+ $facet: {
321
+ primary: [
322
+ { $match: {
323
+ group_id: null,
324
+ $or: [
325
+ { numero: { $regex: escapedQuery, $options: 'i' } },
326
+ { nombre_whatsapp: { $regex: escapedQuery, $options: 'i' } }
327
+ ]
328
+ }},
329
+ { $sort: { createdAt: -1, timestamp: -1 } },
330
+ { $group: {
331
+ _id: '$numero',
332
+ latestMessage: { $first: '$$ROOT' },
333
+ messageCount: { $sum: 1 }
334
+ }},
335
+ { $addFields: { priority: 1 } }
336
+ ],
337
+ secondary: [
338
+ { $match: {
339
+ group_id: null,
340
+ body: { $regex: escapedQuery, $options: 'i' }
341
+ }},
342
+ { $sort: { createdAt: -1, timestamp: -1 } },
343
+ { $group: {
344
+ _id: '$numero',
345
+ latestMessage: { $first: '$$ROOT' },
346
+ messageCount: { $sum: 1 }
347
+ }},
348
+ { $addFields: { priority: 2 } }
349
+ ]
350
+ }
351
+ },
352
+ { $project: { merged: { $concatArrays: ['$primary', '$secondary'] } } },
353
+ { $unwind: '$merged' },
354
+ { $replaceRoot: { newRoot: '$merged' } },
355
+ { $sort: { priority: 1, 'latestMessage.createdAt': -1 } },
356
+ // Deduplicate numbers keeping highest-priority (name/number) result first
357
+ { $group: { _id: '$_id', doc: { $first: '$$ROOT' } } },
358
+ { $replaceRoot: { newRoot: '$doc' } },
342
359
  { $limit: parsedLimit }
343
360
  ]);
344
361
 
@@ -741,4 +758,4 @@ module.exports = {
741
758
  searchConversationsController,
742
759
  sendTemplateToNewNumberController,
743
760
  getOpenAIThreadMessagesController
744
- };
761
+ };
@@ -563,6 +563,12 @@ class NexusMessaging {
563
563
  prompt,
564
564
  response_id
565
565
  });
566
+ } else {
567
+ await this.sendMessage({
568
+ code: 'whatsapp:+51951538602',
569
+ body: `Error no se genero respuesta ${from}`,
570
+ origin: 'assistant'
571
+ });
566
572
  }
567
573
  } catch (error) {
568
574
  logger.error('Error in handleMediaWithAssistant:', error);
@@ -102,5 +102,4 @@ module.exports = {
102
102
  updateMessageStatus,
103
103
  handleStatusCallback,
104
104
  getMessageStatus
105
- };
106
-
105
+ };
@@ -11,6 +11,26 @@ async function handle24HourWindowError(message, messageSid) {
11
11
  try {
12
12
  if (!message?.body || !message?.numero) return;
13
13
 
14
+ if (message?.statusInfo?.recoveryTemplateSid || message?.statusInfo?.recoverySentAt) {
15
+ logger.info('[TemplateRecovery] Recovery already completed or in progress', { messageSid });
16
+ return;
17
+ }
18
+
19
+ // Guard: avoid duplicate recovery for the same message (race-safe)
20
+ try {
21
+ const claim = await Message.updateOne(
22
+ { message_id: messageSid, 'statusInfo.recoveryStartedAt': { $exists: false } },
23
+ { $set: { 'statusInfo.recoveryStartedAt': new Date() } }
24
+ );
25
+ if (!claim.modifiedCount && !claim.nModified) {
26
+ logger.info('[TemplateRecovery] Recovery already in progress or completed', { messageSid });
27
+ return;
28
+ }
29
+ } catch (claimErr) {
30
+ logger.warn('[TemplateRecovery] Could not set recovery start flag; aborting to avoid duplicates', { messageSid, error: claimErr.message });
31
+ return;
32
+ }
33
+
14
34
  const messaging = getDefaultInstance();
15
35
  const provider = messaging?.getProvider();
16
36
  if (!provider?.createTemplate) return;
@@ -45,8 +65,26 @@ async function handle24HourWindowError(message, messageSid) {
45
65
  const approvalStatus = status?.approvalRequest?.status?.toUpperCase();
46
66
 
47
67
  if (approvalStatus === 'APPROVED') {
48
- await sendMessage({ code: message.numero, contentSid: twilioContent.sid, variables: {} });
49
- logger.info('[TemplateRecovery] Template sent', { messageSid, templateSid: twilioContent.sid });
68
+ const claimSend = await Message.updateOne(
69
+ { message_id: messageSid, 'statusInfo.recoverySentAt': { $exists: false } },
70
+ { $set: { 'statusInfo.recoverySentAt': new Date() } }
71
+ );
72
+
73
+ if (!claimSend.modifiedCount && !claimSend.nModified) {
74
+ logger.info('[TemplateRecovery] Template already sent, skipping duplicate send', { messageSid, templateSid: twilioContent.sid });
75
+ return;
76
+ }
77
+
78
+ try {
79
+ await sendMessage({ code: message.numero, contentSid: twilioContent.sid, variables: {} });
80
+ logger.info('[TemplateRecovery] Template sent', { messageSid, templateSid: twilioContent.sid });
81
+ } catch (sendErr) {
82
+ await Message.updateOne(
83
+ { message_id: messageSid },
84
+ { $unset: { 'statusInfo.recoverySentAt': '' } }
85
+ );
86
+ logger.error('[TemplateRecovery] Error sending approved template', { messageSid, templateSid: twilioContent.sid, error: sendErr.message });
87
+ }
50
88
  } else if (approvalStatus === 'REJECTED') {
51
89
  logger.warn('[TemplateRecovery] Template rejected', { messageSid, templateSid: twilioContent.sid });
52
90
  } else {
@@ -72,7 +72,10 @@ const messageSchema = new mongoose.Schema({
72
72
  },
73
73
  errorCode: { type: String, default: null },
74
74
  errorMessage: { type: String, default: null },
75
- updatedAt: { type: Date, default: null }
75
+ updatedAt: { type: Date, default: null },
76
+ recoveryTemplateSid: { type: String, default: null },
77
+ recoveryStartedAt: { type: Date, default: null },
78
+ recoverySentAt: { type: Date, default: null }
76
79
  },
77
80
  prompt: { type: Object, default: null },
78
81
  response_id: { type: String, default: null }
@@ -7,6 +7,7 @@ const {
7
7
  } = require('./OpenAIResponsesProviderTools');
8
8
  const { DefaultMemoryManager } = require('../memory/DefaultMemoryManager');
9
9
  const { getRecordByFilter } = require('../services/airtableService');
10
+ const { getLastNMessages } = require('../helpers/messageHelper');
10
11
  const { logger } = require('../utils/logger');
11
12
  const { getCurrentMexicoDateTime } = require('../utils/dateUtils');
12
13
 
@@ -203,6 +204,12 @@ class OpenAIResponsesProvider {
203
204
  }
204
205
  });
205
206
 
207
+ const lastMessage = await getLastNMessages(thread.code, 1);
208
+ const metadata = {
209
+ numero: thread.code,
210
+ message_id: message?.message_id || (lastMessage.length > 0 ? lastMessage[0].message_id : null) || null
211
+ };
212
+
206
213
  logger.info('[OpenAIResponsesProvider] Context built', {
207
214
  conversationId,
208
215
  assistantId,
@@ -228,6 +235,7 @@ class OpenAIResponsesProvider {
228
235
  promptVariables,
229
236
  promptVersion,
230
237
  assistant,
238
+ metadata,
231
239
  ...config
232
240
  });
233
241
 
@@ -268,87 +276,130 @@ class OpenAIResponsesProvider {
268
276
  }
269
277
  }
270
278
 
271
- async runConversation({
272
- threadId,
273
- assistantId,
274
- additionalMessages = [],
275
- context = null,
276
- instructions = null,
277
- additionalInstructions = null,
278
- metadata = {},
279
- topP,
280
- temperature,
281
- maxOutputTokens,
282
- truncationStrategy = 'auto',
283
- model,
284
- assistant,
285
- toolMetadata,
286
- promptVersion = null,
287
- promptVariables = null
288
- } = {}) {
289
- try {
290
- let totalRetries = 0;
291
- let allToolsExecuted = [];
292
-
293
- const devRecord = await getRecordByFilter(Config_ID, 'responses', `{prompt_id} = "${assistantId}"`);
294
- let devContent = devRecord?.[0]?.content || '';
295
- if (promptVariables) devContent = devContent.replace(/\{\{(\w+)\}\}/g, (_, key) => promptVariables[key] ?? '');
296
-
297
- const messages = (context || this._convertItemsToApiFormat(additionalMessages))
298
- .filter(item => item.type !== 'function_call' && item.type !== 'function_call_output');
299
- const input = [{ role: 'developer', content: devContent }, ...messages];
300
-
301
- const promptConfig = { id: assistantId };
302
- if (promptVariables) promptConfig.variables = promptVariables;
303
- if (promptVersion) promptConfig.version = String(promptVersion);
304
- logger.info('[OpenAIResponsesProvider] Prompt config', { promptConfig });
305
-
306
- const baseInstructions = instructions || additionalInstructions || '';
307
- const fullInstructions = baseInstructions;
308
- //? `${baseInstructions}\n\n${this.defaults.brevityInstruction}`
309
- //: this.defaults.brevityInstruction;
310
-
311
- const makeAPICall = (inputData) => retryWithBackoff(() =>
312
- this.client.responses.create({
313
- prompt: promptConfig,
314
- input: inputData,
315
- instructions: fullInstructions
316
- }), { providerName: PROVIDER_NAME });
317
-
318
- const { result: response, retries } = await makeAPICall(input);
319
- logger.info('[OpenAIResponsesProvider] Run response', { response });
320
- totalRetries += retries;
321
- let finalResponse = response;
322
-
323
- // Handle function calls following OpenAI pattern
324
- if (assistant && response.output) {
325
- const functionCalls = response.output.filter(item => item.type === 'function_call');
279
+ async runConversation(config = {}) {
280
+ const { threadId, assistantId } = config;
281
+
282
+ const maxRetries = parseInt(process.env.MAX_CONVERSATION_RETRIES || '3', 10);
283
+
284
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
285
+ try {
286
+ logger.info('[OpenAIResponsesProvider] Conversation attempt', { attempt, maxRetries, threadId, assistantId });
287
+
288
+ const result = await this._executeConversation(config);
289
+
290
+ if (result.output_text && result.output_text.trim().length > 0) {
291
+ logger.info('[OpenAIResponsesProvider] Conversation successful', {
292
+ attempt,
293
+ outputLength: result.output_text.length,
294
+ toolsExecuted: result.tools_executed?.length || 0
295
+ });
296
+ return result;
297
+ }
298
+
299
+ logger.warn('[OpenAIResponsesProvider] Empty output, retrying', { attempt, maxRetries });
326
300
 
327
- if (functionCalls.length > 0) {
328
- const { outputs, toolsExecuted } = await handleFunctionCallsUtil(functionCalls, assistant, toolMetadata || { thread_id: threadId, assistant_id: assistantId });
329
-
330
- input.push(...response.output);
331
- input.push(...outputs);
332
- allToolsExecuted.push(...toolsExecuted);
333
-
334
- const { result: followUp, retries: followUpRetries } = await makeAPICall(input);
335
- totalRetries += followUpRetries;
336
- finalResponse = followUp;
301
+ if (attempt === maxRetries) {
302
+ throw new Error(`Conversation failed after ${attempt} attempts - no valid output generated`);
337
303
  }
304
+
305
+ await new Promise(resolve => setTimeout(resolve, 500));
306
+ } catch (error) {
307
+ logger.error('[OpenAIResponsesProvider] Conversation attempt failed', { attempt, maxRetries, error: error.message });
308
+
309
+ if (attempt === maxRetries) {
310
+ throw error;
311
+ }
312
+
313
+ await new Promise(resolve => setTimeout(resolve, 500));
338
314
  }
315
+ }
316
+ }
339
317
 
340
- return {
341
- ...finalResponse,
342
- thread_id: threadId,
343
- assistant_id: assistantId,
344
- object: finalResponse.object || 'response',
345
- tools_executed: allToolsExecuted,
346
- retries: totalRetries,
347
- };
348
- } catch (error) {
349
- logger.error('[OpenAIResponsesProvider] Error running conversation:', error);
350
- throw error;
318
+ async _executeConversation(config = {}) {
319
+ const {
320
+ threadId,
321
+ assistantId,
322
+ additionalMessages = [],
323
+ context = null,
324
+ instructions = null,
325
+ additionalInstructions = null,
326
+ metadata = {},
327
+ topP,
328
+ temperature,
329
+ maxOutputTokens,
330
+ truncationStrategy = 'auto',
331
+ model,
332
+ assistant,
333
+ toolMetadata,
334
+ promptVersion = null,
335
+ promptVariables = null
336
+ } = config;
337
+
338
+ let totalRetries = 0;
339
+ let allToolsExecuted = [];
340
+
341
+ const devRecord = await getRecordByFilter(Config_ID, 'responses', `{prompt_id} = "${assistantId}"`);
342
+ let devContent = devRecord?.[0]?.content || '';
343
+ if (promptVariables) devContent = devContent.replace(/\{\{(\w+)\}\}/g, (_, key) => promptVariables[key] ?? '');
344
+
345
+ const messages = (context || this._convertItemsToApiFormat(additionalMessages))
346
+ .filter(item => item.type !== 'function_call' && item.type !== 'function_call_output');
347
+ const input = [{ role: 'developer', content: devContent }, ...messages];
348
+
349
+ const promptConfig = { id: assistantId };
350
+ if (promptVariables) promptConfig.variables = promptVariables;
351
+ if (promptVersion) promptConfig.version = String(promptVersion);
352
+
353
+ const baseInstructions = instructions || additionalInstructions || '';
354
+
355
+ const makeAPICall = (inputData) => retryWithBackoff(() =>
356
+ this.client.responses.create({
357
+ prompt: promptConfig,
358
+ input: inputData,
359
+ instructions: baseInstructions,
360
+ metadata: metadata
361
+ }), { providerName: PROVIDER_NAME });
362
+
363
+ const { result: response, retries } = await makeAPICall(input);
364
+ totalRetries += retries;
365
+ let finalResponse = response;
366
+
367
+ // Handle function calls with multi-round support
368
+ if (assistant && response.output) {
369
+ let currentInput = [...input];
370
+ let round = 1;
371
+ const maxRounds = parseInt(process.env.MAX_FUNCTION_ROUNDS || '5', 10);
372
+
373
+ while (round <= maxRounds) {
374
+ const functionCalls = finalResponse.output.filter(item => item.type === 'function_call');
375
+ if (functionCalls.length === 0) break;
376
+
377
+ const { outputs, toolsExecuted } = await handleFunctionCallsUtil(
378
+ functionCalls,
379
+ assistant,
380
+ toolMetadata || { thread_id: threadId, assistant_id: assistantId }
381
+ );
382
+
383
+ currentInput.push(...finalResponse.output);
384
+ currentInput.push(...outputs);
385
+ allToolsExecuted.push(...toolsExecuted);
386
+
387
+ const { result: followUp, retries: followUpRetries } = await makeAPICall(currentInput);
388
+ totalRetries += followUpRetries;
389
+ finalResponse = followUp;
390
+
391
+ round++;
392
+ }
351
393
  }
394
+
395
+ return {
396
+ ...finalResponse,
397
+ thread_id: threadId,
398
+ assistant_id: assistantId,
399
+ object: finalResponse.object || 'response',
400
+ tools_executed: allToolsExecuted,
401
+ retries: totalRetries,
402
+ };
352
403
  }
353
404
 
354
405
  /**
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@peopl-health/nexus",
3
- "version": "3.3.2",
3
+ "version": "3.3.4",
4
4
  "description": "Core messaging and assistant library for WhatsApp communication platforms",
5
5
  "keywords": [
6
6
  "whatsapp",