@peopl-health/nexus 2.4.9 → 2.4.11-logs

This diff shows the contents of publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
@@ -197,7 +197,10 @@ class BaseAssistant {
  }
 
  try {
- const lastMessages = await Message.find({ numero: whatsappId })
+ const lastMessages = await Message.find({
+ numero: whatsappId,
+ interactive_type: { $ne: 'flow' }
+ })
  .sort({ createdAt: -1 })
  .limit(DEFAULT_MAX_HISTORICAL_MESSAGES);
 
@@ -1,10 +1,11 @@
  const mongoose = require('mongoose');
- const { fetchConversationData, processConversations } = require('../services/conversationService');
- const { sendMessage } = require('../core/NexusMessaging');
- const { Thread } = require('../models/threadModel');
  const llmConfig = require('../config/llmConfig');
  const { Historial_Clinico_ID } = require('../config/airtableConfig');
+ const { Thread } = require('../models/threadModel');
+ const { withThreadRecovery } = require('../helpers/threadRecoveryHelper');
  const { getRecordByFilter } = require('../services/airtableService');
+ const { fetchConversationData, processConversations } = require('../services/conversationService');
+ const { sendMessage } = require('../core/NexusMessaging');
  const { logger } = require('../utils/logger');
 
  const Message = mongoose.models.Message;
@@ -649,7 +650,7 @@ const getOpenAIThreadMessagesController = async (req, res) => {
  });
  }
 
- const thread = await Thread.findOne({
+ let thread = await Thread.findOne({
  code: phoneNumber,
  active: true
  }).sort({ createdAt: -1 });
@@ -662,7 +663,7 @@ const getOpenAIThreadMessagesController = async (req, res) => {
  });
  }
 
- const conversationId = thread.conversation_id;
+ let conversationId = thread.conversation_id;
  logger.info('Thread found - Conversation ID:', conversationId);
 
  const provider = llmConfig.getOpenAIProvider({ instantiate: true, variant });
@@ -683,8 +684,23 @@ const getOpenAIThreadMessagesController = async (req, res) => {
  logger.info('Including runId:', runId);
  }
 
+ let messages;
+ let threadRecreated = false;
+
  logger.info('Calling listMessages with params:', queryParams);
- const messages = await provider.listMessages(queryParams);
+ messages = await withThreadRecovery(
+ async (currentThread = thread) => {
+ if (currentThread !== thread) {
+ thread = currentThread;
+ conversationId = currentThread.getConversationId();
+ queryParams.threadId = conversationId;
+ threadRecreated = true;
+ }
+ return await provider.listMessages(queryParams);
+ },
+ thread,
+ variant
+ );
 
  logger.info(`Retrieved ${messages?.data?.length || 0} messages from OpenAI`);
 
@@ -696,6 +712,7 @@ const getOpenAIThreadMessagesController = async (req, res) => {
  assistantId: thread.assistant_id,
  messages: messages.data || messages,
  hasMore: messages.has_more || false,
+ threadRecreated,
  pagination: {
  limit: parseInt(limit),
  order
@@ -442,14 +442,17 @@ class NexusMessaging {
  return;
  }
 
- const response = await replyAssistant(from, body);
+ const result = await replyAssistant(from, body);
+ const response = typeof result === 'string' ? result : result?.output;
+ const tools_executed = typeof result === 'object' ? result?.tools_executed : undefined;
 
  if (response) {
  await this.sendMessage({
  code: from,
  body: response,
  processed: true,
- origin: 'assistant'
+ origin: 'assistant',
+ tools_executed
  });
  }
  } catch (error) {
@@ -506,14 +509,17 @@ class NexusMessaging {
  ? body
  : `Media received (${mediaDescriptor || 'attachment'})`;
 
- const response = await replyAssistant(from, fallbackMessage);
+ const result = await replyAssistant(from, fallbackMessage);
+ const response = typeof result === 'string' ? result : result?.output;
+ const tools_executed = typeof result === 'object' ? result?.tools_executed : undefined;
 
  if (response) {
  await this.sendMessage({
  code: from,
  body: response,
  processed: true,
- origin: 'assistant'
+ origin: 'assistant',
+ tools_executed
  });
  }
  } catch (error) {
@@ -647,13 +653,17 @@ class NexusMessaging {
  logger.info(`Processing batched messages from ${chatId} (including media if any)`);
 
  // Get assistant response
- const botResponse = await replyAssistant(chatId);
+ const result = await replyAssistant(chatId);
+ const botResponse = typeof result === 'string' ? result : result?.output;
+ const tools_executed = typeof result === 'object' ? result?.tools_executed : undefined;
+
  if (botResponse) {
  await this.sendMessage({
  code: chatId,
  body: botResponse,
  processed: true,
- origin: 'assistant'
+ origin: 'assistant',
+ tools_executed
  });
  }
 
@@ -3,6 +3,7 @@ const llmConfig = require('../config/llmConfig.js');
  const { Thread } = require('../models/threadModel.js');
  const { createProvider } = require('../providers/createProvider.js');
  const { withTracing } = require('../utils/tracingDecorator');
+ const { withThreadRecovery } = require('./threadRecoveryHelper');
 
  const { getRecordByFilter } = require('../services/airtableService.js');
  const { logger } = require('../utils/logger');
@@ -58,41 +59,52 @@ const runAssistantAndWait = async ({
  const variant = provider.getVariant ? provider.getVariant() : (process.env.VARIANT || 'assistants');
  const tools = assistant.getToolSchemas ? assistant.getToolSchemas() : (configTools || []);
 
- const runConfigWithAssistant = variant === 'responses'
- ? { ...conversationConfig, assistant }
- : conversationConfig;
-
- let run = await provider.runConversation({
- threadId: thread.getConversationId(),
- assistantId: thread.getAssistantId(),
- tools: tools.length > 0 ? tools : undefined,
- ...runConfigWithAssistant,
- });
-
- const filter = thread.code ? { code: thread.code, active: true } : null;
- if (filter) {
- await Thread.updateOne(filter, { $set: { run_id: run.id } });
- }
-
- const maxRetries = polling?.maxRetries ?? DEFAULT_MAX_RETRIES;
- let completed = false;
-
- try {
- logger.info('[runAssistantAndWait] Run started', { runId: run.id, threadId: thread.getConversationId(), assistantId: thread.getAssistantId() });
- ({run, completed} = await provider.checkRunStatus(assistant, thread.getConversationId(), run.id, 0, maxRetries));
- } finally {
- if (filter) {
- await Thread.updateOne(filter, { $set: { run_id: null } });
- }
- }
+ return await withThreadRecovery(
+ async (currentThread = thread) => {
+ const toolMetadata = { numero: currentThread.code, assistant_id: currentThread.getAssistantId() };
+ const runConfigWithAssistant = variant === 'responses'
+ ? { ...conversationConfig, assistant, toolMetadata }
+ : conversationConfig;
+
+ let run = await provider.runConversation({
+ threadId: currentThread.getConversationId(),
+ assistantId: currentThread.getAssistantId(),
+ tools: tools.length > 0 ? tools : undefined,
+ ...runConfigWithAssistant,
+ });
+
+ const filter = currentThread.code ? { code: currentThread.code, active: true } : null;
+ if (filter) {
+ await Thread.updateOne(filter, { $set: { run_id: run.id } });
+ }
+
+ const maxRetries = polling?.maxRetries ?? DEFAULT_MAX_RETRIES;
+ let completed = false;
+ let tools_executed = run.tools_executed || [];
+
+ try {
+ logger.info('[runAssistantAndWait] Run started', { runId: run.id, threadId: currentThread.getConversationId(), assistantId: currentThread.getAssistantId() });
+ const result = await provider.checkRunStatus(assistant, currentThread.getConversationId(), run.id, 0, maxRetries, false, toolMetadata);
+ run = result.run;
+ completed = result.completed;
+ tools_executed = [...tools_executed, ...(result.tools_executed || [])];
+ } finally {
+ if (filter) {
+ await Thread.updateOne(filter, { $set: { run_id: null } });
+ }
+ }
 
- if (!completed) {
- return { run: run, completed: false, output: '' };
- }
+ if (!completed) {
+ return { run: run, completed: false, output: '', tools_executed };
+ }
 
- const output = await provider.getRunText({ threadId: thread.getConversationId(), runId: run.id, fallback: '' });
+ const output = await provider.getRunText({ threadId: currentThread.getConversationId(), runId: run.id, fallback: '' });
 
- return { completed: true, output };
+ return { completed: true, output, tools_executed };
+ },
+ thread,
+ variant
+ );
  };
 
  const executeAssistantAttempt = async (thread, assistant, runConfig, attemptNumber) => {
@@ -113,13 +125,13 @@ const runAssistantWithRetries = async (thread, assistant, runConfig, patientRepl
  }
 
  const startTime = Date.now();
- let run, output, completed;
+ let run, output, completed, tools_executed;
  let retries = 0;
  const maxRetries = DEFAULT_MAX_RETRIES;
 
  do {
  retries++;
- ({ run, output, completed } = await withTracing(
+ ({ run, output, completed, tools_executed } = await withTracing(
  executeAssistantAttempt,
  'assistant_attempt',
  (thread, assistant, runConfig, attemptNumber) => ({
@@ -143,10 +155,10 @@ const runAssistantWithRetries = async (thread, assistant, runConfig, patientRepl
  const predictionTimeMs = Date.now() - startTime;
 
  if (run?.last_error) logger.warn('[runAssistantWithRetries] Run error', { error: run.last_error });
- logger.info('[runAssistantWithRetries] Run completed', { completed, outputLength: output?.length || 0 });
+ logger.info('[runAssistantWithRetries] Run completed', { completed, outputLength: output?.length || 0, toolsExecuted: tools_executed?.length || 0 });
  logger.info('[runAssistantWithRetries] TIMING', { predictionTimeMs, retries });
 
- return { run, output, completed, retries, predictionTimeMs };
+ return { run, output, completed, retries, predictionTimeMs, tools_executed };
  };
 
  module.exports = {
@@ -1,6 +1,7 @@
  const { PDFDocument } = require('pdf-lib');
  const { execFile } = require('child_process');
  const fs = require('fs').promises;
+ const fsSync = require('fs');
  const path = require('path');
  const sharp = require('sharp');
 
@@ -9,11 +10,11 @@ const { Message } = require('../models/messageModel.js');
  const { sanitizeFilename } = require('../utils/sanitizer.js');
  const { logger } = require('../utils/logger');
 
- async function convertPdfToImages(pdfName) {
+ async function convertPdfToImages(pdfName, existingPdfPath = null) {
  const outputDir = path.join(__dirname, 'assets', 'tmp');
 
  const sanitizedName = sanitizeFilename(pdfName);
- const pdfPath = path.join(outputDir, `${sanitizedName}.pdf`);
+ const pdfPath = existingPdfPath || path.join(outputDir, `${sanitizedName}.pdf`);
  const outputPattern = path.join(outputDir, sanitizedName);
 
  await fs.mkdir(outputDir, { recursive: true });
@@ -22,22 +23,52 @@ async function convertPdfToImages(pdfName) {
  const args = ['-jpeg', pdfPath, outputPattern];
  logger.info('[convertPdfToImages] Running: pdftoppm', args.join(' '));
 
- execFile('pdftoppm', args, (error, stdout, stderr) => {
+ const timeout = 30000;
+ let timedOut = false;
+
+ const child = execFile('pdftoppm', args, { timeout, maxBuffer: 10 * 1024 * 1024 }, (error, stdout, stderr) => {
+ if (timedOut) {
+ return;
+ }
+
  if (error) {
+ logger.error('[convertPdfToImages] Error details:', {
+ error: error.message,
+ stderr,
+ pdfPath,
+ pdfExists: fsSync.existsSync(pdfPath),
+ killed: error.killed,
+ signal: error.signal
+ });
  return reject(new Error(`Error splitting PDF: ${stderr || error.message}`));
  }
 
- fs.readdir(outputDir, (err, files) => {
- if (err) {
- return reject(new Error(`Error reading output directory: ${err.message}`));
- }
+ logger.info('[convertPdfToImages] pdftoppm completed successfully');
+
+ fs.readdir(outputDir)
+ .then(files => {
+ const jpgFiles = files
+ .filter(file => file.startsWith(sanitizedName) && file.endsWith('.jpg'))
+ .map(file => path.join(outputDir, file));
+
+ logger.info(`[convertPdfToImages] Found ${jpgFiles.length} image files`);
+ resolve(jpgFiles);
+ })
+ .catch(err => {
+ logger.error('[convertPdfToImages] Error reading output directory:', { error: err.message });
+ reject(new Error(`Error reading output directory: ${err.message}`));
+ });
+ });
 
- const jpgFiles = files
- .filter(file => file.startsWith(sanitizedName) && file.endsWith('.jpg'))
- .map(file => path.join(outputDir, file));
+ const timeoutId = setTimeout(() => {
+ timedOut = true;
+ child.kill('SIGTERM');
+ logger.error('[convertPdfToImages] Process timed out after 30 seconds', { pdfPath });
+ reject(new Error('PDF conversion timed out after 30 seconds'));
+ }, timeout);
 
- resolve(jpgFiles);
- });
+ child.on('exit', () => {
+ clearTimeout(timeoutId);
  });
  });
  }
@@ -130,44 +161,69 @@ const cleanupFiles = async (files) => {
  };
 
  async function downloadMediaAndCreateFile(code, reply) {
- const resultMedia = await Message.findOne({
- message_id: reply.message_id,
- timestamp: reply.timestamp,
- media: { $ne: null }
- });
+ try {
+ const resultMedia = await Message.findOne({
+ message_id: reply.message_id,
+ timestamp: reply.timestamp,
+ media: { $ne: null }
+ });
 
- if (!resultMedia) return [];
+ if (!resultMedia) return [];
 
- if (!resultMedia.media || !resultMedia.media.key) {
- logger.info('[downloadMediaAndCreateFile] No valid media found for message:', reply.message_id);
- return [];
- }
+ if (!resultMedia.media || !resultMedia.media.key) {
+ logger.info('[downloadMediaAndCreateFile] No valid media found for message:', reply.message_id);
+ return [];
+ }
 
- const { bucketName, key } = resultMedia.media;
- if (!bucketName || !key) return [];
-
- const [subType, fileName] = key.split('/');
-
- const sanitizedCode = sanitizeFilename(code);
- const sanitizedSubType = sanitizeFilename(subType);
- const sanitizedFileName = sanitizeFilename(fileName);
-
- const sourceFile = `${sanitizedCode}-${sanitizedSubType}-${sanitizedFileName}`;
- const downloadPath = path.join(__dirname, 'assets', 'tmp', sourceFile);
-
- await fs.mkdir(path.dirname(downloadPath), { recursive: true });
- await downloadFileFromS3(bucketName, key, downloadPath);
+ const { bucketName, key } = resultMedia.media;
+ if (!bucketName || !key) return [];
+
+ const [subType, fileName] = key.split('/');
+
+ const sanitizedCode = sanitizeFilename(code, 20);
+ const sanitizedSubType = sanitizeFilename(subType, 10);
+ const sanitizedFileName = sanitizeFilename(fileName, 50);
+
+ const sourceFile = `${sanitizedCode}-${sanitizedSubType}-${sanitizedFileName}`;
+ const downloadPath = path.join(__dirname, 'assets', 'tmp', sourceFile);
+
+ logger.info('[downloadMediaAndCreateFile] Downloading file', { sourceFile, downloadPath, bucketName, key });
+
+ await fs.mkdir(path.dirname(downloadPath), { recursive: true });
+ await downloadFileFromS3(bucketName, key, downloadPath);
 
- const { name: baseName } = path.parse(sourceFile);
- const fileNames = (subType === 'document' || subType === 'application')
- ? await convertPdfToImages(baseName)
- : [downloadPath];
+ const { name: baseName } = path.parse(sourceFile);
+ let fileNames = [];
+
+ if (subType === 'document' || subType === 'application') {
+ try {
+ fileNames = await convertPdfToImages(baseName, downloadPath);
+ logger.info('[downloadMediaAndCreateFile] PDF converted successfully', { imageCount: fileNames.length });
+ } catch (conversionError) {
+ logger.error('[downloadMediaAndCreateFile] PDF conversion failed:', {
+ error: conversionError.message,
+ sourceFile
+ });
+ fileNames = [];
+ } finally {
+ try {
+ await fs.unlink(downloadPath);
+ } catch (unlinkError) {
+ logger.warn('[downloadMediaAndCreateFile] Failed to delete PDF:', { error: unlinkError.message });
+ }
+ }
+ } else {
+ fileNames = [downloadPath];
+ }
 
- if (subType === 'document' || subType === 'application') {
- await fs.unlink(downloadPath);
+ return fileNames;
+ } catch (error) {
+ logger.error('[downloadMediaAndCreateFile] Error processing media:', {
+ error: error.message,
+ message_id: reply.message_id
+ });
+ return [];
  }
-
- return fileNames;
  }
 
  module.exports = {
@@ -1,7 +1,7 @@
  const llmConfig = require('../config/llmConfig.js');
  const { logger } = require('../utils/logger');
  const fs = require('fs');
- const mime = require('mime-types');
+ const path = require('path');
 
 
  async function analyzeImage(imagePath, isSticker = false, contentType = null) {
@@ -30,14 +30,30 @@ async function analyzeImage(imagePath, isSticker = false, contentType = null) {
  };
  }
 
+ // Determine mime type from file extension
  let mimeType = contentType;
  if (!mimeType) {
- if (imagePath.toLowerCase().endsWith('.webp')) {
- mimeType = 'image/webp';
- } else {
- mimeType = mime.lookup(imagePath) || 'image/jpeg';
- }
+ const ext = path.extname(imagePath).toLowerCase();
+ const mimeMap = {
+ '.jpg': 'image/jpeg',
+ '.jpeg': 'image/jpeg',
+ '.png': 'image/png',
+ '.gif': 'image/gif',
+ '.webp': 'image/webp'
+ };
+ mimeType = mimeMap[ext] || 'image/jpeg'; // Default to jpeg for pdftoppm output
+ }
+
+ // Validate that mime type is supported by Claude
+ const supportedMimeTypes = ['image/jpeg', 'image/png', 'image/gif', 'image/webp'];
+ if (!supportedMimeTypes.includes(mimeType)) {
+ logger.warn('[analyzeImage] Unsupported mime type, defaulting to image/jpeg:', {
+ originalMimeType: mimeType,
+ imagePath
+ });
+ mimeType = 'image/jpeg';
  }
+
  if (mimeType === 'image/vnd.wap.wbmp') {
  logger.info('Skipping image with MIME type:', mimeType);
  return {
@@ -49,6 +65,7 @@ async function analyzeImage(imagePath, isSticker = false, contentType = null) {
  };
  }
  // Read the image file and convert to base64
+ logger.info('[analyzeImage] Reading image file:', { imagePath: imagePath.split('/').pop() });
  const imageBuffer = await fs.promises.readFile(imagePath);
  const base64Image = imageBuffer.toString('base64');
 
@@ -77,6 +94,7 @@ async function analyzeImage(imagePath, isSticker = false, contentType = null) {
  },
  ],
  });
+ logger.info('[analyzeImage] Description received');
  const description = messageDescription.content[0].text;
 
  // For stickers, skip medical analysis and table extraction
@@ -5,6 +5,11 @@ const { logger } = require('../utils/logger');
  const addMessageToThread = async (reply, messagesChat, provider, thread) => {
  const threadId = thread.getConversationId();
 
+ if (reply.interactive_type === 'flow') {
+ logger.info(`[addMessageToThread] Skipping flow message (UI only) - ID: ${reply.message_id}`);
+ return;
+ }
+
  if (reply.origin === 'whatsapp_platform') {
  await provider.addMessage({
  threadId,
@@ -4,6 +4,7 @@ const { analyzeImage } = require('./llmsHelper.js');
  const { cleanupFiles, downloadMediaAndCreateFile } = require('./filesHelper.js');
  const { formatMessage } = require('./messageHelper.js');
  const { sanitizeLogMetadata } = require('../utils/sanitizer.js');
+ const { withTracing } = require('../utils/tracingDecorator.js');
 
  /**
  * Structured logging with PHI protection
@@ -56,7 +57,7 @@ const processTextMessage = (reply) => {
  return messagesChat;
  };
 
- const processImageFile = async (fileName, reply) => {
+ const processImageFileCore = async (fileName, reply) => {
  let imageAnalysis = null;
  let url = null;
  const messagesChat = [];
@@ -66,25 +67,21 @@ const processImageFile = async (fileName, reply) => {
  fileName.toLowerCase().includes('/sticker/');
 
  try {
- imageAnalysis = await analyzeImage(fileName, isSticker, reply.media?.contentType);
-
- logger.info('processImageFile', {
- message_id: reply.message_id,
- bucketName: reply.media?.bucketName,
- key: reply.media?.key,
- is_sticker: isSticker,
- medical_relevance: imageAnalysis?.medical_relevance,
- has_table: imageAnalysis?.has_table,
- analysis_type: imageAnalysis?.medical_analysis ? 'medical' : 'general'
- });
-
- logger.debug('processImageFile_analysis', { imageAnalysis });
+ imageAnalysis = await withTracing(
+ analyzeImage,
+ 'analyze_image',
+ () => ({ 'image.is_sticker': isSticker, 'image.message_id': reply.message_id })
+ )(fileName, isSticker, reply.media?.contentType);
 
  const invalidAnalysis = ['NOT_MEDICAL', 'QUALITY_INSUFFICIENT'];
 
  // Generate presigned URL only if medically relevant AND not a sticker
  if (imageAnalysis?.medical_relevance && !isSticker) {
- url = await generatePresignedUrl(reply.media.bucketName, reply.media.key);
+ url = await withTracing(
+ generatePresignedUrl,
+ 'generate_presigned_url',
+ () => ({ 'url.bucket': reply.media.bucketName })
+ )(reply.media.bucketName, reply.media.key);
  }
 
  // Add appropriate text based on analysis
@@ -104,6 +101,17 @@ const processImageFile = async (fileName, reply) => {
  text: imageAnalysis?.description || 'Image processed',
  });
  }
+
+ logger.info('processImageFile', {
+ message_id: reply.message_id,
+ is_sticker: isSticker,
+ medical_relevance: imageAnalysis?.medical_relevance,
+ has_table: imageAnalysis?.has_table,
+ analysis_type: imageAnalysis?.medical_analysis ? 'medical' : 'general'
+ });
+
+ logger.debug('processImageFile_analysis', { imageAnalysis });
+
  } catch (error) {
  logger.error('processImageFile', error, {
  message_id: reply.message_id,
@@ -119,15 +127,28 @@ const processImageFile = async (fileName, reply) => {
  return { messagesChat, url };
  };
 
- const processAudioFile = async (fileName, provider) => {
+ const processImageFile = withTracing(
+ processImageFileCore,
+ 'process_image_file',
+ (fileName, reply) => ({
+ 'image.message_id': reply.message_id,
+ 'image.has_media': !!reply.media
+ })
+ );
+
+ const processAudioFileCore = async (fileName, provider) => {
  const messagesChat = [];
 
  try {
- const audioTranscript = await provider.transcribeAudio({
- file: fs.createReadStream(fileName),
- responseFormat: 'text',
- language: 'es'
- });
+ const audioTranscript = await withTracing(
+ async () => provider.transcribeAudio({
+ file: fs.createReadStream(fileName),
+ responseFormat: 'text',
+ language: 'es'
+ }),
+ 'transcribe_audio',
+ () => ({ 'audio.file_name': fileName ? fileName.split('/').pop().replace(/^[^-]+-[^-]+-/, 'xxx-xxx-') : 'unknown' })
+ )();
 
  const transcriptText = audioTranscript?.text || audioTranscript;
  messagesChat.push({
@@ -156,7 +177,15 @@ const processAudioFile = async (fileName, provider) => {
  return messagesChat;
  };
 
- const processMediaFiles = async (code, reply, provider) => {
+ const processAudioFile = withTracing(
+ processAudioFileCore,
+ 'process_audio_file',
+ (fileName) => ({
+ 'audio.file_name': fileName ? fileName.split('/').pop().replace(/^[^-]+-[^-]+-/, 'xxx-xxx-') : 'unknown'
+ })
+ );
+
+ const processMediaFilesCore = async (code, reply, provider) => {
  let url = null;
  const messagesChat = [];
  const tempFiles = [];
@@ -165,22 +194,16 @@ const processMediaFiles = async (code, reply, provider) => {
  return { messagesChat, url, tempFiles };
  }
 
- logger.info('processMediaFiles', {
- message_id: reply.message_id,
- processing_media: true
- });
-
- const fileNames = await downloadMediaAndCreateFile(code, reply);
+ const fileNames = await withTracing(
+ downloadMediaAndCreateFile,
+ 'download_media',
+ () => ({ 'media.message_id': reply.message_id, 'media.type': reply.media?.mediaType })
+ )(code, reply);
  tempFiles.push(...fileNames);
 
  for (const fileName of fileNames) {
  const safeFileName = fileName ? fileName.split('/').pop().replace(/^[^-]+-[^-]+-/, 'xxx-xxx-') : 'unknown';
 
- logger.info('processMediaFiles_file', {
- message_id: reply.message_id,
- fileName: safeFileName
- });
-
  // Skip only WBMP files (unsupported format)
  if (fileName.toLowerCase().includes('.wbmp')) {
  logger.info('processMediaFiles_skip', {
@@ -209,21 +232,35 @@ const processMediaFiles = async (code, reply, provider) => {
  }
  }
 
+ logger.info('processMediaFiles_complete', {
+ message_id: reply.message_id,
+ file_count: fileNames.length
+ });
+
  return { messagesChat, url, tempFiles };
  };
 
- const processThreadMessage = async (code, replies, provider) => {
+ const processMediaFiles = withTracing(
+ processMediaFilesCore,
+ 'process_media_files',
+ (code, reply) => ({
+ 'media.message_id': reply.message_id,
+ 'media.is_media': reply.is_media
+ })
+ );
+
+ const processThreadMessageCore = async (code, replies, provider) => {
  const replyArray = Array.isArray(replies) ? replies : [replies];
 
  const results = await Promise.all(
  replyArray.map(async (reply, i) => {
  let tempFiles = [];
+
  try {
  const isPatient = reply.origin === 'patient';
- const [textMessages, mediaResult] = await Promise.all([
- Promise.resolve(processTextMessage(reply)),
- processMediaFiles(code, reply, provider)
- ]);
+
+ const textMessages = processTextMessage(reply);
+ const mediaResult = await processMediaFiles(code, reply, provider);
 
  const { messagesChat: mediaMessages, url, tempFiles: mediaFiles } = mediaResult;
  tempFiles = mediaFiles;
@@ -235,22 +272,39 @@ const processThreadMessage = async (code, replies, provider) => {
  logger.info('processThreadMessage', {
  index: i + 1,
  total: replyArray.length,
- isPatient,
- hasUrl: !!url
+ isPatient,
+ hasMedia: reply.is_media,
+ hasUrl: !!url
  });
 
  return { isPatient, url, messages, reply, tempFiles };
  } catch (error) {
- logger.error('processThreadMessage', error, { message_id: reply.message_id, origin: reply.origin });
+ logger.error('processThreadMessage', error, {
+ message_id: reply.message_id,
+ origin: reply.origin
+ });
  await cleanupFiles(tempFiles);
  return { isPatient: false, url: null, messages: [], reply, tempFiles: [] };
  }
  })
  );
 
+ logger.info('processThreadMessage_complete', {
+ message_count: replyArray.length
+ });
+
  return results;
  };
 
+ const processThreadMessage = withTracing(
+ processThreadMessageCore,
+ 'process_thread_messages',
+ (code, replies) => ({
+ 'messages.count': Array.isArray(replies) ? replies.length : 1,
+ 'thread.code': code
+ })
+ );
+
  module.exports = {
  processTextMessage,
  processImageFile,
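Note on the pattern in the hunks above: each processing helper is now wrapped with withTracing(fn, spanName, attributeFn) from ../utils/tracingDecorator.js, which is not included in this diff. As a rough mental model only (the sketch below is an assumption about its shape, not the package's actual implementation), the decorator can be read as a higher-order function that runs the wrapped call inside a named span and records the attributes returned by the third argument:

// Illustrative sketch only -- the real tracingDecorator is not part of this diff.
// Assumed shape: withTracing(fn, spanName, getAttributes) -> wrapped async function.
const { logger } = require('../utils/logger');

const withTracing = (fn, spanName, getAttributes = () => ({})) => {
  return async (...args) => {
    const attributes = getAttributes(...args); // e.g. { 'media.message_id': ... }
    const startedAt = Date.now();
    try {
      return await fn(...args); // the wrapped helper runs unchanged
    } finally {
      logger.debug(spanName, { ...attributes, duration_ms: Date.now() - startedAt });
    }
  };
};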
@@ -0,0 +1,58 @@
+ const { Thread } = require('../models/threadModel');
+ const { createProvider } = require('../providers/createProvider');
+ const { logger } = require('../utils/logger');
+
+ const isThreadNotFoundError = (error) => {
+ return error?.status === 404 ||
+ error?.code === 'thread_not_found' ||
+ error?.code === 'invalid_thread_id' ||
+ (error?.message && (
+ error.message.includes('No thread found') ||
+ error.message.includes('thread does not exist') ||
+ error.message.includes('Invalid thread')
+ ));
+ };
+
+ const recreateThread = async (thread, variant = 'assistants') => {
+ const provider = createProvider({ variant });
+ const newConversation = await provider.createConversation({
+ metadata: { phoneNumber: thread.code, recreated: true }
+ });
+
+ await Thread.updateOne(
+ { code: thread.code, active: true },
+ { $set: { conversation_id: newConversation.id } }
+ );
+
+ const updatedThread = await Thread.findOne({ code: thread.code, active: true });
+ logger.info('[threadRecovery] Thread recreated', {
+ oldId: thread.conversation_id,
+ newId: newConversation.id,
+ code: thread.code
+ });
+
+ return updatedThread;
+ };
+
+ const withThreadRecovery = async (operation, thread, variant = 'assistants') => {
+ try {
+ return await operation();
+ } catch (error) {
+ if (isThreadNotFoundError(error) && thread) {
+ logger.warn('[threadRecovery] Thread not found, recreating', {
+ conversationId: thread.getConversationId?.() || thread.conversation_id,
+ code: thread.code
+ });
+
+ const recoveredThread = await recreateThread(thread, variant);
+ return await operation(recoveredThread);
+ }
+ throw error;
+ }
+ };
+
+ module.exports = {
+ isThreadNotFoundError,
+ recreateThread,
+ withThreadRecovery
+ };
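Usage, for reference: withThreadRecovery takes the risky provider call as operation; if it rejects with a thread-not-found error, the conversation is recreated and the operation is retried once with the recovered thread. A minimal sketch, assuming a thread document and a provider instance already exist in the caller (the message content is invented for illustration):

// Minimal usage sketch; `thread` and `provider` are assumed to exist in the caller.
const { withThreadRecovery } = require('../helpers/threadRecoveryHelper');

async function addGreeting(thread, provider) {
  return withThreadRecovery(
    async (currentThread = thread) => {
      // On the retry path, currentThread is the recreated thread with a new conversation_id.
      return provider.addMessage({
        threadId: currentThread.getConversationId(),
        role: 'user',
        content: 'hola' // invented example content
      });
    },
    thread,                              // used by recreateThread if the conversation is gone
    process.env.VARIANT || 'assistants'  // provider variant passed to createProvider
  );
}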
@@ -32,6 +32,15 @@ const messageSchema = new mongoose.Schema({
  type: String,
  enum: ['whatsapp_platform', 'assistant', 'patient'],
  default: 'whatsapp_platform' },
+ tools_executed: [{
+ tool_name: { type: String, required: true },
+ tool_arguments: { type: Object, default: null },
+ tool_output: { type: Object, default: null },
+ execution_time_ms: { type: Number, default: null },
+ success: { type: Boolean, default: true },
+ call_id: { type: String, default: null },
+ executed_at: { type: Date, default: Date.now }
+ }],
  media: {
  contentType: { type: String, default: null },
  bucketName: { type: String, default: null },
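With the schema addition above, each stored message can carry the tool calls that produced it. A hypothetical entry (all values invented for illustration) would be stored roughly as:

// Hypothetical tools_executed entry matching the schema fields above; values are illustrative.
const exampleToolExecution = {
  tool_name: 'get_patient_record',      // invented tool name
  tool_arguments: { numero: '521555***' },
  tool_output: { found: true },
  execution_time_ms: 842,
  success: true,
  call_id: 'call_abc123',
  executed_at: new Date()
};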
@@ -108,6 +117,7 @@ async function insertMessage(values) {
  content_sid: values.content_sid || null,
  clinical_context: clinical_context,
  origin: values.origin,
+ tools_executed: values.tools_executed || [],
  raw: values.raw || null
  };
 
@@ -280,19 +280,23 @@ class OpenAIResponsesProvider {
  tools = [],
  model,
  assistant,
+ toolMetadata,
  } = {}) {
  try {
  const id = this._ensurethreadId(threadId);
  const messages = this._responseInput(additionalMessages) || [];
 
- // Check for pending function calls in the conversation before creating a new response
+ const execMetadata = toolMetadata || { thread_id: id, assistant_id: assistantId };
+ let toolsExecuted = [];
+
  if (assistant && toolOutputs.length === 0) {
  try {
  const conversationMessages = await this.listMessages({ threadId: id, order: 'desc', limit: 50 });
  const items = conversationMessages?.data || [];
- const pendingOutputs = await handlePendingFunctionCallsUtil(assistant, items);
- if (pendingOutputs.length > 0) {
- toolOutputs = pendingOutputs;
+ const result = await handlePendingFunctionCallsUtil(assistant, items, execMetadata);
+ if (result.outputs && result.outputs.length > 0) {
+ toolOutputs = result.outputs;
+ toolsExecuted = result.toolsExecuted || [];
  }
  } catch (error) {
  logger.warn('[OpenAIResponsesProvider] Error checking for pending function calls:', error?.message);
@@ -329,6 +333,7 @@ class OpenAIResponsesProvider {
  thread_id: id,
  assistant_id: assistantId,
  object: response.object || 'response',
+ tools_executed: toolsExecuted,
  };
  } catch (error) {
  logger.error('[OpenAIResponsesProvider] Error running conversation:', error);
@@ -423,27 +428,31 @@ class OpenAIResponsesProvider {
  return await handleRequiresActionUtil(assistant, run);
  }
 
- async checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxRetries = DEFAULT_MAX_RETRIES, actionHandled = false) {
+ async checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxRetries = DEFAULT_MAX_RETRIES, actionHandled = false, toolMetadata = {}, accumulatedTools = []) {
  try {
  let run = await this.getRun({ threadId: thread_id, runId: run_id });
  logger.info(`Status: ${run.status} ${thread_id} ${run_id} (attempt ${retryCount + 1})`);
 
  if (run.status === 'completed') {
- return {run, completed: true};
+ return {run, completed: true, tools_executed: accumulatedTools};
  }
 
  if (run.status === 'failed' || run.status === 'cancelled' || run.status === 'expired') {
- return {run, completed: false};
+ return {run, completed: false, tools_executed: accumulatedTools};
  }
 
  const needsFunctionCall = run.output?.some(item => item.type === 'function_call');
  if (needsFunctionCall && !actionHandled) {
  if (retryCount >= maxRetries) {
  logger.warn('[OpenAIResponsesProvider] Max retries reached while handling function calls');
- return {run, completed: false};
+ return {run, completed: false, tools_executed: accumulatedTools};
  }
 
- const outputs = await handleRequiresActionUtil(assistant, run);
+ const execMetadata = { ...toolMetadata, thread_id, run_id };
+ const result = await handleRequiresActionUtil(assistant, run, execMetadata);
+ const outputs = result.outputs || [];
+ const toolsExecuted = result.toolsExecuted || [];
+
  logger.info('[OpenAIResponsesProvider] Function call outputs:', outputs);
 
  if (outputs.length > 0) {
@@ -456,30 +465,30 @@ class OpenAIResponsesProvider {
 
  await new Promise(resolve => setTimeout(resolve, 1000));
 
- return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, true);
+ return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, true, toolMetadata, [...accumulatedTools, ...toolsExecuted]);
  } catch (submitError) {
  logger.error('[OpenAIResponsesProvider] Error submitting tool outputs:', submitError);
  if (retryCount < maxRetries) {
  await new Promise(resolve => setTimeout(resolve, 2000));
- return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, false);
+ return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, false, toolMetadata, accumulatedTools);
  }
- return {run, completed: false};
+ return {run, completed: false, tools_executed: accumulatedTools};
  }
  } else {
  logger.warn('[OpenAIResponsesProvider] Function calls detected but no outputs generated');
- return {run, completed: false};
+ return {run, completed: false, tools_executed: accumulatedTools};
  }
  }
 
  if (retryCount < maxRetries) {
  await new Promise(resolve => setTimeout(resolve, 1000));
- return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, actionHandled);
+ return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, actionHandled, toolMetadata, accumulatedTools);
  }
 
- return {run, completed: false};
+ return {run, completed: false, tools_executed: accumulatedTools};
  } catch (error) {
  logger.error('[OpenAIResponsesProvider] Error checking run status:', error);
- return {run: null, completed: false};
+ return {run: null, completed: false, tools_executed: accumulatedTools};
  }
  }
 
@@ -1,27 +1,52 @@
  const { logger } = require('../utils/logger');
 
- /**
- * Execute a function call and return the output format
- * @param {Object} assistant - The assistant instance with executeTool method
- * @param {Object} call - The function call object with name, arguments, and call_id
- * @returns {Promise<Object>} Function call output in Responses API format
- */
- async function executeFunctionCall(assistant, call) {
+ async function executeFunctionCall(assistant, call, metadata = {}) {
+ const startTime = Date.now();
  try {
  const name = call.name;
  const args = call.arguments ? JSON.parse(call.arguments) : {};
  const result = await assistant.executeTool(name, args);
- return {
- type: 'function_call_output',
+ const executionTime = Date.now() - startTime;
+
+ const toolData = {
+ tool_name: name,
+ tool_arguments: args,
+ tool_output: result,
+ execution_time_ms: executionTime,
+ success: true,
  call_id: call.call_id,
- output: typeof result === 'string' ? result : JSON.stringify(result)
+ executed_at: new Date()
+ };
+
+ return {
+ functionOutput: {
+ type: 'function_call_output',
+ call_id: call.call_id,
+ output: typeof result === 'string' ? result : JSON.stringify(result)
+ },
+ toolData
  };
  } catch (error) {
+ const executionTime = Date.now() - startTime;
+
+ const toolData = {
+ tool_name: call.name,
+ tool_arguments: call.arguments ? JSON.parse(call.arguments) : {},
+ tool_output: { error: error?.message || 'Tool execution failed' },
+ execution_time_ms: executionTime,
+ success: false,
+ call_id: call.call_id,
+ executed_at: new Date()
+ };
+
  logger.error('[OpenAIResponsesProvider] Tool execution failed', error);
  return {
- type: 'function_call_output',
- call_id: call.call_id,
- output: JSON.stringify({ success: false, error: error?.message || 'Tool execution failed' })
+ functionOutput: {
+ type: 'function_call_output',
+ call_id: call.call_id,
+ output: JSON.stringify({ success: false, error: error?.message || 'Tool execution failed' })
+ },
+ toolData
  };
  }
  }
@@ -32,7 +57,7 @@ async function executeFunctionCall(assistant, call) {
  * @param {Array} conversationItems - Array of conversation items
  * @returns {Promise<Array>} Array of function call outputs
  */
- async function handlePendingFunctionCalls(assistant, conversationItems) {
+ async function handlePendingFunctionCalls(assistant, conversationItems, metadata = {}) {
  const pendingFunctionCalls = conversationItems.filter(item => item.type === 'function_call');
  const functionOutputs = conversationItems.filter(item => item.type === 'function_call_output');
 
@@ -41,15 +66,20 @@ async function handlePendingFunctionCalls(assistant, conversationItems) {
  );
 
  if (orphanedCalls.length === 0) {
- return [];
+ return { outputs: [], toolsExecuted: [] };
  }
 
  logger.info(`[OpenAIResponsesProvider] Found ${orphanedCalls.length} pending function calls, handling them...`);
  const outputs = [];
+ const toolsExecuted = [];
+
  for (const call of orphanedCalls) {
- outputs.push(await executeFunctionCall(assistant, call));
+ const result = await executeFunctionCall(assistant, call, metadata);
+ outputs.push(result.functionOutput);
+ toolsExecuted.push(result.toolData);
  }
- return outputs;
+
+ return { outputs, toolsExecuted };
  }
 
  /**
@@ -58,18 +88,22 @@ async function handlePendingFunctionCalls(assistant, conversationItems) {
  * @param {Object} run - The run object with output array
  * @returns {Promise<Array>} Array of function call outputs
  */
- async function handleRequiresAction(assistant, run) {
+ async function handleRequiresAction(assistant, run, metadata = {}) {
  const functionCalls = run.output?.filter(item => item.type === 'function_call') || [];
  if (functionCalls.length === 0) {
- return [];
+ return { outputs: [], toolsExecuted: [] };
  }
 
  const outputs = [];
+ const toolsExecuted = [];
+
  for (const call of functionCalls) {
- outputs.push(await executeFunctionCall(assistant, call));
+ const result = await executeFunctionCall(assistant, call, metadata);
+ outputs.push(result.functionOutput);
+ toolsExecuted.push(result.toolData);
  }
 
- return outputs;
+ return { outputs, toolsExecuted };
  }
 
  /**
@@ -14,6 +14,7 @@ const { getThread, getThreadInfo } = require('../helpers/threadHelper.js');
  const { withTracing } = require('../utils/tracingDecorator.js');
  const { processThreadMessage } = require('../helpers/processHelper.js');
  const { getLastMessages, updateMessageRecord } = require('../helpers/messageHelper.js');
+ const { withThreadRecovery } = require('../helpers/threadRecoveryHelper.js');
  const { combineImagesToPDF, cleanupFiles } = require('../helpers/filesHelper.js');
  const { logger } = require('../utils/logger');
 
@@ -137,13 +138,19 @@ const getAssistantById = (assistant_id, thread) => {
  const createAssistant = async (code, assistant_id, messages=[], force=false) => {
  const findThread = await Thread.findOne({ code: code });
  logger.info('[createAssistant] findThread', findThread);
- if (findThread && findThread.getConversationId()) {
+ if (findThread && findThread.getConversationId() && !force) {
  logger.info('[createAssistant] Thread already exists');
  const updateFields = { active: true, stopped: false };
  Thread.setAssistantId(updateFields, assistant_id);
  await Thread.updateOne({ code: code }, { $set: updateFields });
  return findThread;
  }
+
+ if (force && findThread?.getConversationId()) {
+ const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
+ await provider.deleteConversation(findThread.getConversationId());
+ logger.info('[createAssistant] Deleted old conversation, will create new one');
+ }
 
  const curRow = await getCurRow(Historial_Clinico_ID, code);
  logger.info('[createAssistant] curRow', curRow[0]);
@@ -153,7 +160,6 @@ const createAssistant = async (code, assistant_id, messages=[], force=false) =>
  const assistant = getAssistantById(assistant_id, null);
  const initialThread = await assistant.create(code, curRow[0]);
 
- // Add new messages to memory
  const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
  for (const message of messages) {
  await provider.addMessage({
@@ -177,29 +183,32 @@ const createAssistant = async (code, assistant_id, messages=[], force=false) =>
  const updatedThread = await Thread.findOneAndUpdate(condition, {run_id: null, ...thread}, options);
  logger.info('[createAssistant] Updated thread:', updatedThread);
 
- // Delete previous thread
- if (force) {
- await provider.deleteConversation(findThread.getConversationId());
- }
-
  return thread;
  };
 
  const addMsgAssistant = async (code, inMessages, role = 'user', reply = false) => {
  try {
- const thread = await Thread.findOne({ code: code });
+ let thread = await Thread.findOne({ code: code });
  logger.info(thread);
  if (thread === null) return null;
 
  const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
- for (const message of inMessages) {
- logger.info(message);
- await provider.addMessage({
- threadId: thread.getConversationId(),
- role: role,
- content: message
- });
- }
+
+ await withThreadRecovery(
+ async (recoveredThread = thread) => {
+ thread = recoveredThread;
+ for (const message of inMessages) {
+ logger.info(message);
+ await provider.addMessage({
+ threadId: thread.getConversationId(),
+ role: role,
+ content: message
+ });
+ }
+ },
+ thread,
+ process.env.VARIANT || 'assistants'
+ );
 
  if (!reply) return null;
 
@@ -322,9 +331,15 @@ const replyAssistantCore = async (code, message_ = null, thread_ = null, runOpti
  const allTempFiles = processResults.flatMap(r => r.tempFiles || []);
 
  if (allMessagesToAdd.length > 0) {
- const threadId = finalThread.getConversationId();
  logger.info(`[replyAssistantCore] Adding ${allMessagesToAdd.length} messages to thread in batch`);
- await provider.addMessage({ threadId, messages: allMessagesToAdd });
+ await withThreadRecovery(
+ async (thread = finalThread) => {
+ const threadId = thread.getConversationId();
+ await provider.addMessage({ threadId, messages: allMessagesToAdd });
+ },
+ finalThread,
+ process.env.VARIANT || 'assistants'
+ );
  }
 
  await Promise.all(processResults.map(r => updateMessageRecord(r.reply, finalThread)));
@@ -374,14 +389,15 @@ const replyAssistantCore = async (code, message_ = null, thread_ = null, runOpti
  timings.run_assistant_ms = runAssistantMs;
  timings.total_ms = Date.now() - startTotal;
 
- const { run, output, completed, retries, predictionTimeMs } = runResult;
+ const { run, output, completed, retries, predictionTimeMs, tools_executed } = runResult;
 
  logger.info('[Assistant Reply Complete]', {
  code: code ? `${code.substring(0, 3)}***${code.slice(-4)}` : 'unknown',
  messageCount: patientReply.length,
  hasMedia: urls.length > 0,
  retries,
- totalMs: timings.total_ms
+ totalMs: timings.total_ms,
+ toolsExecuted: tools_executed?.length || 0
  });
 
  if (output && predictionTimeMs) {
@@ -397,7 +413,7 @@ const replyAssistantCore = async (code, message_ = null, thread_ = null, runOpti
  }).catch(err => logger.error('[replyAssistantCore] Failed to store metrics:', err));
  }
 
- return output;
+ return { output, tools_executed };
  };
 
  const replyAssistant = withTracing(
@@ -186,7 +186,8 @@ class MongoStorage {
  content_sid: messageData.contentSid || null,
  template_variables: messageData.variables ? JSON.stringify(messageData.variables) : null,
  raw: messageData.raw || null,
- origin
+ origin,
+ tools_executed: messageData.tools_executed || []
  };
  }
 
@@ -198,8 +198,11 @@ class MessageParser {
  return interactive.title || interactive.description || '[List item selected]';
 
  case 'flow':
- // Flows contain complex JSON data that doesn't need assistant processing
- return '';
+ if (interactive.data) {
+ const flowData = typeof interactive.data === 'string' ? interactive.data : JSON.stringify(interactive.data, null, 2);
+ return `Flow Response:\n${flowData}`;
+ }
+ return '[Flow response]';
 
  default:
  return '[Interactive message]';
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@peopl-health/nexus",
- "version": "2.4.9",
+ "version": "2.4.11-logs",
  "description": "Core messaging and assistant library for WhatsApp communication platforms",
  "keywords": [
  "whatsapp",