@peopl-health/nexus 2.4.10 → 2.4.11-logs-msg
- package/lib/assistants/BaseAssistant.js +4 -1
- package/lib/core/NexusMessaging.js +16 -6
- package/lib/helpers/assistantHelper.js +16 -11
- package/lib/helpers/messageHelper.js +5 -0
- package/lib/helpers/processHelper.js +157 -50
- package/lib/models/messageModel.js +10 -0
- package/lib/providers/OpenAIResponsesProvider.js +25 -16
- package/lib/providers/OpenAIResponsesProviderTools.js +55 -21
- package/lib/services/assistantService.js +24 -4
- package/lib/storage/MongoStorage.js +2 -1
- package/lib/utils/messageParser.js +5 -2
- package/package.json +1 -1
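
In short, 2.4.11-logs-msg does three things: (1) every tool call executed during an assistant run is now captured as a `tools_executed` record and persisted with the outgoing message (messageModel, MongoStorage, NexusMessaging); (2) media processing is instrumented with per-step timings (download, image analysis, audio transcription, presigned-URL generation) that flow into the prediction metrics; and (3) WhatsApp flow messages (`interactive_type: 'flow'`) are treated as UI-only and excluded from assistant history and thread input.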

package/lib/assistants/BaseAssistant.js
@@ -197,7 +197,10 @@ class BaseAssistant {
     }
 
     try {
-      const lastMessages = await Message.find({
+      const lastMessages = await Message.find({
+        numero: whatsappId,
+        interactive_type: { $ne: 'flow' }
+      })
         .sort({ createdAt: -1 })
         .limit(DEFAULT_MAX_HISTORICAL_MESSAGES);
 
package/lib/core/NexusMessaging.js
@@ -442,14 +442,17 @@ class NexusMessaging {
         return;
       }
 
-      const response = await replyAssistant(from, body);
+      const result = await replyAssistant(from, body);
+      const response = typeof result === 'string' ? result : result?.output;
+      const tools_executed = typeof result === 'object' ? result?.tools_executed : undefined;
 
      if (response) {
         await this.sendMessage({
           code: from,
           body: response,
           processed: true,
-          origin: 'assistant'
+          origin: 'assistant',
+          tools_executed
         });
       }
     } catch (error) {
@@ -506,14 +509,17 @@ class NexusMessaging {
        ? body
        : `Media received (${mediaDescriptor || 'attachment'})`;
 
-      const response = await replyAssistant(from, fallbackMessage);
+      const result = await replyAssistant(from, fallbackMessage);
+      const response = typeof result === 'string' ? result : result?.output;
+      const tools_executed = typeof result === 'object' ? result?.tools_executed : undefined;
 
      if (response) {
         await this.sendMessage({
           code: from,
           body: response,
           processed: true,
-          origin: 'assistant'
+          origin: 'assistant',
+          tools_executed
         });
       }
     } catch (error) {
@@ -647,13 +653,17 @@ class NexusMessaging {
      logger.info(`Processing batched messages from ${chatId} (including media if any)`);
 
      // Get assistant response
-      const botResponse = await replyAssistant(chatId);
+      const result = await replyAssistant(chatId);
+      const botResponse = typeof result === 'string' ? result : result?.output;
+      const tools_executed = typeof result === 'object' ? result?.tools_executed : undefined;
+
      if (botResponse) {
         await this.sendMessage({
           code: chatId,
           body: botResponse,
           processed: true,
-          origin: 'assistant'
+          origin: 'assistant',
+          tools_executed
         });
       }
 
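
All three call sites above normalize the widened `replyAssistant` result the same way, staying backward compatible with providers that still resolve to a plain string. A minimal sketch of that contract; the `normalizeAssistantResult` helper is illustrative, not part of the package:

```js
// Hypothetical helper capturing the normalization idiom repeated at the
// three NexusMessaging call sites in this diff.
function normalizeAssistantResult(result) {
  if (typeof result === 'string') {
    // Old contract: replyAssistant resolved to the reply text itself.
    return { response: result, tools_executed: undefined };
  }
  // New contract: replyAssistant resolves to { output, tools_executed }.
  return { response: result?.output, tools_executed: result?.tools_executed };
}
```

Since the three-line idiom now appears verbatim in each handler, factoring it out like this would be a natural follow-up.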
package/lib/helpers/assistantHelper.js
@@ -59,12 +59,13 @@ const runAssistantAndWait = async ({
   const variant = provider.getVariant ? provider.getVariant() : (process.env.VARIANT || 'assistants');
   const tools = assistant.getToolSchemas ? assistant.getToolSchemas() : (configTools || []);
 
-  const runConfigWithAssistant = variant === 'responses'
-    ? { ...conversationConfig, assistant }
-    : conversationConfig;
-
   return await withThreadRecovery(
     async (currentThread = thread) => {
+      const toolMetadata = { numero: currentThread.code, assistant_id: currentThread.getAssistantId() };
+      const runConfigWithAssistant = variant === 'responses'
+        ? { ...conversationConfig, assistant, toolMetadata }
+        : conversationConfig;
+
      let run = await provider.runConversation({
         threadId: currentThread.getConversationId(),
         assistantId: currentThread.getAssistantId(),
@@ -79,10 +80,14 @@ const runAssistantAndWait = async ({
 
      const maxRetries = polling?.maxRetries ?? DEFAULT_MAX_RETRIES;
      let completed = false;
+      let tools_executed = run.tools_executed || [];
 
      try {
        logger.info('[runAssistantAndWait] Run started', { runId: run.id, threadId: currentThread.getConversationId(), assistantId: currentThread.getAssistantId() });
-
+        const result = await provider.checkRunStatus(assistant, currentThread.getConversationId(), run.id, 0, maxRetries, false, toolMetadata);
+        run = result.run;
+        completed = result.completed;
+        tools_executed = [...tools_executed, ...(result.tools_executed || [])];
      } finally {
        if (filter) {
          await Thread.updateOne(filter, { $set: { run_id: null } });
@@ -90,12 +95,12 @@ const runAssistantAndWait = async ({
      }
 
      if (!completed) {
-        return { run: run, completed: false, output: '' };
+        return { run: run, completed: false, output: '', tools_executed };
      }
 
      const output = await provider.getRunText({ threadId: currentThread.getConversationId(), runId: run.id, fallback: '' });
 
-      return { completed: true, output };
+      return { completed: true, output, tools_executed };
    },
    thread,
    variant
@@ -120,13 +125,13 @@ const runAssistantWithRetries = async (thread, assistant, runConfig, patientRepl
  }
 
  const startTime = Date.now();
-  let run, output, completed;
+  let run, output, completed, tools_executed;
  let retries = 0;
  const maxRetries = DEFAULT_MAX_RETRIES;
 
  do {
    retries++;
-    ({ run, output, completed } = await withTracing(
+    ({ run, output, completed, tools_executed } = await withTracing(
      executeAssistantAttempt,
      'assistant_attempt',
      (thread, assistant, runConfig, attemptNumber) => ({
@@ -150,10 +155,10 @@ const runAssistantWithRetries = async (thread, assistant, runConfig, patientRepl
  const predictionTimeMs = Date.now() - startTime;
 
  if (run?.last_error) logger.warn('[runAssistantWithRetries] Run error', { error: run.last_error });
-  logger.info('[runAssistantWithRetries] Run completed', { completed, outputLength: output?.length || 0 });
+  logger.info('[runAssistantWithRetries] Run completed', { completed, outputLength: output?.length || 0, toolsExecuted: tools_executed?.length || 0 });
  logger.info('[runAssistantWithRetries] TIMING', { predictionTimeMs, retries });
 
-  return { run, output, completed, retries, predictionTimeMs };
+  return { run, output, completed, retries, predictionTimeMs, tools_executed };
 };
 
 module.exports = {
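
For reference, `runAssistantWithRetries` now resolves to the shape sketched below, with `tools_executed` accumulated from the initial `runConversation` result plus every `checkRunStatus` poll. Field values here are hypothetical:

```js
// Hypothetical resolved value of runAssistantWithRetries after this change.
const runResult = {
  run: { id: 'run_abc123', status: 'completed' }, // provider run object
  output: 'Your appointment is booked.',          // assistant reply text
  completed: true,
  retries: 1,
  predictionTimeMs: 4210,
  tools_executed: [
    { tool_name: 'book_appointment', success: true, execution_time_ms: 830, call_id: 'call_1' }
  ]
};
```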
package/lib/helpers/messageHelper.js
@@ -5,6 +5,11 @@ const { logger } = require('../utils/logger');
 const addMessageToThread = async (reply, messagesChat, provider, thread) => {
   const threadId = thread.getConversationId();
 
+  if (reply.interactive_type === 'flow') {
+    logger.info(`[addMessageToThread] Skipping flow message (UI only) - ID: ${reply.message_id}`);
+    return;
+  }
+
   if (reply.origin === 'whatsapp_platform') {
     await provider.addMessage({
       threadId,
package/lib/helpers/processHelper.js
@@ -4,6 +4,7 @@ const { analyzeImage } = require('./llmsHelper.js');
 const { cleanupFiles, downloadMediaAndCreateFile } = require('./filesHelper.js');
 const { formatMessage } = require('./messageHelper.js');
 const { sanitizeLogMetadata } = require('../utils/sanitizer.js');
+const { withTracing } = require('../utils/tracingDecorator.js');
 
 /**
  * Structured logging with PHI protection
@@ -56,35 +57,41 @@ const processTextMessage = (reply) => {
   return messagesChat;
 };
 
-const processImageFile = async (fileName, reply) => {
+const processImageFileCore = async (fileName, reply) => {
   let imageAnalysis = null;
   let url = null;
   const messagesChat = [];
+  const timings = {
+    analysis_ms: 0,
+    url_generation_ms: 0
+  };
 
   const isSticker = reply.media?.mediaType === 'sticker' ||
     fileName.toLowerCase().includes('sticker/') ||
    fileName.toLowerCase().includes('/sticker/');
 
  try {
-
-
-
-      message_id: reply.message_id,
-
-
-
-
-      has_table: imageAnalysis?.has_table,
-      analysis_type: imageAnalysis?.medical_analysis ? 'medical' : 'general'
-    });
-
-    logger.debug('processImageFile_analysis', { imageAnalysis });
+    const { result: analysis, duration: analysisDuration } = await withTracing(
+      analyzeImage,
+      'analyze_image',
+      () => ({ 'image.is_sticker': isSticker, 'image.message_id': reply.message_id }),
+      { returnTiming: true }
+    )(fileName, isSticker, reply.media?.contentType);
+    imageAnalysis = analysis;
+    timings.analysis_ms = analysisDuration;
 
    const invalidAnalysis = ['NOT_MEDICAL', 'QUALITY_INSUFFICIENT'];
 
    // Generate presigned URL only if medically relevant AND not a sticker
    if (imageAnalysis?.medical_relevance && !isSticker) {
-      url = await generatePresignedUrl(reply.media.bucketName, reply.media.key);
+      const { result: presignedUrl, duration: urlDuration } = await withTracing(
+        generatePresignedUrl,
+        'generate_presigned_url',
+        () => ({ 'url.bucket': reply.media.bucketName }),
+        { returnTiming: true }
+      )(reply.media.bucketName, reply.media.key);
+      url = presignedUrl;
+      timings.url_generation_ms = urlDuration;
    }
 
    // Add appropriate text based on analysis
@@ -104,6 +111,18 @@ const processImageFile = async (fileName, reply) => {
        text: imageAnalysis?.description || 'Image processed',
      });
    }
+
+    logger.info('processImageFile', {
+      message_id: reply.message_id,
+      is_sticker: isSticker,
+      medical_relevance: imageAnalysis?.medical_relevance,
+      has_table: imageAnalysis?.has_table,
+      analysis_type: imageAnalysis?.medical_analysis ? 'medical' : 'general',
+      ...timings
+    });
+
+    logger.debug('processImageFile_analysis', { imageAnalysis });
+
  } catch (error) {
    logger.error('processImageFile', error, {
      message_id: reply.message_id,
@@ -116,18 +135,36 @@ const processImageFile = async (fileName, reply) => {
    });
  }
 
-  return { messagesChat, url };
+  return { messagesChat, url, timings };
 };
 
-const processAudioFile = async (fileName, provider) => {
+const processImageFile = withTracing(
+  processImageFileCore,
+  'process_image_file',
+  (fileName, reply) => ({
+    'image.message_id': reply.message_id,
+    'image.has_media': !!reply.media
+  })
+);
+
+const processAudioFileCore = async (fileName, provider) => {
  const messagesChat = [];
+  const timings = {
+    transcribe_ms: 0
+  };
 
  try {
-    const audioTranscript = await provider.transcribeAudio({
-      file: fs.createReadStream(fileName),
-      responseFormat: 'text',
-      language: 'es'
-    });
+    const { result: audioTranscript, duration: transcribeDuration } = await withTracing(
+      async () => provider.transcribeAudio({
+        file: fs.createReadStream(fileName),
+        responseFormat: 'text',
+        language: 'es'
+      }),
+      'transcribe_audio',
+      () => ({ 'audio.file_name': fileName ? fileName.split('/').pop().replace(/^[^-]+-[^-]+-/, 'xxx-xxx-') : 'unknown' }),
+      { returnTiming: true }
+    )();
+    timings.transcribe_ms = transcribeDuration;
 
    const transcriptText = audioTranscript?.text || audioTranscript;
    messagesChat.push({
@@ -138,7 +175,8 @@ const processAudioFile = async (fileName, provider) => {
    logger.info('processAudioFile', {
      fileName: fileName ? fileName.split('/').pop().replace(/^[^-]+-[^-]+-/, 'xxx-xxx-') : 'unknown',
      transcription_success: true,
-      transcript_length: transcriptText?.length || 0
+      transcript_length: transcriptText?.length || 0,
+      ...timings
    });
 
    logger.debug('processAudioFile_transcript', { transcriptText });
@@ -153,34 +191,44 @@ const processAudioFile = async (fileName, provider) => {
    });
  }
 
-  return messagesChat;
+  return { messagesChat, timings };
 };
 
-const processMediaFiles = async (code, reply, provider) => {
+const processAudioFile = withTracing(
+  processAudioFileCore,
+  'process_audio_file',
+  (fileName) => ({
+    'audio.file_name': fileName ? fileName.split('/').pop().replace(/^[^-]+-[^-]+-/, 'xxx-xxx-') : 'unknown'
+  })
+);
+
+const processMediaFilesCore = async (code, reply, provider) => {
  let url = null;
  const messagesChat = [];
  const tempFiles = [];
+  const timings = {
+    download_ms: 0,
+    image_analysis_ms: 0,
+    audio_transcription_ms: 0,
+    url_generation_ms: 0
+  };
 
  if (!reply.is_media) {
-    return { messagesChat, url, tempFiles };
+    return { messagesChat, url, tempFiles, timings };
  }
 
-  const fileNames = await downloadMediaAndCreateFile(code, reply);
-
-
-
-
-
+  const { result: fileNames, duration: downloadDuration } = await withTracing(
+    downloadMediaAndCreateFile,
+    'download_media',
+    () => ({ 'media.message_id': reply.message_id, 'media.type': reply.media?.mediaType }),
+    { returnTiming: true }
+  )(code, reply);
+  timings.download_ms = downloadDuration;
  tempFiles.push(...fileNames);
 
  for (const fileName of fileNames) {
    const safeFileName = fileName ? fileName.split('/').pop().replace(/^[^-]+-[^-]+-/, 'xxx-xxx-') : 'unknown';
 
-    logger.info('processMediaFiles_file', {
-      message_id: reply.message_id,
-      fileName: safeFileName
-    });
-
    // Skip only WBMP files (unsupported format)
    if (fileName.toLowerCase().includes('.wbmp')) {
      logger.info('processMediaFiles_skip', {
@@ -200,34 +248,75 @@ const processMediaFiles = async (code, reply, provider) => {
      fileName.toLowerCase().includes('/sticker/');
 
    if (isImageLike) {
-      const { messagesChat: imageMessages, url: imageUrl } = await processImageFile(fileName, reply);
+      const { messagesChat: imageMessages, url: imageUrl, timings: imageTimings } = await processImageFile(fileName, reply);
+
      messagesChat.push(...imageMessages);
      if (imageUrl) url = imageUrl;
+
+      if (imageTimings) {
+        timings.image_analysis_ms += imageTimings.analysis_ms || 0;
+        timings.url_generation_ms += imageTimings.url_generation_ms || 0;
+      }
    } else if (fileName.includes('audio')) {
-      const audioMessages = await processAudioFile(fileName, provider);
+      const { messagesChat: audioMessages, timings: audioTimings } = await processAudioFile(fileName, provider);
+
      messagesChat.push(...audioMessages);
+
+      if (audioTimings) {
+        timings.audio_transcription_ms += audioTimings.transcribe_ms || 0;
+      }
    }
  }
 
-  return { messagesChat, url, tempFiles };
+  logger.info('processMediaFiles_complete', {
+    message_id: reply.message_id,
+    file_count: fileNames.length,
+    ...timings
+  });
+
+  return { messagesChat, url, tempFiles, timings };
 };
 
-const processThreadMessage = async (code, replies, provider) => {
+const processMediaFiles = withTracing(
+  processMediaFilesCore,
+  'process_media_files',
+  (code, reply) => ({
+    'media.message_id': reply.message_id,
+    'media.is_media': reply.is_media
+  })
+);
+
+const processThreadMessageCore = async (code, replies, provider) => {
  const replyArray = Array.isArray(replies) ? replies : [replies];
+  const timings = {
+    download_ms: 0,
+    image_analysis_ms: 0,
+    audio_transcription_ms: 0,
+    url_generation_ms: 0,
+    total_media_ms: 0
+  };
 
  const results = await Promise.all(
    replyArray.map(async (reply, i) => {
      let tempFiles = [];
+
      try {
        const isPatient = reply.origin === 'patient';
-        const [textMessages, mediaResult] = await Promise.all([
-          Promise.resolve(processTextMessage(reply)),
-          processMediaFiles(code, reply, provider)
-        ]);
 
-        const { messagesChat: mediaMessages, url, tempFiles: mediaFiles } = mediaResult;
+        const textMessages = processTextMessage(reply);
+        const mediaResult = await processMediaFiles(code, reply, provider);
+
+        const { messagesChat: mediaMessages, url, tempFiles: mediaFiles, timings: mediaTimings } = mediaResult;
        tempFiles = mediaFiles;
 
+        if (mediaTimings) {
+          timings.download_ms += mediaTimings.download_ms || 0;
+          timings.image_analysis_ms += mediaTimings.image_analysis_ms || 0;
+          timings.audio_transcription_ms += mediaTimings.audio_transcription_ms || 0;
+          timings.url_generation_ms += mediaTimings.url_generation_ms || 0;
+          timings.total_media_ms += (mediaTimings.download_ms + mediaTimings.image_analysis_ms + mediaTimings.audio_transcription_ms + mediaTimings.url_generation_ms);
+        }
+
        const allMessages = [...textMessages, ...mediaMessages];
        const role = reply.origin === 'patient' ? 'user' : 'assistant';
        const messages = allMessages.map(content => ({ role, content }));
@@ -235,22 +324,40 @@ const processThreadMessage = async (code, replies, provider) => {
        logger.info('processThreadMessage', {
          index: i + 1,
          total: replyArray.length,
-          isPatient,
-
+          isPatient,
+          hasMedia: reply.is_media,
+          hasUrl: !!url
        });
 
        return { isPatient, url, messages, reply, tempFiles };
      } catch (error) {
-        logger.error('processThreadMessage', error, {
+        logger.error('processThreadMessage', error, {
+          message_id: reply.message_id,
+          origin: reply.origin
+        });
        await cleanupFiles(tempFiles);
        return { isPatient: false, url: null, messages: [], reply, tempFiles: [] };
      }
    })
  );
 
-  return results;
+  logger.info('processThreadMessage_complete', {
+    message_count: replyArray.length,
+    ...timings
+  });
+
+  return { results, timings };
 };
 
+const processThreadMessage = withTracing(
+  processThreadMessageCore,
+  'process_thread_messages',
+  (code, replies) => ({
+    'messages.count': Array.isArray(replies) ? replies.length : 1,
+    'thread.code': code
+  })
+);
+
 module.exports = {
   processTextMessage,
   processImageFile,
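
`tracingDecorator.js` is not part of this diff, so the exact `withTracing` implementation is not shown. Inferred from its call sites here, `withTracing(fn, spanName, getAttributes, { returnTiming: true })(...args)`, a compatible sketch would be (assumed, not the package's actual code):

```js
// Assumed shape of withTracing, inferred from its call sites in this diff.
// With { returnTiming: true } the wrapper resolves to { result, duration }
// instead of the bare result, so callers can fill their timings objects.
const withTracing = (fn, spanName, getAttributes, options = {}) => {
  return async (...args) => {
    // A real implementation would open a tracing span named spanName here,
    // attach getAttributes(...args), and close the span in a finally block.
    const start = Date.now();
    const result = await fn(...args);
    const duration = Date.now() - start;
    return options.returnTiming ? { result, duration } : result;
  };
};
```

The `returnTiming` option is what lets `processImageFileCore` and `processAudioFileCore` populate `timings` without manual `Date.now()` bookkeeping around every call.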
package/lib/models/messageModel.js
@@ -32,6 +32,15 @@ const messageSchema = new mongoose.Schema({
     type: String,
     enum: ['whatsapp_platform', 'assistant', 'patient'],
     default: 'whatsapp_platform' },
+  tools_executed: [{
+    tool_name: { type: String, required: true },
+    tool_arguments: { type: Object, default: null },
+    tool_output: { type: Object, default: null },
+    execution_time_ms: { type: Number, default: null },
+    success: { type: Boolean, default: true },
+    call_id: { type: String, default: null },
+    executed_at: { type: Date, default: Date.now }
+  }],
   media: {
     contentType: { type: String, default: null },
     bucketName: { type: String, default: null },
@@ -108,6 +117,7 @@ async function insertMessage(values) {
     content_sid: values.content_sid || null,
     clinical_context: clinical_context,
     origin: values.origin,
+    tools_executed: values.tools_executed || [],
     raw: values.raw || null
   };
 
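
With the schema addition above, a stored assistant message can carry records like the following (all values hypothetical):

```js
// Hypothetical tools_executed entry as persisted on a message document.
const message = {
  numero: '521234567890',
  origin: 'assistant',
  tools_executed: [{
    tool_name: 'get_lab_results',
    tool_arguments: { patient_id: 'p_001' },
    tool_output: { status: 'ok', count: 3 },
    execution_time_ms: 412,
    success: true,
    call_id: 'call_abc',
    executed_at: new Date('2025-01-15T12:00:00Z')
  }]
};
```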
package/lib/providers/OpenAIResponsesProvider.js
@@ -280,19 +280,23 @@ class OpenAIResponsesProvider {
     tools = [],
     model,
     assistant,
+    toolMetadata,
   } = {}) {
     try {
       const id = this._ensurethreadId(threadId);
       const messages = this._responseInput(additionalMessages) || [];
 
-
+      const execMetadata = toolMetadata || { thread_id: id, assistant_id: assistantId };
+      let toolsExecuted = [];
+
      if (assistant && toolOutputs.length === 0) {
        try {
          const conversationMessages = await this.listMessages({ threadId: id, order: 'desc', limit: 50 });
          const items = conversationMessages?.data || [];
-          const outputs = await handlePendingFunctionCallsUtil(assistant, items);
-          if (outputs && outputs.length > 0) {
-            toolOutputs = outputs;
+          const result = await handlePendingFunctionCallsUtil(assistant, items, execMetadata);
+          if (result.outputs && result.outputs.length > 0) {
+            toolOutputs = result.outputs;
+            toolsExecuted = result.toolsExecuted || [];
          }
        } catch (error) {
          logger.warn('[OpenAIResponsesProvider] Error checking for pending function calls:', error?.message);
@@ -329,6 +333,7 @@ class OpenAIResponsesProvider {
        thread_id: id,
        assistant_id: assistantId,
        object: response.object || 'response',
+        tools_executed: toolsExecuted,
      };
    } catch (error) {
      logger.error('[OpenAIResponsesProvider] Error running conversation:', error);
@@ -423,27 +428,31 @@ class OpenAIResponsesProvider {
    return await handleRequiresActionUtil(assistant, run);
  }
 
-  async checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxRetries = DEFAULT_MAX_RETRIES, actionHandled = false) {
+  async checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxRetries = DEFAULT_MAX_RETRIES, actionHandled = false, toolMetadata = {}, accumulatedTools = []) {
    try {
      let run = await this.getRun({ threadId: thread_id, runId: run_id });
      logger.info(`Status: ${run.status} ${thread_id} ${run_id} (attempt ${retryCount + 1})`);
 
      if (run.status === 'completed') {
-        return {run, completed: true};
+        return {run, completed: true, tools_executed: accumulatedTools};
      }
 
      if (run.status === 'failed' || run.status === 'cancelled' || run.status === 'expired') {
-        return {run, completed: false};
+        return {run, completed: false, tools_executed: accumulatedTools};
      }
 
      const needsFunctionCall = run.output?.some(item => item.type === 'function_call');
      if (needsFunctionCall && !actionHandled) {
        if (retryCount >= maxRetries) {
          logger.warn('[OpenAIResponsesProvider] Max retries reached while handling function calls');
-          return {run, completed: false};
+          return {run, completed: false, tools_executed: accumulatedTools};
        }
 
-        const outputs = await handleRequiresActionUtil(assistant, run);
+        const execMetadata = { ...toolMetadata, thread_id, run_id };
+        const result = await handleRequiresActionUtil(assistant, run, execMetadata);
+        const outputs = result.outputs || [];
+        const toolsExecuted = result.toolsExecuted || [];
+
        logger.info('[OpenAIResponsesProvider] Function call outputs:', outputs);
 
        if (outputs.length > 0) {
@@ -456,30 +465,30 @@ class OpenAIResponsesProvider {
 
          await new Promise(resolve => setTimeout(resolve, 1000));
 
-          return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, true);
+          return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, true, toolMetadata, [...accumulatedTools, ...toolsExecuted]);
        } catch (submitError) {
          logger.error('[OpenAIResponsesProvider] Error submitting tool outputs:', submitError);
          if (retryCount < maxRetries) {
            await new Promise(resolve => setTimeout(resolve, 2000));
-            return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, false);
+            return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, false, toolMetadata, accumulatedTools);
          }
-          return {run, completed: false};
+          return {run, completed: false, tools_executed: accumulatedTools};
        }
      } else {
        logger.warn('[OpenAIResponsesProvider] Function calls detected but no outputs generated');
-        return {run, completed: false};
+        return {run, completed: false, tools_executed: accumulatedTools};
      }
    }
 
    if (retryCount < maxRetries) {
      await new Promise(resolve => setTimeout(resolve, 1000));
-      return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, actionHandled);
+      return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, actionHandled, toolMetadata, accumulatedTools);
    }
 
-    return {run, completed: false};
+    return {run, completed: false, tools_executed: accumulatedTools};
  } catch (error) {
    logger.error('[OpenAIResponsesProvider] Error checking run status:', error);
-    return {run: null, completed: false};
+    return {run: null, completed: false, tools_executed: accumulatedTools};
  }
 }
 
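
Two details of the widened `checkRunStatus` signature are easy to miss: `accumulatedTools` grows only on the successful submit path (`[...accumulatedTools, ...toolsExecuted]`), so a failed submit that is retried does not double-count, and every return path, including the catch, now yields a `tools_executed` array. A consumption sketch mirroring the assistantHelper call (argument values illustrative):

```js
// Sketch of the new call/return contract for checkRunStatus.
const result = await provider.checkRunStatus(
  assistant,
  thread.getConversationId(), // thread_id
  run.id,                     // run_id
  0,                          // retryCount
  maxRetries,
  false,                      // actionHandled
  { numero: thread.code, assistant_id: thread.getAssistantId() } // toolMetadata
);
// result.tools_executed is an array on every path, even error returns.
```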
package/lib/providers/OpenAIResponsesProviderTools.js
@@ -1,27 +1,52 @@
 const { logger } = require('../utils/logger');
 
-/**
-
- * @param {Object} assistant - The assistant instance with executeTool method
- * @param {Object} call - The function call object with name, arguments, and call_id
- * @returns {Promise<Object>} Function call output in Responses API format
- */
-async function executeFunctionCall(assistant, call) {
+async function executeFunctionCall(assistant, call, metadata = {}) {
+  const startTime = Date.now();
   try {
     const name = call.name;
     const args = call.arguments ? JSON.parse(call.arguments) : {};
     const result = await assistant.executeTool(name, args);
-    return {
-      type: 'function_call_output',
+    const executionTime = Date.now() - startTime;
+
+    const toolData = {
+      tool_name: name,
+      tool_arguments: args,
+      tool_output: result,
+      execution_time_ms: executionTime,
+      success: true,
      call_id: call.call_id,
-      output: typeof result === 'string' ? result : JSON.stringify(result)
+      executed_at: new Date()
+    };
+
+    return {
+      functionOutput: {
+        type: 'function_call_output',
+        call_id: call.call_id,
+        output: typeof result === 'string' ? result : JSON.stringify(result)
+      },
+      toolData
    };
  } catch (error) {
+    const executionTime = Date.now() - startTime;
+
+    const toolData = {
+      tool_name: call.name,
+      tool_arguments: call.arguments ? JSON.parse(call.arguments) : {},
+      tool_output: { error: error?.message || 'Tool execution failed' },
+      execution_time_ms: executionTime,
+      success: false,
+      call_id: call.call_id,
+      executed_at: new Date()
+    };
+
    logger.error('[OpenAIResponsesProvider] Tool execution failed', error);
    return {
-      type: 'function_call_output',
-      call_id: call.call_id,
-      output: JSON.stringify({ success: false, error: error?.message || 'Tool execution failed' })
+      functionOutput: {
+        type: 'function_call_output',
+        call_id: call.call_id,
+        output: JSON.stringify({ success: false, error: error?.message || 'Tool execution failed' })
+      },
+      toolData
    };
  }
 }
@@ -32,7 +57,7 @@ async function executeFunctionCall(assistant, call) {
  * @param {Array} conversationItems - Array of conversation items
  * @returns {Promise<Array>} Array of function call outputs
  */
-async function handlePendingFunctionCalls(assistant, conversationItems) {
+async function handlePendingFunctionCalls(assistant, conversationItems, metadata = {}) {
   const pendingFunctionCalls = conversationItems.filter(item => item.type === 'function_call');
   const functionOutputs = conversationItems.filter(item => item.type === 'function_call_output');
 
@@ -41,15 +66,20 @@ async function handlePendingFunctionCalls(assistant, conversationItems) {
   );
 
   if (orphanedCalls.length === 0) {
-    return [];
+    return { outputs: [], toolsExecuted: [] };
   }
 
   logger.info(`[OpenAIResponsesProvider] Found ${orphanedCalls.length} pending function calls, handling them...`);
   const outputs = [];
+  const toolsExecuted = [];
+
   for (const call of orphanedCalls) {
-    outputs.push(await executeFunctionCall(assistant, call));
+    const result = await executeFunctionCall(assistant, call, metadata);
+    outputs.push(result.functionOutput);
+    toolsExecuted.push(result.toolData);
   }
-
+
+  return { outputs, toolsExecuted };
 }
 
 /**
@@ -58,18 +88,22 @@ async function handlePendingFunctionCalls(assistant, conversationItems) {
  * @param {Object} run - The run object with output array
  * @returns {Promise<Array>} Array of function call outputs
  */
-async function handleRequiresAction(assistant, run) {
+async function handleRequiresAction(assistant, run, metadata = {}) {
   const functionCalls = run.output?.filter(item => item.type === 'function_call') || [];
   if (functionCalls.length === 0) {
-    return [];
+    return { outputs: [], toolsExecuted: [] };
   }
 
   const outputs = [];
+  const toolsExecuted = [];
+
   for (const call of functionCalls) {
-    outputs.push(await executeFunctionCall(assistant, call));
+    const result = await executeFunctionCall(assistant, call, metadata);
+    outputs.push(result.functionOutput);
+    toolsExecuted.push(result.toolData);
   }
 
-  return outputs;
+  return { outputs, toolsExecuted };
 }
 
 /**
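
Note the failure path: `executeFunctionCall` resolves rather than throws on tool errors, returning `toolData.success === false` plus a JSON error payload, so a broken tool still yields both a submittable output and an audit record. (One caveat worth flagging: the catch block re-runs `JSON.parse(call.arguments)`, so malformed arguments would throw out of the error handler itself.) A small usage sketch with hypothetical call data:

```js
// Sketch: the two channels of the new return value.
const { functionOutput, toolData } = await executeFunctionCall(assistant, {
  name: 'book_appointment',       // hypothetical tool
  arguments: '{"slot":"10:30"}',
  call_id: 'call_123'
});

// functionOutput keeps the Responses API shape the model expects:
//   { type: 'function_call_output', call_id, output }
// toolData is what accumulates into the message's tools_executed array:
//   { tool_name, tool_arguments, tool_output, execution_time_ms, success, call_id, executed_at }
```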
|
@@ -314,7 +314,7 @@ const replyAssistantCore = async (code, message_ = null, thread_ = null, runOpti
|
|
|
314
314
|
|
|
315
315
|
logger.info(`[replyAssistantCore] Processing ${patientReply.length} messages in parallel`);
|
|
316
316
|
|
|
317
|
-
const { result:
|
|
317
|
+
const { result: processResult, duration: processMessagesMs } = await withTracing(
|
|
318
318
|
processThreadMessage,
|
|
319
319
|
'process_thread_messages',
|
|
320
320
|
(code, patientReply, provider) => ({
|
|
@@ -323,8 +323,22 @@ const replyAssistantCore = async (code, message_ = null, thread_ = null, runOpti
|
|
|
323
323
|
}),
|
|
324
324
|
{ returnTiming: true }
|
|
325
325
|
)(code, patientReply, provider);
|
|
326
|
+
|
|
327
|
+
const { results: processResults, timings: processTimings } = processResult;
|
|
326
328
|
timings.process_messages_ms = processMessagesMs;
|
|
327
329
|
|
|
330
|
+
logger.debug('[replyAssistantCore] Process timings breakdown', { processTimings });
|
|
331
|
+
|
|
332
|
+
if (processTimings) {
|
|
333
|
+
timings.process_messages_breakdown = {
|
|
334
|
+
download_ms: processTimings.download_ms || 0,
|
|
335
|
+
image_analysis_ms: processTimings.image_analysis_ms || 0,
|
|
336
|
+
audio_transcription_ms: processTimings.audio_transcription_ms || 0,
|
|
337
|
+
url_generation_ms: processTimings.url_generation_ms || 0,
|
|
338
|
+
total_media_ms: processTimings.total_media_ms || 0
|
|
339
|
+
};
|
|
340
|
+
}
|
|
341
|
+
|
|
328
342
|
const patientMsg = processResults.some(r => r.isPatient);
|
|
329
343
|
const urls = processResults.filter(r => r.url).map(r => ({ url: r.url }));
|
|
330
344
|
const allMessagesToAdd = processResults.flatMap(r => r.messages || []);
|
|
@@ -389,17 +403,23 @@ const replyAssistantCore = async (code, message_ = null, thread_ = null, runOpti
|
|
|
389
403
|
timings.run_assistant_ms = runAssistantMs;
|
|
390
404
|
timings.total_ms = Date.now() - startTotal;
|
|
391
405
|
|
|
392
|
-
const { run, output, completed, retries, predictionTimeMs } = runResult;
|
|
406
|
+
const { run, output, completed, retries, predictionTimeMs, tools_executed } = runResult;
|
|
393
407
|
|
|
394
408
|
logger.info('[Assistant Reply Complete]', {
|
|
395
409
|
code: code ? `${code.substring(0, 3)}***${code.slice(-4)}` : 'unknown',
|
|
396
410
|
messageCount: patientReply.length,
|
|
397
411
|
hasMedia: urls.length > 0,
|
|
398
412
|
retries,
|
|
399
|
-
totalMs: timings.total_ms
|
|
413
|
+
totalMs: timings.total_ms,
|
|
414
|
+
toolsExecuted: tools_executed?.length || 0
|
|
400
415
|
});
|
|
401
416
|
|
|
402
417
|
if (output && predictionTimeMs) {
|
|
418
|
+
logger.debug('[replyAssistantCore] Storing metrics with timing_breakdown', {
|
|
419
|
+
timing_breakdown: timings,
|
|
420
|
+
has_breakdown: !!timings.process_messages_breakdown
|
|
421
|
+
});
|
|
422
|
+
|
|
403
423
|
await PredictionMetrics.create({
|
|
404
424
|
message_id: `${code}-${Date.now()}`,
|
|
405
425
|
numero: code,
|
|
@@ -412,7 +432,7 @@ const replyAssistantCore = async (code, message_ = null, thread_ = null, runOpti
|
|
|
412
432
|
}).catch(err => logger.error('[replyAssistantCore] Failed to store metrics:', err));
|
|
413
433
|
}
|
|
414
434
|
|
|
415
|
-
return output;
|
|
435
|
+
return { output, tools_executed };
|
|
416
436
|
};
|
|
417
437
|
|
|
418
438
|
const replyAssistant = withTracing(
|
|
package/lib/storage/MongoStorage.js
@@ -186,7 +186,8 @@ class MongoStorage {
       content_sid: messageData.contentSid || null,
       template_variables: messageData.variables ? JSON.stringify(messageData.variables) : null,
       raw: messageData.raw || null,
-      origin
+      origin,
+      tools_executed: messageData.tools_executed || []
     };
   }
 
package/lib/utils/messageParser.js
@@ -198,8 +198,11 @@ class MessageParser {
         return interactive.title || interactive.description || '[List item selected]';
 
       case 'flow':
-
-
+        if (interactive.data) {
+          const flowData = typeof interactive.data === 'string' ? interactive.data : JSON.stringify(interactive.data, null, 2);
+          return `Flow Response:\n${flowData}`;
+        }
+        return '[Flow response]';
 
       default:
         return '[Interactive message]';
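
For example, a flow reply carrying structured data now surfaces its payload instead of a generic placeholder (data hypothetical):

```js
// Hypothetical interactive flow payload and its parsed text.
const interactive = {
  type: 'flow',
  data: { appointment_date: '2025-03-12', slot: '10:30' }
};
// MessageParser now returns:
// 'Flow Response:\n{\n  "appointment_date": "2025-03-12",\n  "slot": "10:30"\n}'
```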