@peopl-health/nexus 2.2.5 → 2.2.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,6 +3,8 @@ const { fetchConversationData, processConversations } = require('../services/con
 const { sendMessage } = require('../core/NexusMessaging');
 const { Thread } = require('../models/threadModel');
 const llmConfig = require('../config/llmConfig');
+const { Historial_Clinico_ID } = require('../config/airtableConfig');
+const { getRecordByFilter } = require('../services/airtableService');

 const Message = mongoose.models.Message;

@@ -301,6 +303,9 @@ const searchConversationsController = async (req, res) => {
     });
   }

+  const maxLimit = 100;
+  const parsedLimit = Math.min(parseInt(limit) || 50, maxLimit);
+
   console.log(`Searching conversations for query: "${query}"`);
   const searchStartTime = Date.now();

@@ -332,18 +337,61 @@ const searchConversationsController = async (req, res) => {
       messageCount: { $sum: 1 }
     }},
     { $sort: { 'latestMessage.createdAt': -1 } },
-    { $limit:
+    { $limit: parsedLimit }
  ]);

  const searchTime = Date.now() - searchStartTime;
  console.log(`Search completed in ${searchTime}ms, found ${conversations.length} conversations`);

+  // Fetch names from Airtable and WhatsApp (same logic as fetchConversationData)
+  const phoneNumbers = conversations.map(conv => conv._id).filter(Boolean);
+  let airtableNameMap = {};
+  let airtablePatientIdMap = {};
+
+  if (phoneNumbers.length > 0) {
+    try {
+      // Batch phone numbers to avoid Airtable URL length limit (16,000 chars)
+      const BATCH_SIZE = 50;
+      const batches = [];
+      for (let i = 0; i < phoneNumbers.length; i += BATCH_SIZE) {
+        batches.push(phoneNumbers.slice(i, i + BATCH_SIZE));
+      }
+
+      const batchPromises = batches.map(batch => {
+        const formula = 'OR(' +
+          batch.map(p => `{whatsapp_id} = "${p}"`).join(', ') +
+          ')';
+        return getRecordByFilter(Historial_Clinico_ID, 'estado_general', formula).catch(error => {
+          console.error('Error fetching Airtable batch for search:', error);
+          return [];
+        });
+      });
+
+      const batchResults = await Promise.all(batchPromises);
+      const patientTable = batchResults.flat();
+
+      // Create both maps in a single iteration
+      patientTable.forEach(patient => {
+        if (patient && patient.whatsapp_id) {
+          airtableNameMap[patient.whatsapp_id] = patient.name;
+          if (patient.patient_id) {
+            airtablePatientIdMap[patient.whatsapp_id] = patient.patient_id;
+          }
+        }
+      });
+      console.log(`Found ${Object.keys(airtableNameMap).length} names in Airtable for search results (${batches.length} batches)`);
+    } catch (error) {
+      console.error('Error fetching names from Airtable for search, falling back to nombre_whatsapp:', error);
+    }
+  }
+
  // Process conversations for response
  const processedConversations = conversations.map(conv => {
    if (!conv || !conv.latestMessage) {
      return {
        phoneNumber: conv?._id || 'unknown',
        name: 'Unknown',
+        patientId: airtablePatientIdMap[conv?._id] || null,
        lastMessage: '',
        lastMessageTime: new Date(),
        messageCount: 0,
@@ -375,7 +423,8 @@ const searchConversationsController = async (req, res) => {

    return {
      phoneNumber: conv._id,
-      name: conv?.latestMessage?.nombre_whatsapp || 'Unknown',
+      name: airtableNameMap[conv._id] || conv?.latestMessage?.nombre_whatsapp || 'Unknown',
+      patientId: airtablePatientIdMap[conv._id] || null,
      lastMessage: conv?.latestMessage?.body || '',
      lastMessageTime: conv?.latestMessage?.createdAt || conv?.latestMessage?.timestamp || new Date(),
      messageCount: conv.messageCount || 0,
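
The controller changes above cap the result set (the requested limit is parsed and clamped to 100) and then resolve display names by querying Airtable in batches of 50 phone numbers, each batch collapsed into a single OR(...) filter formula so that no request URL approaches Airtable's roughly 16,000-character limit; a failed batch degrades to an empty array rather than failing the whole search. A minimal standalone sketch of that batching pattern, assuming the same airtableService and airtableConfig exports as the controller (the wrapper function name is illustrative and not part of the package):

// Standalone sketch of the batching pattern used above; fetchPatientsInBatches is an
// illustrative name, while getRecordByFilter and Historial_Clinico_ID mirror the controller's imports.
const { Historial_Clinico_ID } = require('../config/airtableConfig');
const { getRecordByFilter } = require('../services/airtableService');

const BATCH_SIZE = 50; // same batch size as the controller uses

async function fetchPatientsInBatches(phoneNumbers) {
  const batches = [];
  for (let i = 0; i < phoneNumbers.length; i += BATCH_SIZE) {
    batches.push(phoneNumbers.slice(i, i + BATCH_SIZE));
  }

  // One filterByFormula expression per batch, e.g. OR({whatsapp_id} = "...", {whatsapp_id} = "...")
  const results = await Promise.all(batches.map(batch => {
    const formula = 'OR(' + batch.map(p => `{whatsapp_id} = "${p}"`).join(', ') + ')';
    return getRecordByFilter(Historial_Clinico_ID, 'estado_general', formula)
      .catch(() => []); // a failed batch yields an empty result instead of failing the whole lookup
  }));

  return results.flat(); // flat list of patient records, keyed later by whatsapp_id
}
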
@@ -1,6 +1,8 @@
 const { OpenAI } = require('openai');
+const { retryWithBackoff } = require('../utils/retryHelper');

 const DEFAULT_MAX_RETRIES = parseInt(process.env.MAX_RETRIES || '10', 10);
+const PROVIDER_NAME = 'OpenAIAssistantsProvider';

 /**
  * Provider wrapper that targets the legacy Assistants (threads/runs) API surface.
@@ -41,35 +43,22 @@ class OpenAIAssistantsProvider {
   }

   /**
-   * Retry helper
-   *
+   * Retry helper wrapper that uses shared retry logic
+   * @private
    */
-  async
-
-
-
-
-
-          throw new Error(`Rate limit exceeded after ${maxRetries} retries: ${error.message}`);
-        }
-
-        let waitTime = 60;
-        const waitMatch = error?.error?.message?.match(/after (\d+\.?\d*) seconds?/i) ||
-          error?.message?.match(/after (\d+\.?\d*) seconds?/i);
-        if (waitMatch) {
-          waitTime = Math.ceil(parseFloat(waitMatch[1])) + 1; // Add 1 second buffer
-        } else {
-          waitTime = 60 * Math.pow(2, retryCount);
-        }
-
-        console.log(`[OpenAIAssistantsProvider] Rate limit exceeded. Waiting ${waitTime} seconds before retry ${retryCount + 1}/${maxRetries}`);
-        await new Promise(resolve => setTimeout(resolve, waitTime * 1000));
-
-        return this._retryWithRateLimit(operation, maxRetries, retryCount + 1);
-      }
+  async _retryWithBackoff(operation, options = {}) {
+    return retryWithBackoff(operation, {
+      ...options,
+      providerName: PROVIDER_NAME,
+    });
+  }

-
-
+  /**
+   * @deprecated Use _retryWithBackoff instead
+   * Kept for backward compatibility
+   */
+  async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
+    return this._retryWithBackoff(operation, { maxRetries, retryCount });
   }

   async createConversation({ metadata, messages = [], toolResources } = {}) {
@@ -1,4 +1,5 @@
 const { OpenAI } = require('openai');
+const { retryWithBackoff } = require('../utils/retryHelper');

 const CONVERSATION_PREFIX = 'conv_';
 const RESPONSE_PREFIX = 'resp_';
@@ -6,6 +7,7 @@ const DEFAULT_MAX_RETRIES = parseInt(process.env.MAX_RETRIES || '10', 10);
 const MAX_ITEMS_ON_CREATE = 20;
 const MAX_ITEMS_PER_BATCH = 20;
 const DEFAULT_MAX_HISTORICAL_MESSAGES = parseInt(process.env.MAX_HISTORICAL_MESSAGES || '50', 10);
+const PROVIDER_NAME = 'OpenAIResponsesProvider';

 /**
  * Provider wrapper that targets the Conversations + Responses API surface.
@@ -47,35 +49,22 @@ class OpenAIResponsesProvider {
   }

   /**
-   * Retry helper
-   *
+   * Retry helper wrapper that uses shared retry logic
+   * @private
    */
-  async
-
-
-
-
-
-          throw new Error(`Rate limit exceeded after ${maxRetries} retries: ${error.message}`);
-        }
-
-        let waitTime = 60;
-        const waitMatch = error?.error?.message?.match(/after (\d+\.?\d*) seconds?/i) ||
-          error?.message?.match(/after (\d+\.?\d*) seconds?/i);
-        if (waitMatch) {
-          waitTime = Math.ceil(parseFloat(waitMatch[1])) + 1; // Add 1 second buffer
-        } else {
-          waitTime = 60 * Math.pow(2, retryCount);
-        }
-
-        console.log(`[OpenAIResponsesProvider] Rate limit exceeded. Waiting ${waitTime} seconds before retry ${retryCount + 1}/${maxRetries}`);
-        await new Promise(resolve => setTimeout(resolve, waitTime * 1000));
-
-        return this._retryWithRateLimit(operation, maxRetries, retryCount + 1);
-      }
+  async _retryWithBackoff(operation, options = {}) {
+    return retryWithBackoff(operation, {
+      ...options,
+      providerName: PROVIDER_NAME,
+    });
+  }

-
-
+  /**
+   * @deprecated Use _retryWithBackoff instead
+   * Kept for backward compatibility
+   */
+  async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
+    return this._retryWithBackoff(operation, { maxRetries, retryCount });
   }

   /**
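
With this refactor both providers delegate retry handling to the shared helper: _retryWithBackoff only stamps the operation with its PROVIDER_NAME and forwards any options, while _retryWithRateLimit survives as a deprecated shim. A rough sketch of what a call site inside either provider class could look like, assuming the OpenAI client is held on this.client (the method name and the SDK call are illustrative; actual call sites are not shown in this diff):

// Illustrative only: routing an SDK call through the shared retry logic inside a provider class.
async _createThreadWithRetry(metadata) {
  return this._retryWithBackoff(
    () => this.client.beta.threads.create({ metadata }), // assumed SDK call, not part of this diff
    { maxRetries: DEFAULT_MAX_RETRIES } // DEFAULT_MAX_RETRIES is defined at the top of each provider file
  );
}
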
package/lib/utils/index.js
CHANGED
@@ -0,0 +1,167 @@
+const { logger } = require('./logger');
+
+// Retry configuration
+const RETRY_CONFIG = {
+  rateLimit: {
+    maxRetries: 3,
+    baseWaitTime: 60,
+  },
+  serverError: {
+    maxRetries: 5,
+    baseWaitTime: 2,
+    maxWaitTime: 32, // seconds
+  },
+};
+
+/**
+ * Extract request ID from error object
+ * Handles various formats that OpenAI SDK might use
+ */
+function extractRequestId(error) {
+  if (!error) return null;
+  return error?.requestID ||
+    (error?.headers?.get ? error.headers.get('x-request-id') : null) ||
+    (error?.headers?.['x-request-id']) ||
+    null;
+}
+
+/**
+ * Calculate wait time with full jitter (exponential backoff with random jitter)
+ * Uses "full jitter" strategy: random(0, base * 2^retryCount)
+ * This is the recommended approach (AWS, Google Cloud best practices)
+ */
+function calculateWaitTime(config, retryCount, isRateLimit = false, error = null) {
+  let baseWaitTime;
+
+  if (isRateLimit) {
+    const waitMatch = error?.error?.message?.match(/after (\d+\.?\d*) seconds?/i) ||
+      error?.message?.match(/after (\d+\.?\d*) seconds?/i);
+    if (waitMatch) {
+      baseWaitTime = Math.ceil(parseFloat(waitMatch[1])) + 1;
+      return baseWaitTime;
+    }
+    baseWaitTime = config.baseWaitTime * Math.pow(2, retryCount);
+  } else {
+    baseWaitTime = Math.min(
+      config.baseWaitTime * Math.pow(2, retryCount),
+      config.maxWaitTime
+    );
+  }
+
+  return Math.ceil(Math.random() * baseWaitTime);
+}
+
+/**
+ * Check if error is retryable
+ * Returns true for rate limits, server errors (5xx), and network/timeout errors
+ */
+function isRetryableError(error) {
+  if (!error) return false;
+
+  const status = error?.status;
+  const code = error?.code;
+
+  if (status === 429 || code === 'rate_limit_exceeded') {
+    return true;
+  }
+
+  if (status >= 500 && status < 600) {
+    return true;
+  }
+
+  const networkErrorCodes = ['ECONNRESET', 'ETIMEDOUT', 'ENOTFOUND', 'ECONNREFUSED', 'timeout'];
+  if (networkErrorCodes.includes(code) || error?.message?.includes('timeout')) {
+    return true;
+  }
+
+  return false;
+}
+
+/**
+ * Create enhanced error with original error information preserved.
+ */
+function createRetryError(error, errorType, maxRetries, retryCount) {
+  const requestId = extractRequestId(error);
+  const enhancedMessage = `${errorType} after ${maxRetries} retries: ${error.message}${requestId ? ` (Request ID: ${requestId})` : ''}`;
+
+  const finalError = new Error(enhancedMessage);
+  finalError.originalError = error;
+  finalError.status = error.status;
+  finalError.code = error.code;
+  finalError.requestID = requestId;
+  finalError.retryCount = retryCount;
+  return finalError;
+}
+
+/**
+ * Sleep helper - returns a promise that resolves after specified milliseconds.
+ */
+function sleep(ms) {
+  return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+/**
+ * Retry helper for rate limit errors and server errors
+ * Extracts wait time from error message and retries with exponential backoff + full jitter
+ * Preserves original error information including requestID
+ */
+async function retryWithBackoff(operation, options = {}) {
+  const {
+    maxRetries = RETRY_CONFIG.serverError.maxRetries,
+    retryCount = 0,
+    providerName = 'OpenAIProvider',
+  } = options;
+
+  if (typeof operation !== 'function') {
+    throw new Error('Operation must be a function');
+  }
+
+  try {
+    return await operation();
+  } catch (error) {
+    const isRateLimit = error?.status === 429 || error?.code === 'rate_limit_exceeded';
+    const isRetryable = isRetryableError(error);
+
+    if (isRetryable) {
+      const config = isRateLimit ? RETRY_CONFIG.rateLimit : RETRY_CONFIG.serverError;
+      const effectiveMaxRetries = isRateLimit ? RETRY_CONFIG.rateLimit.maxRetries : maxRetries;
+
+      if (retryCount >= effectiveMaxRetries) {
+        const errorType = isRateLimit ? 'Rate limit' : 'Server/Network error';
+        throw createRetryError(error, errorType, effectiveMaxRetries, retryCount);
+      }
+
+      const waitTime = calculateWaitTime(config, retryCount, isRateLimit, error);
+      const errorType = isRateLimit ? 'Rate limit exceeded' : `Server/Network error (${error?.status || error?.code || 'unknown'})`;
+      const requestId = extractRequestId(error);
+
+      logger.warn({
+        provider: providerName,
+        errorType,
+        status: error?.status,
+        code: error?.code,
+        requestId,
+        retryCount: retryCount + 1,
+        maxRetries: effectiveMaxRetries,
+        waitTime,
+      }, `${errorType}. Retrying in ${waitTime}s (${retryCount + 1}/${effectiveMaxRetries})`);
+
+      await sleep(waitTime * 1000);
+
+      return retryWithBackoff(operation, { maxRetries, retryCount: retryCount + 1, providerName });
+    }
+
+    throw error;
+  }
+}
+
+module.exports = {
+  retryWithBackoff,
+  extractRequestId,
+  calculateWaitTime,
+  isRetryableError,
+  createRetryError,
+  sleep,
+  RETRY_CONFIG,
+};
+
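
retryWithBackoff retries any promise-returning operation on HTTP 429, 5xx, and common network/timeout errors. Rate-limit errors honor a wait time parsed from the error message when one is present; otherwise the wait is drawn with full jitter, a random value up to baseWaitTime * 2^retryCount, capped at maxWaitTime for server errors. A minimal usage sketch, assuming a deep import of the new module and an illustrative OpenAI call (inside the package the helper is reached through the providers' _retryWithBackoff wrappers):

// Illustrative usage; the deep-import path, model name, and chat call are assumptions,
// not part of the package's documented API.
const { OpenAI } = require('openai');
const { retryWithBackoff } = require('@peopl-health/nexus/lib/utils');

const openai = new OpenAI();

async function askOnce(prompt) {
  return retryWithBackoff(
    () => openai.chat.completions.create({
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: prompt }],
    }),
    { maxRetries: 5, providerName: 'ExampleCaller' } // providerName only labels the retry warn log
  );
}
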