@peopl-health/nexus 2.2.6 → 2.2.8

@@ -22,9 +22,12 @@ const activeAssistantController = async (req, res) => {
 
 const addInsAssistantController = async (req, res) => {
   const { code, instruction } = req.body;
-
+
   try {
-    const ans = await addInsAssistant(code, instruction);
+    const variant = process.env.VARIANT || 'assistants';
+    const role = variant === 'responses' ? 'developer' : 'user';
+
+    const ans = await addInsAssistant(code, instruction, role);
     if (ans) await sendMessage({code, body: ans, fileType: 'text'});
     return res.status(200).send({ message: 'Add instruction to the assistant' });
   } catch (error) {
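
The controller now derives the message role from the VARIANT environment variable and passes it through to addInsAssistant. A minimal sketch of that mapping follows (illustrative only, not code shipped in this diff; roleFor is a hypothetical helper, the controller inlines the ternary):

// Illustrative sketch: 'responses' selects the 'developer' role used for
// system-style instructions on the Responses API surface; anything else,
// including an unset VARIANT, falls back to 'user'.
const roleFor = (variant) => (variant === 'responses' ? 'developer' : 'user');

console.log(roleFor('responses'));  // 'developer'
console.log(roleFor('assistants')); // 'user'
console.log(roleFor(undefined));    // 'user'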
@@ -1,6 +1,8 @@
 const { OpenAI } = require('openai');
+const { retryWithBackoff } = require('../utils/retryHelper');
 
 const DEFAULT_MAX_RETRIES = parseInt(process.env.MAX_RETRIES || '10', 10);
+const PROVIDER_NAME = 'OpenAIAssistantsProvider';
 
 /**
  * Provider wrapper that targets the legacy Assistants (threads/runs) API surface.
@@ -41,35 +43,22 @@ class OpenAIAssistantsProvider {
   }
 
   /**
-   * Retry helper for rate limit errors
-   * Extracts wait time from error message and retries with exponential backoff
+   * Retry helper wrapper that uses shared retry logic
+   * @private
    */
-  async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
-    try {
-      return await operation();
-    } catch (error) {
-      if (error?.status === 429 || error?.code === 'rate_limit_exceeded') {
-        if (retryCount >= maxRetries) {
-          throw new Error(`Rate limit exceeded after ${maxRetries} retries: ${error.message}`);
-        }
-
-        let waitTime = 60;
-        const waitMatch = error?.error?.message?.match(/after (\d+\.?\d*) seconds?/i) ||
-          error?.message?.match(/after (\d+\.?\d*) seconds?/i);
-        if (waitMatch) {
-          waitTime = Math.ceil(parseFloat(waitMatch[1])) + 1; // Add 1 second buffer
-        } else {
-          waitTime = 60 * Math.pow(2, retryCount);
-        }
-
-        console.log(`[OpenAIAssistantsProvider] Rate limit exceeded. Waiting ${waitTime} seconds before retry ${retryCount + 1}/${maxRetries}`);
-        await new Promise(resolve => setTimeout(resolve, waitTime * 1000));
-
-        return this._retryWithRateLimit(operation, maxRetries, retryCount + 1);
-      }
+  async _retryWithBackoff(operation, options = {}) {
+    return retryWithBackoff(operation, {
+      ...options,
+      providerName: PROVIDER_NAME,
+    });
+  }
 
-      throw error;
-    }
+  /**
+   * @deprecated Use _retryWithBackoff instead
+   * Kept for backward compatibility
+   */
+  async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
+    return this._retryWithBackoff(operation, { maxRetries, retryCount });
   }
 
   async createConversation({ metadata, messages = [], toolResources } = {}) {
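
For context, a hedged sketch of how a method on OpenAIAssistantsProvider might use the new wrapper. createThreadWithRetry is hypothetical and this.client is assumed to be the OpenAI instance the provider constructs; only _retryWithBackoff and DEFAULT_MAX_RETRIES come from the diff above.

  // Hedged sketch, not part of the diff: wrap an Assistants API call in the
  // shared retry helper so 429s and 5xx responses are retried with backoff.
  async createThreadWithRetry(metadata) {
    return this._retryWithBackoff(
      () => this.client.beta.threads.create({ metadata }),
      { maxRetries: DEFAULT_MAX_RETRIES }
    );
  }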
@@ -1,4 +1,5 @@
 const { OpenAI } = require('openai');
+const { retryWithBackoff } = require('../utils/retryHelper');
 
 const CONVERSATION_PREFIX = 'conv_';
 const RESPONSE_PREFIX = 'resp_';
@@ -6,6 +7,7 @@ const DEFAULT_MAX_RETRIES = parseInt(process.env.MAX_RETRIES || '10', 10);
 const MAX_ITEMS_ON_CREATE = 20;
 const MAX_ITEMS_PER_BATCH = 20;
 const DEFAULT_MAX_HISTORICAL_MESSAGES = parseInt(process.env.MAX_HISTORICAL_MESSAGES || '50', 10);
+const PROVIDER_NAME = 'OpenAIResponsesProvider';
 
 /**
  * Provider wrapper that targets the Conversations + Responses API surface.
@@ -47,35 +49,22 @@ class OpenAIResponsesProvider {
   }
 
   /**
-   * Retry helper for rate limit errors
-   * Extracts wait time from error message and retries with exponential backoff
+   * Retry helper wrapper that uses shared retry logic
+   * @private
    */
-  async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
-    try {
-      return await operation();
-    } catch (error) {
-      if (error?.status === 429 || error?.code === 'rate_limit_exceeded') {
-        if (retryCount >= maxRetries) {
-          throw new Error(`Rate limit exceeded after ${maxRetries} retries: ${error.message}`);
-        }
-
-        let waitTime = 60;
-        const waitMatch = error?.error?.message?.match(/after (\d+\.?\d*) seconds?/i) ||
-          error?.message?.match(/after (\d+\.?\d*) seconds?/i);
-        if (waitMatch) {
-          waitTime = Math.ceil(parseFloat(waitMatch[1])) + 1; // Add 1 second buffer
-        } else {
-          waitTime = 60 * Math.pow(2, retryCount);
-        }
-
-        console.log(`[OpenAIResponsesProvider] Rate limit exceeded. Waiting ${waitTime} seconds before retry ${retryCount + 1}/${maxRetries}`);
-        await new Promise(resolve => setTimeout(resolve, waitTime * 1000));
-
-        return this._retryWithRateLimit(operation, maxRetries, retryCount + 1);
-      }
+  async _retryWithBackoff(operation, options = {}) {
+    return retryWithBackoff(operation, {
+      ...options,
+      providerName: PROVIDER_NAME,
+    });
+  }
 
-      throw error;
-    }
+  /**
+   * @deprecated Use _retryWithBackoff instead
+   * Kept for backward compatibility
+   */
+  async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
+    return this._retryWithBackoff(operation, { maxRetries, retryCount });
   }
 
   /**
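
The deprecated method keeps its old signature, so existing call sites continue to work unchanged. A hedged illustration follows (provider, the model name, and the request body are assumptions made for the example, not part of the diff):

// Hedged illustration: the deprecated signature still works because it
// forwards to _retryWithBackoff with the same maxRetries/retryCount meaning.
async function addInstructionExample(provider) {
  return provider._retryWithRateLimit(
    () => provider.client.responses.create({ model: 'gpt-4o-mini', input: 'ping' }),
    3 // maxRetries, interpreted as before
  );
}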
@@ -1,7 +1,9 @@
 const { MessageParser } = require('./messageParser');
 const { logger } = require('./logger');
+const { retryWithBackoff } = require('./retryHelper');
 
 module.exports = {
   MessageParser,
-  logger
+  logger,
+  retryWithBackoff,
 };
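
With the barrel export above, consumers inside the package can pull the helper from './utils' instead of reaching into retryHelper directly. A hedged usage sketch (fetchSomething stands in for any promise-returning operation):

const { retryWithBackoff } = require('./utils');

async function fetchSomethingReliably(fetchSomething) {
  // Retries rate limits, 5xx responses, and network timeouts with backoff.
  return retryWithBackoff(fetchSomething, {
    providerName: 'ExampleCaller', // hypothetical label used in log output
    maxRetries: 2,
  });
}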
@@ -0,0 +1,167 @@
+const { logger } = require('./logger');
+
+// Retry configuration
+const RETRY_CONFIG = {
+  rateLimit: {
+    maxRetries: 3,
+    baseWaitTime: 60,
+  },
+  serverError: {
+    maxRetries: 5,
+    baseWaitTime: 2,
+    maxWaitTime: 32, // seconds
+  },
+};
+
+/**
+ * Extract request ID from error object
+ * Handles various formats that OpenAI SDK might use
+ */
+function extractRequestId(error) {
+  if (!error) return null;
+  return error?.requestID ||
+    (error?.headers?.get ? error.headers.get('x-request-id') : null) ||
+    (error?.headers?.['x-request-id']) ||
+    null;
+}
+
+/**
+ * Calculate wait time with full jitter (exponential backoff with random jitter)
+ * Uses "full jitter" strategy: random(0, base * 2^retryCount)
+ * This is the recommended approach (AWS, Google Cloud best practices)
+ */
+function calculateWaitTime(config, retryCount, isRateLimit = false, error = null) {
+  let baseWaitTime;
+
+  if (isRateLimit) {
+    const waitMatch = error?.error?.message?.match(/after (\d+\.?\d*) seconds?/i) ||
+      error?.message?.match(/after (\d+\.?\d*) seconds?/i);
+    if (waitMatch) {
+      baseWaitTime = Math.ceil(parseFloat(waitMatch[1])) + 1;
+      return baseWaitTime;
+    }
+    baseWaitTime = config.baseWaitTime * Math.pow(2, retryCount);
+  } else {
+    baseWaitTime = Math.min(
+      config.baseWaitTime * Math.pow(2, retryCount),
+      config.maxWaitTime
+    );
+  }
+
+  return Math.ceil(Math.random() * baseWaitTime);
+}
+
+/**
+ * Check if error is retryable
+ * Returns true for rate limits, server errors (5xx), and network/timeout errors
+ */
+function isRetryableError(error) {
+  if (!error) return false;
+
+  const status = error?.status;
+  const code = error?.code;
+
+  if (status === 429 || code === 'rate_limit_exceeded') {
+    return true;
+  }
+
+  if (status >= 500 && status < 600) {
+    return true;
+  }
+
+  const networkErrorCodes = ['ECONNRESET', 'ETIMEDOUT', 'ENOTFOUND', 'ECONNREFUSED', 'timeout'];
+  if (networkErrorCodes.includes(code) || error?.message?.includes('timeout')) {
+    return true;
+  }
+
+  return false;
+}
+
+/**
+ * Create enhanced error with original error information preserved.
+ */
+function createRetryError(error, errorType, maxRetries, retryCount) {
+  const requestId = extractRequestId(error);
+  const enhancedMessage = `${errorType} after ${maxRetries} retries: ${error.message}${requestId ? ` (Request ID: ${requestId})` : ''}`;
+
+  const finalError = new Error(enhancedMessage);
+  finalError.originalError = error;
+  finalError.status = error.status;
+  finalError.code = error.code;
+  finalError.requestID = requestId;
+  finalError.retryCount = retryCount;
+  return finalError;
+}
+
+/**
+ * Sleep helper - returns a promise that resolves after specified milliseconds.
+ */
+function sleep(ms) {
+  return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+/**
+ * Retry helper for rate limit errors and server errors
+ * Extracts wait time from error message and retries with exponential backoff + full jitter
+ * Preserves original error information including requestID
+ */
+async function retryWithBackoff(operation, options = {}) {
+  const {
+    maxRetries = RETRY_CONFIG.serverError.maxRetries,
+    retryCount = 0,
+    providerName = 'OpenAIProvider',
+  } = options;
+
+  if (typeof operation !== 'function') {
+    throw new Error('Operation must be a function');
+  }
+
+  try {
+    return await operation();
+  } catch (error) {
+    const isRateLimit = error?.status === 429 || error?.code === 'rate_limit_exceeded';
+    const isRetryable = isRetryableError(error);
+
+    if (isRetryable) {
+      const config = isRateLimit ? RETRY_CONFIG.rateLimit : RETRY_CONFIG.serverError;
+      const effectiveMaxRetries = isRateLimit ? RETRY_CONFIG.rateLimit.maxRetries : maxRetries;
+
+      if (retryCount >= effectiveMaxRetries) {
+        const errorType = isRateLimit ? 'Rate limit' : 'Server/Network error';
+        throw createRetryError(error, errorType, effectiveMaxRetries, retryCount);
+      }
+
+      const waitTime = calculateWaitTime(config, retryCount, isRateLimit, error);
+      const errorType = isRateLimit ? 'Rate limit exceeded' : `Server/Network error (${error?.status || error?.code || 'unknown'})`;
+      const requestId = extractRequestId(error);
+
+      logger.warn({
+        provider: providerName,
+        errorType,
+        status: error?.status,
+        code: error?.code,
+        requestId,
+        retryCount: retryCount + 1,
+        maxRetries: effectiveMaxRetries,
+        waitTime,
+      }, `${errorType}. Retrying in ${waitTime}s (${retryCount + 1}/${effectiveMaxRetries})`);
+
+      await sleep(waitTime * 1000);
+
+      return retryWithBackoff(operation, { maxRetries, retryCount: retryCount + 1, providerName });
+    }
+
+    throw error;
+  }
+}
+
+module.exports = {
+  retryWithBackoff,
+  extractRequestId,
+  calculateWaitTime,
+  isRetryableError,
+  createRetryError,
+  sleep,
+  RETRY_CONFIG,
+};
+
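
A hedged sanity check of the wait-time and retryability rules defined above (example values only, not part of the package; the relative require path assumes the caller sits next to retryHelper.js):

const { calculateWaitTime, isRetryableError, RETRY_CONFIG } = require('./retryHelper');

// A 429 whose message embeds a wait hint: ceil(7.5) + 1 = 9 seconds, no jitter.
const rateLimited = { status: 429, message: 'Please try again after 7.5 seconds.' };
console.log(isRetryableError(rateLimited));                                    // true
console.log(calculateWaitTime(RETRY_CONFIG.rateLimit, 0, true, rateLimited));  // 9

// A 503 on the second attempt: full jitter over min(2 * 2^1, 32) = 4 seconds.
console.log(isRetryableError({ status: 503 }));               // true
console.log(calculateWaitTime(RETRY_CONFIG.serverError, 1));  // random integer up to 4

// A plain 400 is not retryable.
console.log(isRetryableError({ status: 400, code: 'invalid_request_error' })); // false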
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@peopl-health/nexus",
-  "version": "2.2.6",
+  "version": "2.2.8",
   "description": "Core messaging and assistant library for WhatsApp communication platforms",
   "keywords": [
     "whatsapp",