@peopl-health/nexus 2.2.1 → 2.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,10 +1,19 @@
  const express = require('express');
  require('dotenv').config();
- const { Nexus, setupDefaultRoutes } = require('@peopl-health/nexus');
+ const { Nexus, setupDefaultRoutes, BaseAssistant } = require('@peopl-health/nexus');

  const app = express();
  app.use(express.json());

+ // Define a General Assistant
+ class GeneralAssistant extends BaseAssistant {
+   constructor(options = {}) {
+     super(options);
+     // You can add custom tools or setup here
+     // Example: this.registerTool('toolName', schema, handler);
+   }
+ }
+
  async function startServer() {
    // Initialize Nexus with all services and batching enabled
    const nexus = new Nexus({
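
Note: the `registerTool('toolName', schema, handler)` comment in the hunk above hints at how subclasses attach custom tools. A minimal, hypothetical sketch of that pattern (the exact `registerTool` signature is not shown in this diff, so the schema and handler shapes here are assumptions):

    const { BaseAssistant } = require('@peopl-health/nexus');

    class SupportAssistant extends BaseAssistant {
      constructor(options = {}) {
        super(options);
        // Assumed signature: registerTool(name, schema, handler)
        this.registerTool(
          'lookupPatient',
          {
            type: 'object',
            properties: { code: { type: 'string' } },
            required: ['code'],
          },
          async ({ code }) => {
            // Handler result is presumably returned to the model as tool output
            return { code, status: 'active' };
          }
        );
      }
    }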
@@ -33,6 +42,21 @@ async function startServer() {
      media: {
        bucketName: process.env.AWS_S3_BUCKET_NAME,
        region: process.env.AWS_REGION || 'us-east-1'
+     },
+
+     // LLM configuration (required for assistants)
+     llm: 'openai',
+     llmConfig: {
+       apiKey: process.env.OPENAI_API_KEY
+     },
+
+     // Assistants configuration
+     assistants: {
+       registry: {
+         // Register the general assistant with its OpenAI assistant ID
+         'asst_O6mAXAhf0xyVj3t4DHRs26uT': GeneralAssistant,
+         'pmpt_68f844cd975481979c080431bde74f6e0adf01a52110813b': GeneralAssistant,
+       }
      }
    });

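Note: the registry keys are provider-side IDs; here both an Assistants API ID (`asst_...`) and a prompt ID (`pmpt_...`) resolve to the same `GeneralAssistant` class. A sketch of registering a second class, with placeholder IDs and a hypothetical `BillingAssistant`:

    const { Nexus, BaseAssistant } = require('@peopl-health/nexus');

    class GeneralAssistant extends BaseAssistant {}
    class BillingAssistant extends BaseAssistant {} // hypothetical second assistant

    const nexus = new Nexus({
      llm: 'openai',
      llmConfig: { apiKey: process.env.OPENAI_API_KEY },
      assistants: {
        registry: {
          // Placeholder keys; use real provider-side IDs as in the diff above
          'asst_XXXXXXXXXXXXXXXXXXXX': GeneralAssistant,
          'asst_YYYYYYYYYYYYYYYYYYYY': BillingAssistant,
        },
      },
    });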
@@ -209,8 +209,8 @@ class BaseAssistant {
        const formattedText = formatMessage(message);
        return formattedText ? { role: message.from_me ? 'assistant' : 'user', content: formattedText } : null;
      })
-     .filter(message => message !== null); // Remove any null entries
-
+     .filter(message => message !== null);
+
    console.log(`[buildInitialMessages] Built ${formattedMessages.length} initial messages for ${code}`);

    return formattedMessages;
@@ -114,20 +114,39 @@ const getPatientRoleAndName = (reply, numbers) => {

  function formatMessage(reply) {
    try {
-     // Validate timestamp exists
      if (!reply.timestamp) {
        return null;
      }

+     // Normalize timestamp: convert any format to a Date object, then to ISO string
+     let dateObj;
+
+     if (reply.timestamp instanceof Date) {
+       dateObj = reply.timestamp;
+     } else if (typeof reply.timestamp === 'number') {
+       const ms = reply.timestamp < 1e12 ? reply.timestamp * 1000 : reply.timestamp;
+       dateObj = new Date(ms);
+     } else {
+       dateObj = new Date(reply.timestamp);
+     }
+
+     if (isNaN(dateObj.getTime())) {
+       console.warn('[formatMessage] Invalid timestamp:', reply.timestamp);
+       return null;
+     }
+
+     const isoString = dateObj.toISOString();
+
      // Convert timestamp to Mexico City timezone with Spanish format
      // Format: martes, 30 de septiembre de 2025 a las 8:30 AM
-     const mexicoCityTime = moment(reply.timestamp)
+     const mexicoCityTime = moment(isoString)
        .tz('America/Mexico_City')
        .locale('es')
        .format('dddd, D [de] MMMM [de] YYYY [a las] h:mm A');

      return `[${mexicoCityTime}] ${reply.body}`;
-   } catch {
+   } catch (error) {
+     console.error('[formatMessage] Error formatting message:', error?.message || error, 'timestamp:', reply.timestamp);
      return null;
    }
  }
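
Note: the `< 1e12` comparison is a seconds-versus-milliseconds heuristic: epoch milliseconds passed 1e12 back in 2001, so any plausible numeric timestamp below that is treated as Unix seconds. A standalone replica of the normalization, for illustration:

    function normalizeTimestamp(ts) {
      let dateObj;
      if (ts instanceof Date) {
        dateObj = ts;
      } else if (typeof ts === 'number') {
        // Numbers below 1e12 are assumed to be Unix seconds, not milliseconds
        dateObj = new Date(ts < 1e12 ? ts * 1000 : ts);
      } else {
        dateObj = new Date(ts); // ISO strings, date strings, etc.
      }
      return isNaN(dateObj.getTime()) ? null : dateObj.toISOString();
    }

    console.log(normalizeTimestamp(1727703000));    // Unix seconds → ISO string
    console.log(normalizeTimestamp(1727703000000)); // Unix milliseconds → same instant
    console.log(normalizeTimestamp('not a date'));  // null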
@@ -40,11 +40,45 @@ class OpenAIAssistantsProvider {
      return this.client;
    }

+   /**
+    * Retry helper for rate limit errors
+    * Extracts wait time from error message and retries with exponential backoff
+    */
+   async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
+     try {
+       return await operation();
+     } catch (error) {
+       if (error?.status === 429 || error?.code === 'rate_limit_exceeded') {
+         if (retryCount >= maxRetries) {
+           throw new Error(`Rate limit exceeded after ${maxRetries} retries: ${error.message}`);
+         }
+
+         let waitTime = 60;
+         const waitMatch = error?.error?.message?.match(/after (\d+\.?\d*) seconds?/i) ||
+                           error?.message?.match(/after (\d+\.?\d*) seconds?/i);
+         if (waitMatch) {
+           waitTime = Math.ceil(parseFloat(waitMatch[1])) + 1; // Add 1 second buffer
+         } else {
+           waitTime = 60 * Math.pow(2, retryCount);
+         }
+
+         console.log(`[OpenAIAssistantsProvider] Rate limit exceeded. Waiting ${waitTime} seconds before retry ${retryCount + 1}/${maxRetries}`);
+         await new Promise(resolve => setTimeout(resolve, waitTime * 1000));
+
+         return this._retryWithRateLimit(operation, maxRetries, retryCount + 1);
+       }
+
+       throw error;
+     }
+   }
+
    async createConversation({ metadata, messages = [], toolResources } = {}) {
-     const thread = await this.client.beta.threads.create({
-       metadata,
-       tool_resources: toolResources,
-     });
+     const thread = await this._retryWithRateLimit(() =>
+       this.client.beta.threads.create({
+         metadata,
+         tool_resources: toolResources,
+       })
+     );

      if (Array.isArray(messages) && messages.length > 0) {
        for (const message of messages) {
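
Note: both providers gain an identical `_retryWithRateLimit` helper. When a 429 error message carries a wait hint it is honored plus a one-second buffer; otherwise the fallback is exponential backoff (60 s, 120 s, 240 s across the three default retries). A standalone sketch of just the wait-time calculation, using a made-up error message:

    function computeWaitSeconds(message, retryCount) {
      const match = message?.match(/after (\d+\.?\d*) seconds?/i);
      return match
        ? Math.ceil(parseFloat(match[1])) + 1 // honor the hint, plus 1s buffer
        : 60 * Math.pow(2, retryCount);       // fallback: 60s, 120s, 240s
    }

    console.log(computeWaitSeconds('try again after 2.5 seconds', 0)); // 4
    console.log(computeWaitSeconds('no wait hint here', 1));           // 120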
@@ -72,7 +106,9 @@ class OpenAIAssistantsProvider {
        delete payload.metadata;
      }

-     return this.client.beta.threads.messages.create(this._ensureId(threadId), payload);
+     return this._retryWithRateLimit(() =>
+       this.client.beta.threads.messages.create(this._ensureId(threadId), payload)
+     );
    }

    async listMessages({ threadId, runId, order = 'desc', limit } = {}) {
@@ -153,7 +189,9 @@ class OpenAIAssistantsProvider {
        }
      });

-     return this.client.beta.threads.runs.create(this._ensureId(threadId), payload);
+     return this._retryWithRateLimit(() =>
+       this.client.beta.threads.runs.create(this._ensureId(threadId), payload)
+     );
    }

    async getRun({ threadId, runId }) {
@@ -3,6 +3,9 @@ const { OpenAI } = require('openai');
  const CONVERSATION_PREFIX = 'conv_';
  const RESPONSE_PREFIX = 'resp_';
  const DEFAULT_MAX_RETRIES = parseInt(process.env.MAX_RETRIES || '10', 10);
+ const MAX_ITEMS_ON_CREATE = 20;
+ const MAX_ITEMS_PER_BATCH = 20;
+ const DEFAULT_MAX_HISTORICAL_MESSAGES = parseInt(process.env.MAX_HISTORICAL_MESSAGES || '50', 10);

  /**
   * Provider wrapper that targets the Conversations + Responses API surface.
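
Note: these constants drive the batched-history path added below. A worked example of the arithmetic under the default 50-message cap:

    const incoming = 73;
    const capped = Math.min(incoming, 50);     // MAX_HISTORICAL_MESSAGES: 50 kept
    const onCreate = Math.min(capped, 20);     // MAX_ITEMS_ON_CREATE: 20 in create call
    const remaining = capped - onCreate;       // 30 left over
    const batches = Math.ceil(remaining / 20); // MAX_ITEMS_PER_BATCH: 2 batches
    console.log({ capped, onCreate, remaining, batches });
    // { capped: 50, onCreate: 20, remaining: 30, batches: 2 }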
@@ -43,21 +46,153 @@ class OpenAIResponsesProvider {
      return this.client;
    }

+   /**
+    * Retry helper for rate limit errors
+    * Extracts wait time from error message and retries with exponential backoff
+    */
+   async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
+     try {
+       return await operation();
+     } catch (error) {
+       if (error?.status === 429 || error?.code === 'rate_limit_exceeded') {
+         if (retryCount >= maxRetries) {
+           throw new Error(`Rate limit exceeded after ${maxRetries} retries: ${error.message}`);
+         }
+
+         let waitTime = 60;
+         const waitMatch = error?.error?.message?.match(/after (\d+\.?\d*) seconds?/i) ||
+                           error?.message?.match(/after (\d+\.?\d*) seconds?/i);
+         if (waitMatch) {
+           waitTime = Math.ceil(parseFloat(waitMatch[1])) + 1; // Add 1 second buffer
+         } else {
+           waitTime = 60 * Math.pow(2, retryCount);
+         }
+
+         console.log(`[OpenAIResponsesProvider] Rate limit exceeded. Waiting ${waitTime} seconds before retry ${retryCount + 1}/${maxRetries}`);
+         await new Promise(resolve => setTimeout(resolve, waitTime * 1000));
+
+         return this._retryWithRateLimit(operation, maxRetries, retryCount + 1);
+       }
+
+       throw error;
+     }
+   }
+
    /**
     * Conversations helpers
     */
    async createConversation({ metadata, messages = [], toolResources } = {}) {
+     const messagesToProcess = messages.length > DEFAULT_MAX_HISTORICAL_MESSAGES
+       ? messages.slice(-DEFAULT_MAX_HISTORICAL_MESSAGES)
+       : messages;
+
+     if (messages.length > DEFAULT_MAX_HISTORICAL_MESSAGES) {
+       console.warn(`[OpenAIResponsesProvider] Capped ${messages.length} → ${DEFAULT_MAX_HISTORICAL_MESSAGES} messages`);
+     }
+
+     const allItems = this._conversationItems(messagesToProcess);
+     const totalItems = allItems.length;
+
+     // Create empty conversation if no messages
+     if (totalItems === 0) {
+       const payload = this._cleanObject({
+         metadata,
+         tool_resources: toolResources,
+       });
+
+       return this._retryWithRateLimit(async () => {
+         if (this.conversations && typeof this.conversations.create === 'function') {
+           return await this.conversations.create(payload);
+         }
+         return await this._post('/conversations', payload);
+       });
+     }
+
+     // Split items: first batch for initial creation, rest to add in batches
+     const initialItems = allItems.slice(0, MAX_ITEMS_ON_CREATE);
+     const remainingItems = allItems.slice(MAX_ITEMS_ON_CREATE);
+     const totalBatches = Math.ceil(remainingItems.length / MAX_ITEMS_PER_BATCH);
+
+     // Concise batch summary
+     console.log(`[OpenAIResponsesProvider] Batching: ${initialItems.length} (create) + ${remainingItems.length} (${totalBatches} batches) = ${totalItems} total`);
+
      const payload = this._cleanObject({
        metadata,
-       items: this._conversationItems(messages),
+       items: initialItems,
        tool_resources: toolResources,
      });

-     if (this.conversations && typeof this.conversations.create === 'function') {
-       return await this.conversations.create(payload);
+     let conversation;
+     try {
+       conversation = await this._retryWithRateLimit(async () => {
+         if (this.conversations && typeof this.conversations.create === 'function') {
+           return await this.conversations.create(payload);
+         }
+         return await this._post('/conversations', payload);
+       });
+     } catch (error) {
+       console.error('[OpenAIResponsesProvider] Failed to create conversation:', error?.message || error);
+       throw error;
+     }
+
+     // Add remaining messages in batches (maintains chronological order)
+     if (remainingItems.length > 0) {
+       try {
+         await this._addItemsInBatches(conversation.id, remainingItems);
+       } catch (error) {
+         console.error('[OpenAIResponsesProvider] Failed to add remaining messages. Conversation created with partial history:', error?.message || error);
+       }
+     }
+
+     return conversation;
+   }
+
+   /**
+    * Add items to a conversation in batches
+    * Items are converted to API format with type: 'message' for the items.create endpoint
+    * @private
+    */
+   async _addItemsInBatches(threadId, items, batchSize = MAX_ITEMS_PER_BATCH) {
+     if (!items || items.length === 0) {
+       return;
+     }
+
+     const id = this._ensureThreadId(threadId);
+     const totalBatches = Math.ceil(items.length / batchSize);
+
+     for (let i = 0; i < items.length; i += batchSize) {
+       const batch = items.slice(i, i + batchSize);
+       const batchNumber = Math.floor(i / batchSize) + 1;
+
+       const batchPayload = this._convertItemsToApiFormat(batch);
+
+       try {
+         await this._retryWithRateLimit(async () => {
+           if (this.conversations?.items?.create) {
+             return await this.conversations.items.create(id, { items: batchPayload });
+           }
+           return await this._post(`/conversations/${id}/items`, { items: batchPayload });
+         });
+       } catch (error) {
+         console.error(`[OpenAIResponsesProvider] Batch ${batchNumber}/${totalBatches} failed:`, error?.message || error);
+         throw error;
+       }
      }
+
+     console.log(`[OpenAIResponsesProvider] Successfully added ${items.length} messages in ${totalBatches} batches`);
+   }

-     return await this._post('/conversations', payload);
+   /**
+    * Convert conversation items to API format for items.create endpoint
+    * Adds type: 'message' which is required by the items.create API
+    * @private
+    */
+   _convertItemsToApiFormat(items) {
+     return items.map(item => ({
+       role: item.role,
+       content: item.content,
+       type: 'message',
+     }));
    }

    async deleteConversation(threadId) {
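
Note: with the batching above, callers can hand `createConversation` an arbitrarily long history and the provider caps it, then splits and appends the remainder in order. A hedged usage sketch; the provider's constructor options and message shape are not fully shown in this diff, so treat both as illustrative:

    // Assumes OpenAIResponsesProvider is importable and takes an apiKey option
    const provider = new OpenAIResponsesProvider({ apiKey: process.env.OPENAI_API_KEY });

    const history = Array.from({ length: 120 }, (_, i) => ({
      role: i % 2 ? 'assistant' : 'user',
      content: `message ${i}`,
    }));

    (async () => {
      // 120 messages: capped to 50; 20 ride on the create call,
      // the remaining 30 are appended in 2 batches of at most 20
      const conversation = await provider.createConversation({
        metadata: { source: 'backfill' },
        messages: history,
      });
      console.log(conversation.id);
    })();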
@@ -79,10 +214,12 @@ class OpenAIResponsesProvider {
      console.log('payload', payload);

      if (payload.content) {
-       if (this.conversations?.items?.create) {
-         return await this.conversations.items.create(id, {items: [payload]});
-       }
-       return await this._post(`/conversations/${id}/items`, {items: [payload]});
+       return this._retryWithRateLimit(async () => {
+         if (this.conversations?.items?.create) {
+           return await this.conversations.items.create(id, {items: [payload]});
+         }
+         return await this._post(`/conversations/${id}/items`, {items: [payload]});
+       });
      }
    }

@@ -159,7 +296,9 @@ class OpenAIResponsesProvider {
      });

      console.log('payload', payload);
-     const response = await this.client.responses.create(payload);
+     const response = await this._retryWithRateLimit(() =>
+       this.client.responses.create(payload)
+     );
      console.log('response', response);

      if (response?.status !== 'completed') {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@peopl-health/nexus",
-   "version": "2.2.1",
+   "version": "2.2.2",
    "description": "Core messaging and assistant library for WhatsApp communication platforms",
    "keywords": [
      "whatsapp",