@peopl-health/nexus 2.2.0 → 2.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,10 +1,19 @@
  const express = require('express');
  require('dotenv').config();
- const { Nexus, setupDefaultRoutes } = require('@peopl-health/nexus');
+ const { Nexus, setupDefaultRoutes, BaseAssistant } = require('@peopl-health/nexus');

  const app = express();
  app.use(express.json());

+ // Define a General Assistant
+ class GeneralAssistant extends BaseAssistant {
+   constructor(options = {}) {
+     super(options);
+     // You can add custom tools or setup here
+     // Example: this.registerTool('toolName', schema, handler);
+   }
+ }
+
  async function startServer() {
    // Initialize Nexus with all services and batching enabled
    const nexus = new Nexus({
@@ -33,6 +42,21 @@ async function startServer() {
      media: {
        bucketName: process.env.AWS_S3_BUCKET_NAME,
        region: process.env.AWS_REGION || 'us-east-1'
+     },
+
+     // LLM configuration (required for assistants)
+     llm: 'openai',
+     llmConfig: {
+       apiKey: process.env.OPENAI_API_KEY
+     },
+
+     // Assistants configuration
+     assistants: {
+       registry: {
+         // Register the general assistant under its OpenAI assistant and prompt IDs
+         'asst_O6mAXAhf0xyVj3t4DHRs26uT': GeneralAssistant,
+         'pmpt_68f844cd975481979c080431bde74f6e0adf01a52110813b': GeneralAssistant,
+       }
      }
    });

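The registry above maps provider-side assistant and prompt IDs to assistant classes, and the constructor comment hints at a registerTool(name, schema, handler) hook. Below is a minimal sketch of a custom tool registration, assuming that signature; the subclass, tool name, JSON schema, and handler are all hypothetical, not part of the published package.

// Hypothetical subclass; registerTool(name, schema, handler) is assumed from
// the example comment above, and the tool itself is illustrative only.
class SupportAssistant extends BaseAssistant {
  constructor(options = {}) {
    super(options);
    this.registerTool(
      'lookupOrder', // hypothetical tool name
      {
        type: 'object',
        properties: { orderId: { type: 'string' } },
        required: ['orderId'],
      },
      async ({ orderId }) => {
        // Replace with a real datastore lookup
        return { orderId, status: 'shipped' };
      }
    );
  }
}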
@@ -1,6 +1,7 @@
  const llmConfig = require('../config/llmConfig');
  const { Thread } = require('../models/threadModel');
- const { getLastNMessages } = require('../helpers/assistantHelper');
+ const { Message } = require('../models/messageModel');
+ const { formatMessage } = require('../helpers/assistantHelper');
  const { createProvider } = require('../providers/createProvider');

  const DEFAULT_MAX_HISTORICAL_MESSAGES = parseInt(process.env.MAX_HISTORICAL_MESSAGES || '50', 10);
@@ -165,11 +166,6 @@ class BaseAssistant {
    this._ensureClient();
    this.status = 'active';

-   const whatsappId = context?.whatsapp_id || code;
-   if (whatsappId) {
-     this.lastMessages = await getLastNMessages(whatsappId, DEFAULT_MAX_HISTORICAL_MESSAGES);
-   }
-
    const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
    if (!provider || typeof provider.createConversation !== 'function') {
      throw new Error('Provider not configured. Cannot create conversation.');
@@ -189,12 +185,39 @@ class BaseAssistant {
    return this.thread;
  }

- async buildInitialMessages({ code }) {
-   if (!this.lastMessages) return [];
-   return [{
-     role: 'assistant',
-     content: `Últimos mensajes para ${code}: \n${this.lastMessages}`
-   }];
+ async buildInitialMessages({ code, context = {} }) {
+   const whatsappId = context?.whatsapp_id || code;
+   if (!whatsappId) {
+     return [];
+   }
+
+   try {
+     const lastMessages = await Message.find({ numero: whatsappId })
+       .sort({ createdAt: -1 })
+       .limit(DEFAULT_MAX_HISTORICAL_MESSAGES);
+
+     if (!lastMessages || lastMessages.length === 0) {
+       return [];
+     }
+
+     const messagesInOrder = lastMessages.reverse();
+
+     // Messages with from_me: true are assistant messages, from_me: false are user messages
+     const formattedMessages = messagesInOrder
+       .filter(message => message && message.timestamp && message.body && message.body.trim() !== '')
+       .map(message => {
+         const formattedText = formatMessage(message);
+         return formattedText ? { role: message.from_me ? 'assistant' : 'user', content: formattedText } : null;
+       })
+       .filter(message => message !== null);
+
+     console.log(`[buildInitialMessages] Built ${formattedMessages.length} initial messages for ${code}`);
+
+     return formattedMessages;
+   } catch (error) {
+     console.error('[buildInitialMessages] Error fetching messages:', error);
+     return [];
+   }
  }

  async close() {
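For reference, buildInitialMessages now resolves to role-tagged history that seeds the new conversation directly, instead of the single assistant-role summary string it returned before. A sketch of the output shape, with hypothetical message bodies:

// Hypothetical result for a contact with two stored messages:
[
  { role: 'user', content: '[lunes, 29 de septiembre de 2025 a las 9:15 AM] Hello, I have a question' },
  { role: 'assistant', content: '[lunes, 29 de septiembre de 2025 a las 9:16 AM] Of course, how can I help?' }
]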
@@ -6,7 +6,7 @@ async function logBugReportToAirtable(reporter, whatsapp_id, description, severi
  try {
    let conversation = null;
    if (messageIds && messageIds.length > 0) {
-     const messageObjects = await Message.find({ _id: { $in: messageIds } }).sort({ timestamp: 1 });
+     const messageObjects = await Message.find({ _id: { $in: messageIds } }).sort({ createdAt: 1 });
      conversation = messageObjects.map(msg => {
        const timestamp = new Date(msg.timestamp).toISOString().slice(0, 16).replace('T', ' ');
        const role = msg.from_me ? 'Assistant' : 'Patient';
@@ -409,14 +409,14 @@ const getConversationsByNameController = async (req, res) => {
  try {
    const conversations = await Message.aggregate([
      { $match: { from_me: false, is_group: false } },
-     { $sort: { timestamp: -1 } },
+     { $sort: { createdAt: -1 } },
      { $group: {
        _id: '$numero',
        name: { $first: '$nombre_whatsapp' },
        latestMessage: { $first: '$$ROOT' },
        messageCount: { $sum: 1 }
      }},
-     { $sort: { 'latestMessage.timestamp': -1 } }
+     { $sort: { 'latestMessage.createdAt': -1 } }
    ]);

    res.status(200).json({
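A plausible motivation for the repeated timestamp-to-createdAt sort changes in this release: createdAt is the Date field that { timestamps: true } adds to the schema, while the legacy timestamp field has held mixed representations (the old isRecentMessage code below compared it against an ISO string), and sorts over strings are lexicographic. A toy illustration with hypothetical values:

// Lexicographic sort puts '10/...' before '9/...' because '1' < '9',
// which is the wrong chronological order:
const strings = ['9/30/2025', '10/1/2025'];
console.log(strings.sort()); // ['10/1/2025', '9/30/2025']

// Dates compare by instant, so the order comes out right:
const dates = [new Date(Date.UTC(2025, 8, 30)), new Date(Date.UTC(2025, 9, 1))];
dates.sort((a, b) => a - b);
console.log(dates.map(d => d.toISOString().slice(0, 10))); // ['2025-09-30', '2025-10-01']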
@@ -48,7 +48,7 @@ async function getLastMessages(code) {
    query.is_group = false;
  }

- const lastMessages = await Message.find(query).sort({ timestamp: -1 });
+ const lastMessages = await Message.find(query).sort({ createdAt: -1 });
  console.log('[getLastMessages] lastMessages', lastMessages.map(msg => msg.body).join('\n\n'));

  if (lastMessages.length === 0) return [];
@@ -73,7 +73,7 @@ async function getLastMessages(code) {
  async function getLastNMessages(code, n) {
    try {
      const lastMessages = await Message.find({ numero: code })
-       .sort({ timestamp: -1 })
+       .sort({ createdAt: -1 })
        .limit(n);

      // Format each message and concatenate them, separated by blank lines
@@ -114,20 +114,39 @@ const getPatientRoleAndName = (reply, numbers) => {

  function formatMessage(reply) {
    try {
-     // Validate timestamp exists
      if (!reply.timestamp) {
        return null;
      }

+     // Normalize timestamp: convert any format to a Date object, then to ISO string
+     let dateObj;
+
+     if (reply.timestamp instanceof Date) {
+       dateObj = reply.timestamp;
+     } else if (typeof reply.timestamp === 'number') {
+       const ms = reply.timestamp < 1e12 ? reply.timestamp * 1000 : reply.timestamp;
+       dateObj = new Date(ms);
+     } else {
+       dateObj = new Date(reply.timestamp);
+     }
+
+     if (isNaN(dateObj.getTime())) {
+       console.warn('[formatMessage] Invalid timestamp:', reply.timestamp);
+       return null;
+     }
+
+     const isoString = dateObj.toISOString();
+
      // Convert timestamp to Mexico City timezone with Spanish format
      // Format: martes, 30 de septiembre de 2025 a las 8:30 AM
-     const mexicoCityTime = moment(reply.timestamp)
+     const mexicoCityTime = moment(isoString)
        .tz('America/Mexico_City')
        .locale('es')
        .format('dddd, D [de] MMMM [de] YYYY [a las] h:mm A');

      return `[${mexicoCityTime}] ${reply.body}`;
-   } catch {
+   } catch (error) {
+     console.error('[formatMessage] Error formatting message:', error?.message || error, 'timestamp:', reply.timestamp);
      return null;
    }
  }
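The normalization above guards against epoch values stored in seconds rather than milliseconds. The 1e12 cutoff works because present-day epoch seconds are around 1.7e9 while epoch milliseconds are around 1.7e12. A quick check with hypothetical values:

const toMs = ts => (ts < 1e12 ? ts * 1000 : ts); // same heuristic as above
const seconds = 1727700000;   // an instant in 2024, in epoch seconds
const millis = 1727700000000; // the same instant in epoch milliseconds
console.log(new Date(toMs(seconds)).toISOString()); // 2024-09-30T12:40:00.000Z
console.log(new Date(toMs(millis)).toISOString());  // 2024-09-30T12:40:00.000Z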
@@ -110,15 +110,15 @@ async function isRecentMessage(chatId) {

    const recentMessage = await Message.find({
      $or: [{ group_id: chatId }, { numero: chatId }],
-     timestamp: { $gte: fiveMinutesAgo.toISOString() }
-   }).sort({ timestamp: -1 }).limit(1);
+     createdAt: { $gte: fiveMinutesAgo }
+   }).sort({ createdAt: -1 }).limit(1);

    return !!recentMessage;
  }

  async function getLastMessages(chatId, n) {
    const messages = await Message.find({ group_id: chatId })
-     .sort({ timestamp: -1 })
+     .sort({ createdAt: -1 })
      .limit(n)
      .select('timestamp numero nombre_whatsapp body');

@@ -70,8 +70,8 @@ async function isRecentMessage(chatId) {

    const recentMessage = await Message.find({
      $or: [{ group_id: chatId }, { numero: chatId }],
-     timestamp: { $gte: fiveMinutesAgo.toISOString() }
-   }).sort({ timestamp: -1 }).limit(1);
+     createdAt: { $gte: fiveMinutesAgo }
+   }).sort({ createdAt: -1 }).limit(1);

    return !!recentMessage;
  }
@@ -79,7 +79,7 @@ async function isRecentMessage(chatId) {

  async function getLastMessages(chatId, n) {
    const messages = await Message.find({ numero: chatId })
-     .sort({ timestamp: -1 })
+     .sort({ createdAt: -1 })
      .limit(n)
      .select('timestamp numero nombre_whatsapp body');

@@ -57,6 +57,7 @@ const messageSchema = new mongoose.Schema({
  }, { timestamps: true });

  messageSchema.index({ message_id: 1, timestamp: 1 }, { unique: true });
+ messageSchema.index({ numero: 1, createdAt: -1 });

  messageSchema.pre('save', function (next) {
    if (this.timestamp) {
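The new compound index matches the access pattern this release standardizes on: filter by numero, sort by createdAt descending. A sketch of the query shape it serves; the contact number is hypothetical:

// With { numero: 1, createdAt: -1 } in place, MongoDB can satisfy this query
// with an index scan (IXSCAN) instead of a collection scan plus an in-memory
// SORT stage:
async function recentHistory() {
  return Message.find({ numero: '5215512345678' }) // hypothetical contact number
    .sort({ createdAt: -1 })
    .limit(50);
}
// Appending .explain('executionStats') is one way to confirm the winning plan
// uses the numero_1_createdAt_-1 index.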
@@ -157,7 +158,10 @@ function getMessageValues(message, content, reply, is_media) {

  async function getContactDisplayName(contactNumber) {
    try {
-     const latestMessage = await Message.findOne({ numero: contactNumber })
+     const latestMessage = await Message.findOne({
+       numero: contactNumber,
+       from_me: false
+     })
        .sort({ createdAt: -1 })
        .select('nombre_whatsapp');

@@ -40,11 +40,45 @@ class OpenAIAssistantsProvider {
    return this.client;
  }

+ /**
+  * Retry helper for rate limit errors
+  * Extracts wait time from error message and retries with exponential backoff
+  */
+ async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
+   try {
+     return await operation();
+   } catch (error) {
+     if (error?.status === 429 || error?.code === 'rate_limit_exceeded') {
+       if (retryCount >= maxRetries) {
+         throw new Error(`Rate limit exceeded after ${maxRetries} retries: ${error.message}`);
+       }
+
+       let waitTime = 60;
+       const waitMatch = error?.error?.message?.match(/after (\d+\.?\d*) seconds?/i) ||
+                         error?.message?.match(/after (\d+\.?\d*) seconds?/i);
+       if (waitMatch) {
+         waitTime = Math.ceil(parseFloat(waitMatch[1])) + 1; // Add 1 second buffer
+       } else {
+         waitTime = 60 * Math.pow(2, retryCount);
+       }
+
+       console.log(`[OpenAIAssistantsProvider] Rate limit exceeded. Waiting ${waitTime} seconds before retry ${retryCount + 1}/${maxRetries}`);
+       await new Promise(resolve => setTimeout(resolve, waitTime * 1000));
+
+       return this._retryWithRateLimit(operation, maxRetries, retryCount + 1);
+     }
+
+     throw error;
+   }
+ }
+
  async createConversation({ metadata, messages = [], toolResources } = {}) {
-   const thread = await this.client.beta.threads.create({
-     metadata,
-     tool_resources: toolResources,
-   });
+   const thread = await this._retryWithRateLimit(() =>
+     this.client.beta.threads.create({
+       metadata,
+       tool_resources: toolResources,
+     })
+   );

    if (Array.isArray(messages) && messages.length > 0) {
      for (const message of messages) {
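The retry helper first tries to honor the wait time OpenAI embeds in rate-limit messages, then falls back to exponential backoff (60s, 120s, 240s). A self-contained sketch of the parsing path, using a fabricated 429 error:

let calls = 0;
async function flaky() {
  if (calls++ === 0) {
    // Fabricated error in the shape the helper checks for
    const err = new Error('Rate limit reached. Please try again after 2 seconds.');
    err.status = 429;
    throw err;
  }
  return 'ok';
}
// provider._retryWithRateLimit(flaky) would parse "after 2 seconds", wait
// 3 seconds (2s plus the 1-second buffer), retry once, and resolve to 'ok'.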
@@ -72,7 +106,9 @@ class OpenAIAssistantsProvider {
      delete payload.metadata;
    }

-   return this.client.beta.threads.messages.create(this._ensureId(threadId), payload);
+   return this._retryWithRateLimit(() =>
+     this.client.beta.threads.messages.create(this._ensureId(threadId), payload)
+   );
  }

  async listMessages({ threadId, runId, order = 'desc', limit } = {}) {
@@ -153,7 +189,9 @@ class OpenAIAssistantsProvider {
      }
    });

-   return this.client.beta.threads.runs.create(this._ensureId(threadId), payload);
+   return this._retryWithRateLimit(() =>
+     this.client.beta.threads.runs.create(this._ensureId(threadId), payload)
+   );
  }

  async getRun({ threadId, runId }) {
@@ -3,6 +3,9 @@ const { OpenAI } = require('openai');
  const CONVERSATION_PREFIX = 'conv_';
  const RESPONSE_PREFIX = 'resp_';
  const DEFAULT_MAX_RETRIES = parseInt(process.env.MAX_RETRIES || '10', 10);
+ const MAX_ITEMS_ON_CREATE = 20;
+ const MAX_ITEMS_PER_BATCH = 20;
+ const DEFAULT_MAX_HISTORICAL_MESSAGES = parseInt(process.env.MAX_HISTORICAL_MESSAGES || '50', 10);

  /**
   * Provider wrapper that targets the Conversations + Responses API surface.
@@ -43,21 +46,153 @@ class OpenAIResponsesProvider {
    return this.client;
  }

+ /**
+  * Retry helper for rate limit errors
+  * Extracts wait time from error message and retries with exponential backoff
+  */
+ async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
+   try {
+     return await operation();
+   } catch (error) {
+     if (error?.status === 429 || error?.code === 'rate_limit_exceeded') {
+       if (retryCount >= maxRetries) {
+         throw new Error(`Rate limit exceeded after ${maxRetries} retries: ${error.message}`);
+       }
+
+       let waitTime = 60;
+       const waitMatch = error?.error?.message?.match(/after (\d+\.?\d*) seconds?/i) ||
+                         error?.message?.match(/after (\d+\.?\d*) seconds?/i);
+       if (waitMatch) {
+         waitTime = Math.ceil(parseFloat(waitMatch[1])) + 1; // Add 1 second buffer
+       } else {
+         waitTime = 60 * Math.pow(2, retryCount);
+       }
+
+       console.log(`[OpenAIResponsesProvider] Rate limit exceeded. Waiting ${waitTime} seconds before retry ${retryCount + 1}/${maxRetries}`);
+       await new Promise(resolve => setTimeout(resolve, waitTime * 1000));
+
+       return this._retryWithRateLimit(operation, maxRetries, retryCount + 1);
+     }
+
+     throw error;
+   }
+ }
+
  /**
   * Conversations helpers
   */
  async createConversation({ metadata, messages = [], toolResources } = {}) {
+   const messagesToProcess = messages.length > DEFAULT_MAX_HISTORICAL_MESSAGES
+     ? messages.slice(-DEFAULT_MAX_HISTORICAL_MESSAGES)
+     : messages;
+
+   if (messages.length > DEFAULT_MAX_HISTORICAL_MESSAGES) {
+     console.warn(`[OpenAIResponsesProvider] Capped ${messages.length} → ${DEFAULT_MAX_HISTORICAL_MESSAGES} messages`);
+   }
+
+   const allItems = this._conversationItems(messagesToProcess);
+   const totalItems = allItems.length;
+
+   // Create empty conversation if no messages
+   if (totalItems === 0) {
+     const payload = this._cleanObject({
+       metadata,
+       tool_resources: toolResources,
+     });
+
+     return this._retryWithRateLimit(async () => {
+       if (this.conversations && typeof this.conversations.create === 'function') {
+         return await this.conversations.create(payload);
+       }
+       return await this._post('/conversations', payload);
+     });
+   }
+
+   // Split items: first batch for initial creation, rest to add in batches
+   const initialItems = allItems.slice(0, MAX_ITEMS_ON_CREATE);
+   const remainingItems = allItems.slice(MAX_ITEMS_ON_CREATE);
+   const totalBatches = Math.ceil(remainingItems.length / MAX_ITEMS_PER_BATCH);
+
+   // Concise batch summary
+   console.log(`[OpenAIResponsesProvider] Batching: ${initialItems.length} (create) + ${remainingItems.length} (${totalBatches} batches) = ${totalItems} total`);
+
    const payload = this._cleanObject({
      metadata,
-     items: this._conversationItems(messages),
+     items: initialItems,
      tool_resources: toolResources,
    });

-   if (this.conversations && typeof this.conversations.create === 'function') {
-     return await this.conversations.create(payload);
+   let conversation;
+   try {
+     conversation = await this._retryWithRateLimit(async () => {
+       if (this.conversations && typeof this.conversations.create === 'function') {
+         return await this.conversations.create(payload);
+       }
+       return await this._post('/conversations', payload);
+     });
+   } catch (error) {
+     console.error('[OpenAIResponsesProvider] Failed to create conversation:', error?.message || error);
+     throw error;
+   }
+
+   // Add remaining messages in batches (maintains chronological order)
+   if (remainingItems.length > 0) {
+     try {
+       await this._addItemsInBatches(conversation.id, remainingItems);
+     } catch (error) {
+       console.error('[OpenAIResponsesProvider] Failed to add remaining messages. Conversation created with partial history:', error?.message || error);
+     }
+   }
+
+   return conversation;
+ }
+
+ /**
+  * Add items to a conversation in batches
+  * Items are converted to API format with type: 'message' for the items.create endpoint
+  * @private
+  */
+ async _addItemsInBatches(threadId, items, batchSize = MAX_ITEMS_PER_BATCH) {
+   if (!items || items.length === 0) {
+     return;
+   }
+
+   const id = this._ensurethreadId(threadId);
+   const totalBatches = Math.ceil(items.length / batchSize);
+
+   for (let i = 0; i < items.length; i += batchSize) {
+     const batch = items.slice(i, i + batchSize);
+     const batchNumber = Math.floor(i / batchSize) + 1;
+
+     const batchPayload = this._convertItemsToApiFormat(batch);
+
+     try {
+       await this._retryWithRateLimit(async () => {
+         if (this.conversations?.items?.create) {
+           return await this.conversations.items.create(id, { items: batchPayload });
+         }
+         return await this._post(`/conversations/${id}/items`, { items: batchPayload });
+       });
+     } catch (error) {
+       console.error(`[OpenAIResponsesProvider] Batch ${batchNumber}/${totalBatches} failed:`, error?.message || error);
+       throw error;
+     }
    }
+
+   console.log(`[OpenAIResponsesProvider] Successfully added ${items.length} messages in ${totalBatches} batches`);
+ }

-   return await this._post('/conversations', payload);
+ /**
+  * Convert conversation items to API format for items.create endpoint
+  * Adds type: 'message' which is required by the items.create API
+  * @private
+  */
+ _convertItemsToApiFormat(items) {
+   return items.map(item => ({
+     role: item.role,
+     content: item.content,
+     type: 'message',
+   }));
  }

  async deleteConversation(threadId) {
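Since MAX_ITEMS_ON_CREATE and MAX_ITEMS_PER_BATCH are both 20, the batch arithmetic for a full 50-message history (the DEFAULT_MAX_HISTORICAL_MESSAGES cap) works out as follows:

const total = 50;                          // capped history size
const onCreate = Math.min(total, 20);      // 20 items sent with the create call
const remaining = total - onCreate;        // 30 items left over
const batches = Math.ceil(remaining / 20); // 2 follow-up batches (20 + 10)
console.log(`${onCreate} (create) + ${remaining} (${batches} batches) = ${total} total`);
// Prints "20 (create) + 30 (2 batches) = 50 total", matching the log line above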
@@ -79,10 +214,12 @@ class OpenAIResponsesProvider {
    console.log('payload', payload);

    if (payload.content) {
-     if (this.conversations?.items?.create) {
-       return await this.conversations.items.create(id, { items: [payload] });
-     }
-     return await this._post(`/conversations/${id}/items`, { items: [payload] });
+     return this._retryWithRateLimit(async () => {
+       if (this.conversations?.items?.create) {
+         return await this.conversations.items.create(id, { items: [payload] });
+       }
+       return await this._post(`/conversations/${id}/items`, { items: [payload] });
+     });
    }
  }
@@ -159,7 +296,9 @@ class OpenAIResponsesProvider {
    });

    console.log('payload', payload);
-   const response = await this.client.responses.create(payload);
+   const response = await this._retryWithRateLimit(() =>
+     this.client.responses.create(payload)
+   );
    console.log('response', response);

    if (response?.status !== 'completed') {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@peopl-health/nexus",
-   "version": "2.2.0",
+   "version": "2.2.2",
    "description": "Core messaging and assistant library for WhatsApp communication platforms",
    "keywords": [
      "whatsapp",