@peopl-health/nexus 1.7.12 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,37 +1,23 @@
  const { Historial_Clinico_ID } = require('../config/airtableConfig.js');
  const AWS = require('../config/awsConfig.js');
- const { combineImagesToPDF, cleanupFiles } = require('../helpers/filesHelper.js');
- const runtimeConfig = require('../config/runtimeConfig');
  const llmConfig = require('../config/llmConfig');
+ const runtimeConfig = require('../config/runtimeConfig');
  const { BaseAssistant } = require('../assistants/BaseAssistant');
- const { OpenAIProvider } = require('../providers/OpenAIProvider');
-
- const configureLLMProvider = (provider) => {
-   if (!provider) {
-     throw new Error('configureLLMProvider requires an OpenAI provider or raw client');
-   }
-
-   if (provider instanceof OpenAIProvider || typeof provider.runConversation === 'function') {
-     llmConfig.setOpenAIProvider(provider);
-     return provider;
-   }
-
-   const wrappedProvider = new OpenAIProvider({ client: provider });
-   llmConfig.setOpenAIProvider(wrappedProvider);
-   return wrappedProvider;
- };
-
- let assistantConfig = null;
- let assistantRegistry = {};
- let customGetAssistantById = null;
+ const { createProvider } = require('../providers/createProvider');

  const { Message, formatTimestamp } = require('../models/messageModel.js');
  const { Thread } = require('../models/threadModel.js');

  const { checkRunStatus, getCurRow } = require('../helpers/assistantHelper.js');
  const { processIndividualMessage, getLastMessages } = require('../helpers/assistantHelper.js');
+ const { combineImagesToPDF, cleanupFiles } = require('../helpers/filesHelper.js');
  const { delay } = require('../helpers/whatsappHelper.js');

+ let assistantConfig = null;
+ let assistantRegistry = {};
+ let customGetAssistantById = null;
+
+
  const configureAssistants = (config) => {
    if (!config) {
      throw new Error('Assistant configuration is required');
@@ -44,53 +30,60 @@ const runAssistantAndWait = async ({
    assistant,
    runConfig = {}
  }) => {
-   if (!thread || !thread.thread_id) {
-     throw new Error('runAssistantAndWait requires a thread with a valid thread_id');
+   if (!thread || !(thread.thread_id || thread.conversation_id)) {
+     throw new Error('runAssistantAndWait requires a thread with a valid thread_id or conversation_id');
    }

    if (!assistant) {
      throw new Error('runAssistantAndWait requires an assistant instance');
    }

-   const provider = llmConfig.requireOpenAIProvider();
+   const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
    const { polling, ...conversationConfig } = runConfig || {};

    const run = await provider.runConversation({
-     conversationId: thread.thread_id,
-     assistantId: thread.assistant_id,
+     threadId: thread?.thread_id,
+     conversationId: thread?.conversation_id,
+     assistantId: thread?.assistant_id,
+     promptId: thread?.prompt_id,
      ...conversationConfig,
    });

-   const filter = thread.code ? { code: thread.code, active: true } : null;
-   if (filter) {
-     await Thread.updateOne(filter, { $set: { run_id: run.id } });
-   }
+   let output = null;
+   if (thread?.thread_id) {
+     const filter = thread.code ? { code: thread.code, active: true } : null;
+     if (filter) {
+       await Thread.updateOne(filter, { $set: { run_id: run.id } });
+     }

-   const maxRetries = polling?.maxRetries ?? 30;
-   let completed = false;
+     const maxRetries = polling?.maxRetries ?? 30;
+     let completed = false;

-   try {
-     completed = await checkRunStatus(assistant, run.thread_id, run.id, 0, maxRetries);
-   } finally {
-     if (filter) {
-       await Thread.updateOne(filter, { $set: { run_id: null } });
+     try {
+       completed = await checkRunStatus(assistant, run.thread_id, run.id, 0, maxRetries);
+     } finally {
+       if (filter) {
+         await Thread.updateOne(filter, { $set: { run_id: null } });
+       }
      }
-   }

-   let finalRun = run;
-   try {
-     finalRun = await provider.getRun({ conversationId: run.thread_id, runId: run.id });
-   } catch (error) {
-     console.warn('Warning: unable to retrieve final run state:', error?.message || error);
-   }
+     let finalRun = run;
+     try {
+       finalRun = await provider.getRun({ threadId: run.thread_id, runId: run.id });
+     } catch (error) {
+       console.warn('Warning: unable to retrieve final run state:', error?.message || error);
+     }

-   if (!completed) {
-     return { run: finalRun, completed: false, output: '' };
-   }
+     if (!completed) {
+       return { run: finalRun, completed: false, output: '' };
+     }

-   const output = await provider.getRunText({ conversationId: run.thread_id, runId: run.id, fallback: '' });
+     output = await provider.getRunText({ threadId: run.thread_id, runId: run.id, fallback: '' });
+   } else {
+     output = run.output_text;
+   }

-   return { run: finalRun, completed: true, output };
+   return { completed: true, output };
  };

  const registerAssistant = (assistantId, definition) => {
@@ -171,7 +164,7 @@ const getAssistantById = (assistant_id, thread) => {
      throw new Error(`Assistant '${assistant_id}' not found. Available assistants: ${Object.keys(assistantRegistry).join(', ')}`);
    }

-   const provider = llmConfig.getOpenAIProvider({ instantiate: false });
+   const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
    const sharedClient = provider?.getClient?.() || llmConfig.openaiClient || null;

    if (AssistantClass.prototype instanceof BaseAssistant) {
@@ -196,10 +189,11 @@ const getAssistantById = (assistant_id, thread) => {
  };


- const createAssistant = async (code, assistant_id, messages=[], prevThread=null) => {
+ const createAssistant = async (code, assistant_id, messages=[], force=false) => {
    // If thread already exists, update it
    const findThread = await Thread.findOne({ code: code });
-   if (findThread) {
+   const variant = process.env.VARIANT || 'assistants';
+   if ((findThread?.conversation_id && variant === 'responses') || (findThread?.thread_id && variant === 'assistants')) {
      await Thread.updateOne({ code: code }, { $set: { active: true, stopped: false, assistant_id: assistant_id } });
      return findThread;
    }
@@ -213,34 +207,40 @@ const createAssistant = async (code, assistant_id, messages=[], prevThread=null)
    const initialThread = await assistant.create(code, curRow[0]);

    // Add new messages to memory
-   const provider = llmConfig.requireOpenAIProvider();
+   const provider = createProvider({ variant: variant || 'assistants' });
    for (const message of messages) {
      await provider.addMessage({
-       conversationId: initialThread.id,
+       threadId: initialThread.id,
        role: 'assistant',
        content: message
      });
    }
-
+
+   console.log('initialThread', initialThread);
    // Define new thread data
    const thread = {
      code: code,
-     assistant_id: assistant_id,
-     thread_id: initialThread.id,
+     assistant_id: assistant_id.startsWith('assistant') ? assistant_id : null,
+     thread_id: initialThread.id.startsWith('thread') ? initialThread.id : null,
+     conversation_id: initialThread.id.startsWith('conv') ? initialThread.id : null,
+     prompt_id: assistant_id.startsWith('pmpt') ? assistant_id : null,
      patient_id: patientId,
-     run_id: null,
      nombre: nombre,
      active: true
    };

-   const condition = { thread_id: prevThread?.thread_id };
+   const updateData = Object.fromEntries(
+     Object.entries(thread).filter(([_, v]) => v != null)
+   );
+
+   const condition = { $or: [{ thread_id: findThread?.thread_id }, { conversation_id: findThread?.conversation_id }] };
    const options = { new: true, upsert: true };
-   const updatedThread = await Thread.findOneAndUpdate(condition, thread, options);
+   const updatedThread = await Thread.findOneAndUpdate(condition, {run_id: null, ...updateData}, options);
    console.log('Updated thread:', updatedThread);

    // Delete previous thread
-   if (prevThread) {
-     await provider.deleteConversation(prevThread.thread_id);
+   if (force) {
+     await provider.deleteConversation(findThread.conversation_id || findThread.thread_id);
    }

    return thread;
@@ -252,12 +252,11 @@ const addMsgAssistant = async (code, inMessages, reply = false) => {
    console.log(thread);
    if (thread === null) return null;

-   const provider = llmConfig.requireOpenAIProvider();
-
+   const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
    for (const message of inMessages) {
      console.log(message);
      await provider.addMessage({
-       conversationId: thread.thread_id,
+       threadId: thread.conversation_id || thread.thread_id,
        role: 'assistant',
        content: message
      });
@@ -265,7 +264,7 @@ const addMsgAssistant = async (code, inMessages, reply = false) => {

    if (!reply) return null;

-   const assistant = getAssistantById(thread.assistant_id, thread);
+   const assistant = getAssistantById(thread?.prompt_id || thread?.assistant_id, thread);
    const { output } = await runAssistantAndWait({ thread, assistant });
    console.log('THE ANS IS', output);

@@ -282,7 +281,7 @@ const addInsAssistant = async (code, instruction) => {
    console.log(thread);
    if (thread === null) return null;

-   const assistant = getAssistantById(thread.assistant_id, thread);
+   const assistant = getAssistantById(thread?.prompt_id || thread?.assistant_id, thread);
    const { output } = await runAssistantAndWait({
      thread,
      assistant,
@@ -317,11 +316,11 @@ const getThread = async (code, message = null) => {
      return null;
    }

-   const provider = llmConfig.getOpenAIProvider({ instantiate: false });
+   const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
    while (thread && thread.run_id) {
      console.log(`Wait for ${thread.run_id} to be executed`);
      const activeProvider = provider || llmConfig.requireOpenAIProvider();
-     const run = await activeProvider.getRun({ conversationId: thread.thread_id, runId: thread.run_id });
+     const run = await activeProvider.getRun({ threadId: thread.conversation_id || thread.thread_id, runId: thread.run_id });
      if (run.status === 'cancelled' || run.status === 'expired' || run.status === 'completed') {
        await Thread.updateOne({ code: code }, { $set: { run_id: null } });
      }
@@ -353,28 +352,28 @@ const replyAssistant = async function (code, message_ = null, thread_ = null, ru
    if (!thread || !thread.active) return null;

    const patientReply = await getLastMessages(code);
-   console.log('UNREAD DATA', patientReply);
    if (!patientReply) {
      console.log('No relevant data found for this assistant.');
      return null;
    }

-   const provider = llmConfig.requireOpenAIProvider();
-
-   let activeRuns = await provider.listRuns({ conversationId: thread.thread_id, activeOnly: true });
-   let activeRunsCount = activeRuns?.data?.length || 0;
-   console.log('ACTIVE RUNS:', activeRunsCount, activeRuns?.data?.map(run => ({ id: run.id, status: run.status })));
-   while (activeRunsCount > 0) {
-     console.log(`WAITING FOR ${activeRunsCount} ACTIVE RUNS TO COMPLETE - ${thread.thread_id}`);
-     activeRuns = await provider.listRuns({ conversationId: thread.thread_id, activeOnly: true });
-     activeRunsCount = activeRuns?.data?.length || 0;
-     await delay(5000);
+   const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
+   if (!(thread?.conversation_id)) {
+     let activeRuns = await provider.listRuns({ threadId: thread.thread_id, activeOnly: true });
+     let activeRunsCount = activeRuns?.data?.length || 0;
+     console.log('ACTIVE RUNS:', activeRunsCount, activeRuns?.data?.map(run => ({ id: run.id, status: run.status })));
+     while (activeRunsCount > 0) {
+       console.log(`WAITING FOR ${activeRunsCount} ACTIVE RUNS TO COMPLETE - ${thread.thread_id}`);
+       activeRuns = await provider.listRuns({ threadId: thread.thread_id, activeOnly: true });
+       activeRunsCount = activeRuns?.data?.length || 0;
+       await delay(5000);
+     }
    }

    let patientMsg = false;
    let urls = [];
    for (const reply of patientReply) {
-     const { isNotAssistant, url } = await processIndividualMessage(code, reply, thread);
+     const { isNotAssistant, url } = await processIndividualMessage(code, reply, provider, thread);
      console.log(`isNotAssistant ${isNotAssistant} ${url}`);
      patientMsg = patientMsg || isNotAssistant;
      if (url) urls.push({ 'url': url });
@@ -390,15 +389,13 @@ const replyAssistant = async function (code, message_ = null, thread_ = null, ru
        await AWS.uploadBufferToS3(pdfBuffer, bucket, key, 'application/pdf');
      }
      if (processedFiles && processedFiles.length) {
-       await cleanupFiles(processedFiles);
+       cleanupFiles(processedFiles);
      }
    }

-   thread = await getThread(code);
-   console.log('THREAD STOPPED', code, thread?.stopped);
-   if (!patientMsg || !thread || thread?.stopped) return null;
+   if (!patientMsg) return null;

-   const assistant = getAssistantById(thread.assistant_id, thread);
+   const assistant = getAssistantById(thread?.prompt_id || thread?.assistant_id, thread);
    assistant.setReplies(patientReply);

    const { run, output, completed } = await runAssistantAndWait({
@@ -440,7 +437,6 @@ module.exports = {
    switchAssistant,
    configureAssistants,
    registerAssistant,
-   configureLLMProvider,
    overrideGetAssistantById,
    runAssistantAndWait
  };
@@ -61,17 +61,9 @@ class MongoStorage {

    async saveMessage(messageData) {
      try {
-       console.log('[MongoStorage] saveMessage called', {
-         code: messageData?.to || messageData?.code || messageData?.numero,
-         from: messageData?.from,
-         provider: messageData?.provider || 'unknown',
-         hasRaw: Boolean(messageData?.raw),
-         hasMedia: Boolean(messageData?.media || messageData?.fileUrl),
-         hasContentSid: Boolean(messageData?.contentSid),
-         is_interactive: messageData.isInteractive,
-         interaction_type: messageData.interactionType
-       });
+       console.log('[MongoStorage] Saving message', messageData);
        const enrichedMessage = await this._enrichTwilioMedia(messageData);
+       console.log('[MongoStorage] Enriched message', enrichedMessage);
        const values = this.buildMessageValues(enrichedMessage);
        const { insertMessage } = require('../models/messageModel');
        await insertMessage(values);
@@ -179,6 +171,7 @@ class MongoStorage {
      const now = new Date();
      const timestamp = now.toISOString();
      const nombre = messageData.nombre_whatsapp || messageData.author || messageData.fromName || runtimeConfig.get('USER_DB_MONGO') || process.env.USER_DB_MONGO || 'Nexus';
+     const processed = messageData.processed || false;

      // Use message body directly (template rendering is now handled by the provider)
      let textBody = messageData.body;
@@ -204,6 +197,7 @@ class MongoStorage {
        numero: normalizedNumero,
        body: textBody,
        timestamp,
+       processed,
        message_id: providerId,
        is_group: isGroup,
        is_media: isMedia,
@@ -1,9 +1,7 @@
- const { DefaultLLMProvider } = require('./defaultLLMProvider');
  const { MessageParser } = require('./messageParser');
  const { logger } = require('./logger');

  module.exports = {
-   DefaultLLMProvider,
    MessageParser,
    logger
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@peopl-health/nexus",
-   "version": "1.7.12",
+   "version": "2.0.0",
    "description": "Core messaging and assistant library for WhatsApp communication platforms",
    "keywords": [
      "whatsapp",
@@ -93,7 +93,7 @@
      "@anthropic-ai/sdk": "^0.32.0",
      "baileys": "^6.4.0",
      "express": "4.21.2",
-     "openai": "^4.0.0",
+     "openai": "6.2.0",
      "twilio": "5.6.0"
    },
    "engines": {
@@ -1,20 +0,0 @@
- const { OpenAIProvider } = require('../providers/OpenAIProvider');
-
- /**
-  * Default LLM Provider using OpenAI
-  */
- class DefaultLLMProvider {
-   constructor(config = {}) {
-     this.provider = new OpenAIProvider(config);
-   }
-
-   getProvider() {
-     return this.provider;
-   }
-
-   getClient() {
-     return this.provider.getClient();
-   }
- }
-
- module.exports = { DefaultLLMProvider };