@peopl-health/nexus 1.7.12 → 2.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,37 +1,23 @@
  const { Historial_Clinico_ID } = require('../config/airtableConfig.js');
  const AWS = require('../config/awsConfig.js');
- const { combineImagesToPDF, cleanupFiles } = require('../helpers/filesHelper.js');
- const runtimeConfig = require('../config/runtimeConfig');
  const llmConfig = require('../config/llmConfig');
+ const runtimeConfig = require('../config/runtimeConfig');
  const { BaseAssistant } = require('../assistants/BaseAssistant');
- const { OpenAIProvider } = require('../providers/OpenAIProvider');
-
- const configureLLMProvider = (provider) => {
-   if (!provider) {
-     throw new Error('configureLLMProvider requires an OpenAI provider or raw client');
-   }
-
-   if (provider instanceof OpenAIProvider || typeof provider.runConversation === 'function') {
-     llmConfig.setOpenAIProvider(provider);
-     return provider;
-   }
-
-   const wrappedProvider = new OpenAIProvider({ client: provider });
-   llmConfig.setOpenAIProvider(wrappedProvider);
-   return wrappedProvider;
- };
-
- let assistantConfig = null;
- let assistantRegistry = {};
- let customGetAssistantById = null;
+ const { createProvider } = require('../providers/createProvider');

  const { Message, formatTimestamp } = require('../models/messageModel.js');
  const { Thread } = require('../models/threadModel.js');

  const { checkRunStatus, getCurRow } = require('../helpers/assistantHelper.js');
  const { processIndividualMessage, getLastMessages } = require('../helpers/assistantHelper.js');
+ const { combineImagesToPDF, cleanupFiles } = require('../helpers/filesHelper.js');
  const { delay } = require('../helpers/whatsappHelper.js');

+ let assistantConfig = null;
+ let assistantRegistry = {};
+ let customGetAssistantById = null;
+
+
  const configureAssistants = (config) => {
    if (!config) {
      throw new Error('Assistant configuration is required');
@@ -44,53 +30,61 @@ const runAssistantAndWait = async ({
    assistant,
    runConfig = {}
  }) => {
-   if (!thread || !thread.thread_id) {
-     throw new Error('runAssistantAndWait requires a thread with a valid thread_id');
+   if (!thread || !(thread.thread_id || thread.conversation_id)) {
+     throw new Error('runAssistantAndWait requires a thread with a valid thread_id or conversation_id');
    }

    if (!assistant) {
      throw new Error('runAssistantAndWait requires an assistant instance');
    }

-   const provider = llmConfig.requireOpenAIProvider();
+   const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
    const { polling, ...conversationConfig } = runConfig || {};

    const run = await provider.runConversation({
-     conversationId: thread.thread_id,
-     assistantId: thread.assistant_id,
+     threadId: thread?.thread_id,
+     conversationId: thread?.conversation_id,
+     assistantId: thread?.assistant_id,
+     promptId: thread?.prompt_id,
      ...conversationConfig,
    });

-   const filter = thread.code ? { code: thread.code, active: true } : null;
-   if (filter) {
-     await Thread.updateOne(filter, { $set: { run_id: run.id } });
-   }
+   let output = null;
+   if (thread?.thread_id) {
+     const filter = thread.code ? { code: thread.code, active: true } : null;
+     if (filter) {
+       await Thread.updateOne(filter, { $set: { run_id: run.id } });
+     }

-   const maxRetries = polling?.maxRetries ?? 30;
-   let completed = false;
+     const maxRetries = polling?.maxRetries ?? 30;
+     let completed = false;

-   try {
-     completed = await checkRunStatus(assistant, run.thread_id, run.id, 0, maxRetries);
-   } finally {
-     if (filter) {
-       await Thread.updateOne(filter, { $set: { run_id: null } });
+     try {
+       console.log('RUN ID', run.id, thread.thread_id);
+       completed = await checkRunStatus(assistant, thread.thread_id, run.id, 0, maxRetries);
+     } finally {
+       if (filter) {
+         await Thread.updateOne(filter, { $set: { run_id: null } });
+       }
      }
-   }

-   let finalRun = run;
-   try {
-     finalRun = await provider.getRun({ conversationId: run.thread_id, runId: run.id });
-   } catch (error) {
-     console.warn('Warning: unable to retrieve final run state:', error?.message || error);
-   }
+     let finalRun = run;
+     try {
+       finalRun = await provider.getRun({ threadId: run.thread_id, runId: run.id });
+     } catch (error) {
+       console.warn('Warning: unable to retrieve final run state:', error?.message || error);
+     }

-   if (!completed) {
-     return { run: finalRun, completed: false, output: '' };
-   }
+     if (!completed) {
+       return { run: finalRun, completed: false, output: '' };
+     }

-   const output = await provider.getRunText({ conversationId: run.thread_id, runId: run.id, fallback: '' });
+     output = await provider.getRunText({ threadId: run.thread_id, runId: run.id, fallback: '' });
+   } else {
+     output = run.output_text;
+   }

-   return { run: finalRun, completed: true, output };
+   return { completed: true, output };
  };

  const registerAssistant = (assistantId, definition) => {
@@ -171,7 +165,7 @@ const getAssistantById = (assistant_id, thread) => {
      throw new Error(`Assistant '${assistant_id}' not found. Available assistants: ${Object.keys(assistantRegistry).join(', ')}`);
    }

-   const provider = llmConfig.getOpenAIProvider({ instantiate: false });
+   const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
    const sharedClient = provider?.getClient?.() || llmConfig.openaiClient || null;

    if (AssistantClass.prototype instanceof BaseAssistant) {
@@ -196,10 +190,11 @@ const getAssistantById = (assistant_id, thread) => {
  };


- const createAssistant = async (code, assistant_id, messages=[], prevThread=null) => {
+ const createAssistant = async (code, assistant_id, messages=[], force=false) => {
    // If thread already exists, update it
    const findThread = await Thread.findOne({ code: code });
-   if (findThread) {
+   const variant = process.env.VARIANT || 'assistants';
+   if ((findThread?.conversation_id && variant === 'responses') || (findThread?.thread_id && variant === 'assistants')) {
      await Thread.updateOne({ code: code }, { $set: { active: true, stopped: false, assistant_id: assistant_id } });
      return findThread;
    }
@@ -213,34 +208,40 @@ const createAssistant = async (code, assistant_id, messages=[], prevThread=null)
    const initialThread = await assistant.create(code, curRow[0]);

    // Add new messages to memory
-   const provider = llmConfig.requireOpenAIProvider();
+   const provider = createProvider({ variant: variant || 'assistants' });
    for (const message of messages) {
      await provider.addMessage({
-       conversationId: initialThread.id,
+       threadId: initialThread.id,
        role: 'assistant',
        content: message
      });
    }
-
+
+   console.log('initialThread', initialThread);
    // Define new thread data
    const thread = {
      code: code,
-     assistant_id: assistant_id,
-     thread_id: initialThread.id,
+     assistant_id: assistant_id.startsWith('assistant') ? assistant_id : null,
+     thread_id: initialThread.id.startsWith('thread') ? initialThread.id : null,
+     conversation_id: initialThread.id.startsWith('conv') ? initialThread.id : null,
+     prompt_id: assistant_id.startsWith('pmpt') ? assistant_id : null,
      patient_id: patientId,
-     run_id: null,
      nombre: nombre,
      active: true
    };

-   const condition = { thread_id: prevThread?.thread_id };
+   const updateData = Object.fromEntries(
+     Object.entries(thread).filter(([_, v]) => v != null)
+   );
+
+   const condition = { $or: [{ thread_id: findThread?.thread_id }, { conversation_id: findThread?.conversation_id }] };
    const options = { new: true, upsert: true };
-   const updatedThread = await Thread.findOneAndUpdate(condition, thread, options);
+   const updatedThread = await Thread.findOneAndUpdate(condition, {run_id: null, ...updateData}, options);
    console.log('Updated thread:', updatedThread);

    // Delete previous thread
-   if (prevThread) {
-     await provider.deleteConversation(prevThread.thread_id);
+   if (force) {
+     await provider.deleteConversation(findThread.conversation_id || findThread.thread_id);
    }

    return thread;
@@ -252,12 +253,11 @@ const addMsgAssistant = async (code, inMessages, reply = false) => {
    console.log(thread);
    if (thread === null) return null;

-   const provider = llmConfig.requireOpenAIProvider();
-
+   const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
    for (const message of inMessages) {
      console.log(message);
      await provider.addMessage({
-       conversationId: thread.thread_id,
+       threadId: thread.conversation_id || thread.thread_id,
        role: 'assistant',
        content: message
      });
@@ -265,7 +265,7 @@ const addMsgAssistant = async (code, inMessages, reply = false) => {

    if (!reply) return null;

-   const assistant = getAssistantById(thread.assistant_id, thread);
+   const assistant = getAssistantById(thread?.prompt_id || thread?.assistant_id, thread);
    const { output } = await runAssistantAndWait({ thread, assistant });
    console.log('THE ANS IS', output);

@@ -282,7 +282,7 @@ const addInsAssistant = async (code, instruction) => {
    console.log(thread);
    if (thread === null) return null;

-   const assistant = getAssistantById(thread.assistant_id, thread);
+   const assistant = getAssistantById(thread?.prompt_id || thread?.assistant_id, thread);
    const { output } = await runAssistantAndWait({
      thread,
      assistant,
@@ -317,11 +317,11 @@ const getThread = async (code, message = null) => {
      return null;
    }

-   const provider = llmConfig.getOpenAIProvider({ instantiate: false });
+   const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
    while (thread && thread.run_id) {
      console.log(`Wait for ${thread.run_id} to be executed`);
      const activeProvider = provider || llmConfig.requireOpenAIProvider();
-     const run = await activeProvider.getRun({ conversationId: thread.thread_id, runId: thread.run_id });
+     const run = await activeProvider.getRun({ threadId: thread.conversation_id || thread.thread_id, runId: thread.run_id });
      if (run.status === 'cancelled' || run.status === 'expired' || run.status === 'completed') {
        await Thread.updateOne({ code: code }, { $set: { run_id: null } });
      }
@@ -353,28 +353,28 @@ const replyAssistant = async function (code, message_ = null, thread_ = null, ru
    if (!thread || !thread.active) return null;

    const patientReply = await getLastMessages(code);
-   console.log('UNREAD DATA', patientReply);
    if (!patientReply) {
      console.log('No relevant data found for this assistant.');
      return null;
    }

-   const provider = llmConfig.requireOpenAIProvider();
-
-   let activeRuns = await provider.listRuns({ conversationId: thread.thread_id, activeOnly: true });
-   let activeRunsCount = activeRuns?.data?.length || 0;
-   console.log('ACTIVE RUNS:', activeRunsCount, activeRuns?.data?.map(run => ({ id: run.id, status: run.status })));
-   while (activeRunsCount > 0) {
-     console.log(`WAITING FOR ${activeRunsCount} ACTIVE RUNS TO COMPLETE - ${thread.thread_id}`);
-     activeRuns = await provider.listRuns({ conversationId: thread.thread_id, activeOnly: true });
-     activeRunsCount = activeRuns?.data?.length || 0;
-     await delay(5000);
+   const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
+   if (!(thread?.conversation_id)) {
+     let activeRuns = await provider.listRuns({ threadId: thread.thread_id, activeOnly: true });
+     let activeRunsCount = activeRuns?.data?.length || 0;
+     console.log('ACTIVE RUNS:', activeRunsCount, activeRuns?.data?.map(run => ({ id: run.id, status: run.status })));
+     while (activeRunsCount > 0) {
+       console.log(`WAITING FOR ${activeRunsCount} ACTIVE RUNS TO COMPLETE - ${thread.thread_id}`);
+       activeRuns = await provider.listRuns({ threadId: thread.thread_id, activeOnly: true });
+       activeRunsCount = activeRuns?.data?.length || 0;
+       await delay(5000);
+     }
    }

    let patientMsg = false;
    let urls = [];
    for (const reply of patientReply) {
-     const { isNotAssistant, url } = await processIndividualMessage(code, reply, thread);
+     const { isNotAssistant, url } = await processIndividualMessage(code, reply, provider, thread);
      console.log(`isNotAssistant ${isNotAssistant} ${url}`);
      patientMsg = patientMsg || isNotAssistant;
      if (url) urls.push({ 'url': url });
@@ -390,15 +390,13 @@ const replyAssistant = async function (code, message_ = null, thread_ = null, ru
        await AWS.uploadBufferToS3(pdfBuffer, bucket, key, 'application/pdf');
      }
      if (processedFiles && processedFiles.length) {
-       await cleanupFiles(processedFiles);
+       cleanupFiles(processedFiles);
      }
    }

-   thread = await getThread(code);
-   console.log('THREAD STOPPED', code, thread?.stopped);
-   if (!patientMsg || !thread || thread?.stopped) return null;
+   if (!patientMsg) return null;

-   const assistant = getAssistantById(thread.assistant_id, thread);
+   const assistant = getAssistantById(process.env.VARIANT === 'responses' ? thread?.prompt_id : thread?.assistant_id, thread);
    assistant.setReplies(patientReply);

    const { run, output, completed } = await runAssistantAndWait({
@@ -440,7 +438,6 @@ module.exports = {
    switchAssistant,
    configureAssistants,
    registerAssistant,
-   configureLLMProvider,
    overrideGetAssistantById,
    runAssistantAndWait
  };
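
Note on the provider refactor above: configureLLMProvider is gone from the exports, and every call site now builds its provider through createProvider({ variant }), where the variant comes from process.env.VARIANT ('assistants' by default, 'responses' for the prompt/conversation flow). A minimal consumer sketch, assuming the helpers shown in this hunk are re-exported from the package entry point; the ids and config object below are placeholders, not values confirmed by this diff:

    // Sketch only: wiring a 2.x consumer. The prompt id and patient code are placeholders.
    const { configureAssistants, createAssistant, addMsgAssistant } = require('@peopl-health/nexus');

    // 1.x required configureLLMProvider(client) up front; 2.x picks the backend per call
    // from the environment instead.
    process.env.VARIANT = 'responses'; // or 'assistants' (the default)

    configureAssistants({ /* same assistant registry/config shape as in 1.x */ });

    (async () => {
      // With VARIANT=responses, ids starting with 'pmpt' are stored as prompt_id and the
      // thread is keyed by conversation_id; with 'assistants', assistant_id/thread_id are used.
      await createAssistant('patient-code-123', 'pmpt_placeholder', [], false);
      await addMsgAssistant('patient-code-123', ['Hola, ¿cómo te sientes hoy?'], true);
    })();
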
@@ -61,17 +61,9 @@ class MongoStorage {

    async saveMessage(messageData) {
      try {
-       console.log('[MongoStorage] saveMessage called', {
-         code: messageData?.to || messageData?.code || messageData?.numero,
-         from: messageData?.from,
-         provider: messageData?.provider || 'unknown',
-         hasRaw: Boolean(messageData?.raw),
-         hasMedia: Boolean(messageData?.media || messageData?.fileUrl),
-         hasContentSid: Boolean(messageData?.contentSid),
-         is_interactive: messageData.isInteractive,
-         interaction_type: messageData.interactionType
-       });
+       console.log('[MongoStorage] Saving message', messageData);
        const enrichedMessage = await this._enrichTwilioMedia(messageData);
+       console.log('[MongoStorage] Enriched message', enrichedMessage);
        const values = this.buildMessageValues(enrichedMessage);
        const { insertMessage } = require('../models/messageModel');
        await insertMessage(values);
@@ -179,6 +171,7 @@ class MongoStorage {
      const now = new Date();
      const timestamp = now.toISOString();
      const nombre = messageData.nombre_whatsapp || messageData.author || messageData.fromName || runtimeConfig.get('USER_DB_MONGO') || process.env.USER_DB_MONGO || 'Nexus';
+     const processed = messageData.processed || false;

      // Use message body directly (template rendering is now handled by the provider)
      let textBody = messageData.body;
@@ -204,6 +197,7 @@ class MongoStorage {
        numero: normalizedNumero,
        body: textBody,
        timestamp,
+       processed,
        message_id: providerId,
        is_group: isGroup,
        is_media: isMedia,
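
The processed flag added to buildMessageValues defaults to false, so stored messages can serve as a simple work-queue marker for downstream handlers. A hypothetical consumer sketch; the query helpers and the handler parameter are illustrative and not part of this diff:

    // Hypothetical: drain messages that MongoStorage saved with processed: false.
    // Assumes Message is a Mongoose-style model exposing find/updateOne.
    const { Message } = require('../models/messageModel.js');

    async function drainUnprocessed(code, handleReply) {
      const pending = await Message.find({ numero: code, processed: false });
      for (const msg of pending) {
        await handleReply(msg); // caller-supplied handler
        await Message.updateOne({ _id: msg._id }, { $set: { processed: true } });
      }
    }
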
@@ -1,9 +1,7 @@
- const { DefaultLLMProvider } = require('./defaultLLMProvider');
  const { MessageParser } = require('./messageParser');
  const { logger } = require('./logger');

  module.exports = {
-   DefaultLLMProvider,
    MessageParser,
    logger
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@peopl-health/nexus",
-   "version": "1.7.12",
+   "version": "2.0.1",
    "description": "Core messaging and assistant library for WhatsApp communication platforms",
    "keywords": [
      "whatsapp",
@@ -93,7 +93,7 @@
      "@anthropic-ai/sdk": "^0.32.0",
      "baileys": "^6.4.0",
      "express": "4.21.2",
-     "openai": "^4.0.0",
+     "openai": "6.2.0",
      "twilio": "5.6.0"
    },
    "engines": {
@@ -1,20 +0,0 @@
- const { OpenAIProvider } = require('../providers/OpenAIProvider');
-
- /**
-  * Default LLM Provider using OpenAI
-  */
- class DefaultLLMProvider {
-   constructor(config = {}) {
-     this.provider = new OpenAIProvider(config);
-   }
-
-   getProvider() {
-     return this.provider;
-   }
-
-   getClient() {
-     return this.provider.getClient();
-   }
- }
-
- module.exports = { DefaultLLMProvider };
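
The deleted defaultLLMProvider.js was a thin wrapper that constructed an OpenAIProvider and exposed getProvider()/getClient(); it is also dropped from the index hunk shown earlier. Consumers that still need that wrapper can keep an equivalent shim locally. A sketch under the assumption that OpenAIProvider remains exported somewhere reachable (this diff does not remove it; the require path below is a placeholder to adjust for your setup):

    // Local stand-in for the removed DefaultLLMProvider wrapper.
    // Placeholder path: point it at wherever OpenAIProvider is exposed in your install.
    const { OpenAIProvider } = require('@peopl-health/nexus/providers/OpenAIProvider');

    class LocalLLMProvider {
      constructor(config = {}) {
        this.provider = new OpenAIProvider(config); // same construction as the removed class
      }

      getProvider() {
        return this.provider;
      }

      getClient() {
        return this.provider.getClient();
      }
    }

    module.exports = { LocalLLMProvider };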