@peopl-health/nexus 1.7.12 → 2.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,7 @@
1
1
  const llmConfig = require('../config/llmConfig');
2
2
  const { Thread } = require('../models/threadModel');
3
3
  const { getLastNMessages } = require('../helpers/assistantHelper');
4
+ const { createProvider } = require('../providers/createProvider');
4
5
 
5
6
  /**
6
7
  * Flexible base assistant implementation that integrates with OpenAI Threads
@@ -8,7 +9,6 @@ const { getLastNMessages } = require('../helpers/assistantHelper');
8
9
  */
9
10
  class BaseAssistant {
10
11
  constructor(options = {}) {
11
- console.log('options', options);
12
12
  this.assistantId = options.assistantId || null;
13
13
  this.thread = options.thread || null;
14
14
  this.status = options.status || 'idle';
@@ -25,8 +25,10 @@ class BaseAssistant {
25
25
 
26
26
  if (!this.provider && this.client) {
27
27
  try {
28
- const { OpenAIProvider } = require('../providers/OpenAIProvider');
29
- const provider = new OpenAIProvider({ client: this.client });
28
+ const provider = createProvider({
29
+ client: this.client,
30
+ variant: process.env.VARIANT || 'assistants'
31
+ });
30
32
  this.provider = provider;
31
33
  if (typeof llmConfig.setOpenAIProvider === 'function') {
32
34
  llmConfig.setOpenAIProvider(provider);
@@ -54,7 +56,24 @@ class BaseAssistant {
54
56
 
55
57
  _ensureClient() {
56
58
  if (!this.client) {
57
- throw new Error('LLM client not configured. Ensure openaiClient is initialized.');
59
+ this.client = this.provider?.getClient?.() || null;
60
+ }
61
+
62
+ if (!this.client) {
63
+ throw new Error('LLM client not configured. Ensure client is initialized.');
64
+ }
65
+
66
+ if (!this.provider) {
67
+ try {
68
+ const variant = process.env.VARIANT || 'assistants';
69
+ const provider = createProvider({ client: this.client, variant });
70
+ this.provider = provider;
71
+ if (typeof llmConfig.setOpenAIProvider === 'function') {
72
+ llmConfig.setOpenAIProvider(provider);
73
+ }
74
+ } catch (error) {
75
+ console.warn('[BaseAssistant] Failed to bootstrap provider:', error?.message || error);
76
+ }
58
77
  }
59
78
  }
60
79
 
@@ -147,35 +166,41 @@ class BaseAssistant {
147
166
  throw new Error('Assistant thread not initialized. Call create() before sendMessage().');
148
167
  }
149
168
 
150
- await this.client.beta.threads.messages.create(
151
- this.thread.thread_id,
152
- { role: 'user', content: message }
153
- );
169
+ const provider = this.provider || null;
170
+ if (!provider || typeof provider.addMessage !== 'function') {
171
+ throw new Error('Provider not configured. Ensure configureLLMProvider has been called.');
172
+ }
154
173
 
155
- const runConfig = {
156
- assistant_id: this.assistantId,
157
- ...options
158
- };
174
+ const assistantId = this.assistantId;
175
+ const threadId = this.thread.thread_id;
176
+ await provider.addMessage({ threadId, role: 'user', content: message });
177
+
178
+ const runConfig = { threadId, assistantId, ...options };
159
179
 
160
180
  const toolSchemas = this.getToolSchemas();
161
181
  if (toolSchemas.length > 0) {
162
182
  runConfig.tools = toolSchemas;
163
183
  }
164
184
 
165
- const run = await this.client.beta.threads.runs.create(
166
- this.thread.thread_id,
167
- runConfig
168
- );
185
+ const run = await provider.runConversation(runConfig);
169
186
 
170
- return await this.waitForCompletion(this.thread.thread_id, run.id);
187
+ return await this.waitForCompletion(threadId, run.id, options);
171
188
  }
172
189
 
173
190
  async waitForCompletion(threadId, runId, { interval = 2000, maxAttempts = 30 } = {}) {
174
191
  this._ensureClient();
175
- let attempts = 0;
192
+ const provider = this.provider || null;
193
+ if (!provider || typeof provider.getRun !== 'function') {
194
+ throw new Error('Provider not configured. Cannot poll run status.');
195
+ }
176
196
 
197
+ let attempts = 0;
177
198
  while (attempts < maxAttempts) {
178
- const run = await this.client.beta.threads.runs.retrieve(threadId, runId);
199
+ const run = await provider.getRun({ threadId, runId });
200
+
201
+ if (!run) {
202
+ throw new Error('Unable to retrieve run status.');
203
+ }
179
204
 
180
205
  if (run.status === 'completed') {
181
206
  return run;
@@ -183,7 +208,7 @@ class BaseAssistant {
183
208
 
184
209
  if (run.status === 'requires_action') {
185
210
  await this.handleRequiresAction(run);
186
- } else if (['failed', 'cancelled', 'expired', 'incomplete'].includes(run.status)) {
211
+ } else if (['failed', 'cancelled', 'expired', 'incomplete', 'errored'].includes(run.status)) {
187
212
  throw new Error(`Assistant run ended with status '${run.status}'`);
188
213
  }
189
214
 
@@ -199,13 +224,26 @@ class BaseAssistant {
199
224
  const threadRef = threadDoc || this.thread;
200
225
  if (!threadRef) return [];
201
226
 
202
- const response = await this.client.beta.threads.messages.list(threadRef.thread_id, { order: 'asc' });
203
- return response.data.map((msg) => {
204
- const textContents = msg.content
205
- .filter((part) => part.type === 'text')
206
- .map((part) => part.text.value);
227
+ const provider = this.provider || null;
228
+ if (!provider || typeof provider.listMessages !== 'function') {
229
+ throw new Error('OpenAI provider not configured. Cannot list messages.');
230
+ }
231
+
232
+ const response = await provider.listMessages({ threadId: threadRef.thread_id, order: 'asc' });
233
+ const messages = Array.isArray(response?.data) ? response.data : Array.isArray(response?.items) ? response.items : [];
234
+
235
+ return messages.map((msg) => {
236
+ const parts = Array.isArray(msg.content) ? msg.content : [];
237
+ const textContents = parts
238
+ .map((part) => {
239
+ if (part.type === 'text' && part.text?.value) return part.text.value;
240
+ if ((part.type === 'input_text' || part.type === 'output_text') && part.text) return part.text;
241
+ return null;
242
+ })
243
+ .filter(Boolean);
244
+
207
245
  const content = textContents.length <= 1 ? textContents[0] || '' : textContents;
208
- return { role: msg.role, content };
246
+ return { role: msg.role || 'user', content };
209
247
  });
210
248
  }
211
249
 
@@ -218,19 +256,25 @@ class BaseAssistant {
218
256
  this.lastMessages = await getLastNMessages(whatsappId, 20);
219
257
  }
220
258
 
259
+ const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
260
+ if (!provider || typeof provider.createConversation !== 'function') {
261
+ throw new Error('Provider not configured. Cannot create conversation.');
262
+ }
263
+
221
264
  const initialMessages = await this.buildInitialMessages({ code, context });
222
- const threadPayload = {};
265
+ const conversation = await provider.createConversation({
266
+ messages: initialMessages,
267
+ });
223
268
 
224
- if (initialMessages.length > 0) {
225
- threadPayload.messages = initialMessages.map((msg) => ({
226
- role: msg.role || 'assistant',
227
- content: msg.content
228
- }));
229
- }
269
+ this.thread = {
270
+ ...conversation,
271
+ thread_id: conversation?.id,
272
+ assistant_id: this.assistantId.startsWith('assistant') ? this.assistantId : null,
273
+ prompt_id: this.assistantId.startsWith('pmpt') ? this.assistantId : null,
274
+ code,
275
+ };
230
276
 
231
- const thread = await this.client.beta.threads.create(threadPayload);
232
- this.thread = thread;
233
- return thread;
277
+ return this.thread;
234
278
  }
235
279
 
236
280
  async buildInitialMessages({ code }) {
@@ -264,16 +308,17 @@ class BaseAssistant {
264
308
  }
265
309
  }
266
310
 
267
- if (!this.client) {
268
- console.warn('[BaseAssistant] Cannot submit tool outputs: client not configured');
311
+ const provider = this.provider || null;
312
+ if (!provider || typeof provider.submitToolOutputs !== 'function') {
313
+ console.warn('[BaseAssistant] Cannot submit tool outputs: provider not configured');
269
314
  return outputs;
270
315
  }
271
316
 
272
- await this.client.beta.threads.runs.submitToolOutputs(
273
- run.thread_id,
274
- run.id,
275
- { tool_outputs: outputs }
276
- );
317
+ await provider.submitToolOutputs({
318
+ threadId: run.thread_id || this.thread?.thread_id,
319
+ runId: run.id,
320
+ toolOutputs: outputs
321
+ });
277
322
 
278
323
  return outputs;
279
324
  }
@@ -78,7 +78,7 @@ async function downloadFileFromS3(bucketName, key, downloadPath) {
78
78
  }
79
79
  }
80
80
 
81
- async function generatePresignedUrl(bucketName, key, expiration = 300) {
81
+ async function generatePresignedUrl(bucketName, key, expiration = 3000) {
82
82
  const params = {
83
83
  Bucket: bucketName,
84
84
  Key: key,
@@ -1,22 +1,27 @@
1
1
  const runtimeConfig = require('./runtimeConfig');
2
- const { OpenAIProvider } = require('../providers/OpenAIProvider');
2
+ const { createProvider } = require('../providers/createProvider');
3
3
 
4
4
  let anthropicClient = null;
5
5
  let openaiClient = null;
6
- let openaiProviderInstance = null;
6
+ let providerInstance = null;
7
+ let providerVariant = 'assistants';
7
8
 
8
9
  const setOpenAIClient = (client) => {
9
10
  openaiClient = client || null;
10
11
  module.exports.openaiClient = openaiClient;
11
12
  if (!client) {
12
- openaiProviderInstance = null;
13
- module.exports.openaiProvider = null;
13
+ providerInstance = null;
14
+ module.exports.providerInstance = null;
15
+ providerVariant = 'assistants';
16
+ module.exports.providerVariant = providerVariant;
14
17
  }
15
18
  };
16
19
 
17
20
  const setOpenAIProvider = (provider) => {
18
- openaiProviderInstance = provider || null;
19
- module.exports.openaiProvider = openaiProviderInstance;
21
+ providerInstance = provider || null;
22
+ module.exports.providerInstance = providerInstance;
23
+ providerVariant = provider?.variant || providerVariant || 'assistants';
24
+ module.exports.providerVariant = providerVariant;
20
25
 
21
26
  if (!provider) {
22
27
  setOpenAIClient(null);
@@ -33,10 +38,10 @@ const setOpenAIProvider = (provider) => {
33
38
  };
34
39
 
35
40
  const getOpenAIProvider = ({ instantiate = true } = {}) => {
36
- if (openaiProviderInstance) return openaiProviderInstance;
41
+ if (providerInstance) return providerInstance;
37
42
  if (!instantiate) return null;
38
43
  if (!openaiClient) return null;
39
- const provider = new OpenAIProvider({ client: openaiClient });
44
+ const provider = createProvider({ client: openaiClient, variant: providerVariant });
40
45
  setOpenAIProvider(provider);
41
46
  return provider;
42
47
  };
@@ -69,7 +74,8 @@ const resolveAnthropicClient = () => {
69
74
 
70
75
  module.exports = {
71
76
  openaiClient,
72
- openaiProvider: openaiProviderInstance,
77
+ providerInstance,
78
+ providerVariant,
73
79
  setOpenAIClient,
74
80
  setOpenAIProvider,
75
81
  getOpenAIProvider,
@@ -55,14 +55,7 @@ const createAssistantController = async (req, res) => {
55
55
  try {
56
56
  console.log('codes', codes);
57
57
  for (const code of codes) {
58
- const thread = await Thread.findOne({ code: code });
59
- if (thread !== null) {
60
- await switchAssistant(code, assistant_id);
61
- console.log('FORCE', force);
62
- if (!force) continue;
63
- }
64
-
65
- await createAssistant(code, assistant_id, [...instrucciones, ...messages], thread);
58
+ await createAssistant(code, assistant_id, [...instrucciones, ...messages], force);
66
59
  console.log('messages', messages);
67
60
  for (const message of messages) {
68
61
  console.log('message', message);
@@ -498,7 +498,8 @@ class NexusMessaging {
498
498
  if (response) {
499
499
  await this.sendMessage({
500
500
  code: from,
501
- body: response
501
+ body: response,
502
+ processed: true
502
503
  });
503
504
  }
504
505
  } catch (error) {
@@ -7,6 +7,7 @@ const { convertPdfToImages } = require('./filesHelper.js');
7
7
  const { analyzeImage } = require('../helpers/llmsHelper.js');
8
8
 
9
9
  const { getRecordByFilter } = require('../services/airtableService.js');
10
+ const { createProvider } = require('../providers/createProvider');
10
11
 
11
12
  const fs = require('fs');
12
13
  const path = require('path');
@@ -17,11 +18,12 @@ const mode = process.env.NODE_ENV || 'dev';
17
18
 
18
19
  async function checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxRetries = 30) {
19
20
  try {
20
- const provider = llmConfig.requireOpenAIProvider();
21
- const run = await provider.getRun({ conversationId: thread_id, runId: run_id });
21
+ const provider = createProvider({ variant: process.env.VARIANT || 'assistants' });
22
+ const run = await provider.getRun({ threadId: thread_id, runId: run_id });
22
23
  console.log(`Status: ${run.status} ${thread_id} ${run_id} (attempt ${retryCount + 1})`);
23
24
 
24
- if (run.status === 'failed' || run.status === 'expired' || run.status === 'incomplete') {
25
+ const failedStatuses = ['failed', 'expired', 'incomplete', 'errored'];
26
+ if (failedStatuses.includes(run.status)) {
25
27
  console.log(`Run failed. ${run.status} `);
26
28
  console.log('Error:');
27
29
  console.log(run);
@@ -37,7 +39,7 @@ async function checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxR
37
39
  await assistant.handleRequiresAction(run);
38
40
  await new Promise(resolve => setTimeout(resolve, 5000));
39
41
  return checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries);
40
- } else if (run.status !== 'completed') {
42
+ } else if (!['completed', 'succeeded'].includes(run.status)) {
41
43
  if (retryCount >= maxRetries) {
42
44
  return false;
43
45
  }
@@ -206,14 +208,13 @@ async function downloadMediaAndCreateFile(code, reply) {
206
208
  return fileNames;
207
209
  }
208
210
 
209
- async function processIndividualMessage(code, reply, thread) {
211
+ async function processIndividualMessage(code, reply, provider, thread) {
212
+ const tempFiles = [];
210
213
  try {
211
- const provider = llmConfig.requireOpenAIProvider();
212
214
  const formattedMessage = formatMessage(reply);
213
215
  console.log('[processIndividualMessage] formattedMessage:', formattedMessage);
214
216
  const isNotAssistant = !reply.from_me;
215
217
  let messagesChat = [];
216
- let attachments = [];
217
218
  let url = null;
218
219
 
219
220
  if (formattedMessage) {
@@ -224,6 +225,7 @@ async function processIndividualMessage(code, reply, thread) {
224
225
  if (reply.is_media) {
225
226
  console.log('IS MEDIA', reply.is_media);
226
227
  const fileNames = await downloadMediaAndCreateFile(code, reply);
228
+ tempFiles.push(...fileNames);
227
229
  for (const fileName of fileNames) {
228
230
  console.log(fileName);
229
231
  // Skip WBMP images and stickers
@@ -255,13 +257,17 @@ async function processIndividualMessage(code, reply, thread) {
255
257
  });
256
258
  } else {
257
259
  console.log('Add attachment');
258
- const file = await provider.uploadFile({
260
+ /*const file = await provider.uploadFile({
259
261
  file: fs.createReadStream(fileName),
260
262
  purpose: 'vision',
261
263
  });
262
264
  messagesChat.push({
263
265
  type: 'image_file',
264
266
  image_file: { file_id: file.id },
267
+ });*/
268
+ messagesChat.push({
269
+ type: 'text',
270
+ text: imageAnalysis.description,
265
271
  });
266
272
  }
267
273
  } else if (fileName.includes('audio')) {
@@ -271,7 +277,6 @@ async function processIndividualMessage(code, reply, thread) {
271
277
  language: 'es'
272
278
  });
273
279
  const transcriptText = audioTranscript?.text || audioTranscript;
274
- console.log('Inside AUDIO', transcriptText);
275
280
  messagesChat.push({
276
281
  type: 'text',
277
282
  text: transcriptText,
@@ -280,30 +285,40 @@ async function processIndividualMessage(code, reply, thread) {
280
285
  }
281
286
  }
282
287
 
283
- console.log('messagesChat', messagesChat);
284
- console.log('[processIndividualMessage] attachments:', attachments);
285
- console.log('[processIndividualMessage] formattedMessage:', formattedMessage);
286
-
288
+ console.log('[processIndividualMessage] messagesChat', messagesChat);
287
289
 
288
290
  // ONLY add user messages to the thread
291
+ const threadId = process.env.VARIANT === 'responses' ? thread?.conversation_id : thread?.thread_id;
289
292
  if (isNotAssistant) {
293
+ console.log('Adding user message to thread', thread);
290
294
  await provider.addMessage({
291
- conversationId: thread.thread_id,
295
+ threadId,
292
296
  role: 'user',
293
- content: messagesChat,
294
- attachments: attachments
297
+ content: messagesChat
295
298
  });
296
299
  console.log('[processIndividualMessage] User message added to thread');
297
300
  }
298
301
 
299
302
  await Message.updateOne(
300
303
  { message_id: reply.message_id, timestamp: reply.timestamp },
301
- { $set: { assistant_id: thread.assistant_id, thread_id: thread.thread_id } }
304
+ { $set: { assistant_id: thread.assistant_id, thread_id: threadId } }
302
305
  );
303
306
 
304
307
  return {isNotAssistant, url};
305
308
  } catch (err) {
306
309
  console.log(`Error inside process message ${err}`);
310
+ } finally {
311
+ if (tempFiles.length > 0) {
312
+ await Promise.all(tempFiles.map(async (filePath) => {
313
+ try {
314
+ await fs.promises.unlink(filePath);
315
+ } catch (error) {
316
+ if (error?.code !== 'ENOENT') {
317
+ console.warn('[processIndividualMessage] Failed to remove temp file:', filePath, error?.message || error);
318
+ }
319
+ }
320
+ }));
321
+ }
307
322
  }
308
323
  }
309
324
 
@@ -42,6 +42,33 @@ async function analyzeImage(imagePath) {
42
42
  const imageBuffer = await fs.promises.readFile(imagePath);
43
43
  const base64Image = imageBuffer.toString('base64');
44
44
 
45
+ // Description of the image
46
+ const imageDescription = 'Describe the image in detail.';
47
+ const messageDescription = await anthropicClient.messages.create({
48
+ model: 'claude-3-7-sonnet-20250219',
49
+ max_tokens: 1024,
50
+ messages: [
51
+ {
52
+ role: 'user',
53
+ content: [
54
+ {
55
+ type: 'image',
56
+ source: {
57
+ type: 'base64',
58
+ media_type: mimeType,
59
+ data: base64Image,
60
+ },
61
+ },
62
+ {
63
+ type: 'text',
64
+ text: imageDescription,
65
+ },
66
+ ],
67
+ },
68
+ ],
69
+ });
70
+ const description = messageDescription.content[0].text;
71
+
45
72
  // Create a more specific prompt for table detection and extraction
46
73
  const tablePrompt = `Please analyze this image and respond in the following format:
47
74
  1. First, determine if there is a table in the image.
@@ -187,8 +214,8 @@ Ejemplo 1:
187
214
  const isTable = (table === null) ? false : true;
188
215
  const isRelevant = (messageRelevanceStr.includes('YES')) ? true : false;
189
216
 
190
- return {medical_analysis: messageAnalysisStr, medical_relevance: isRelevant,
191
- has_table: isTable, table_data: table};
217
+ return {description: description, medical_analysis: messageAnalysisStr,
218
+ medical_relevance: isRelevant, has_table: isTable, table_data: table};
192
219
  } catch (error) {
193
220
  console.error('Error analyzing image:', error);
194
221
  throw error;
package/lib/index.js CHANGED
@@ -1,7 +1,9 @@
1
1
  const { NexusMessaging, setDefaultInstance } = require('./core/NexusMessaging');
2
2
  const { MongoStorage } = require('./storage/MongoStorage');
3
3
  const { MessageParser } = require('./utils/messageParser');
4
- const { DefaultLLMProvider } = require('./utils/defaultLLMProvider');
4
+ const { createProvider } = require('./providers/createProvider');
5
+ const { OpenAIAssistantsProvider } = require('./providers/OpenAIAssistantsProvider');
6
+ const { OpenAIResponsesProvider } = require('./providers/OpenAIResponsesProvider');
5
7
  const { loadNexusConfig } = require('./config/configLoader');
6
8
  const templateController = require('./controllers/templateController');
7
9
  const templateFlowController = require('./controllers/templateFlowController');
@@ -9,14 +11,13 @@ const interactive = require('./interactive');
9
11
  const runtimeConfig = require('./config/runtimeConfig');
10
12
  const llmConfigModule = require('./config/llmConfig');
11
13
  const {
12
- configureLLMProvider: configureAssistantsLLM,
13
14
  registerAssistant,
14
15
  overrideGetAssistantById,
15
16
  configureAssistants: setAssistantsConfig
16
17
  } = require('./services/assistantService');
17
18
  const { TwilioProvider } = require('./adapters/TwilioProvider');
18
19
  const { BaileysProvider } = require('./adapters/BaileysProvider');
19
- const { BaseAssistant: CoreBaseAssistant } = require('./assistants/BaseAssistant');
20
+ const { BaseAssistant } = require('./assistants/BaseAssistant');
20
21
  const {
21
22
  setPreprocessingHandler,
22
23
  hasPreprocessingHandler,
@@ -134,16 +135,14 @@ class Nexus {
134
135
 
135
136
  // Initialize default LLM provider if requested
136
137
  if (llm === 'openai') {
137
- this.llmProvider = new DefaultLLMProvider(llmConfig);
138
138
  try {
139
- const providerInstance = typeof this.llmProvider.getProvider === 'function'
140
- ? this.llmProvider.getProvider()
141
- : null;
139
+ const providerInstance = createProvider(llmConfig);
140
+ this.llmProvider = providerInstance;
142
141
 
143
142
  if (providerInstance && typeof llmConfigModule.setOpenAIProvider === 'function') {
144
143
  llmConfigModule.setOpenAIProvider(providerInstance);
145
- } else if (typeof this.llmProvider.getClient === 'function' && typeof llmConfigModule.setOpenAIClient === 'function') {
146
- llmConfigModule.setOpenAIClient(this.llmProvider.getClient());
144
+ } else if (providerInstance?.getClient && typeof llmConfigModule.setOpenAIClient === 'function') {
145
+ llmConfigModule.setOpenAIClient(providerInstance.getClient());
147
146
  }
148
147
  } catch (err) {
149
148
  console.warn('[Nexus] Failed to expose OpenAI provider:', err?.message || err);
@@ -175,15 +174,7 @@ class Nexus {
175
174
  const assistantsConfig = assistantsOpt || assistantOpt;
176
175
  try {
177
176
  if (this.llmProvider && typeof configureAssistantsLLM === 'function') {
178
- const providerInstance = typeof this.llmProvider.getProvider === 'function'
179
- ? this.llmProvider.getProvider()
180
- : null;
181
-
182
- if (providerInstance) {
183
- configureAssistantsLLM(providerInstance);
184
- } else if (typeof this.llmProvider.getClient === 'function') {
185
- configureAssistantsLLM(this.llmProvider.getClient());
186
- }
177
+ configureAssistantsLLM(this.llmProvider);
187
178
  }
188
179
  if (assistantsConfig) {
189
180
  if (assistantsConfig.registry && typeof assistantsConfig.registry === 'object') {
@@ -313,7 +304,7 @@ class Nexus {
313
304
 
314
305
  /**
315
306
  * Get LLM provider instance
316
- * @returns {DefaultLLMProvider|null} LLM provider instance
307
+ * @returns {Object|null} LLM provider instance
317
308
  */
318
309
  getLLMProvider() {
319
310
  return this.llmProvider;
@@ -330,11 +321,11 @@ module.exports = {
330
321
  BaileysProvider,
331
322
  MongoStorage,
332
323
  MessageParser,
333
- DefaultLLMProvider,
334
- OpenAIProvider: require('./providers/OpenAIProvider').OpenAIProvider,
335
- BaseAssistant: CoreBaseAssistant,
324
+ createProvider,
325
+ OpenAIAssistantsProvider,
326
+ OpenAIResponsesProvider,
327
+ BaseAssistant,
336
328
  registerAssistant,
337
- configureAssistantsLLM,
338
329
  overrideGetAssistantById,
339
330
  configureAssistants: setAssistantsConfig,
340
331
  setPreprocessingHandler,
@@ -47,7 +47,7 @@ const messageSchema = new mongoose.Schema({
47
47
  }
48
48
  }, { timestamps: true });
49
49
 
50
- messageSchema.index({ message_id: 1, body: 1 }, { unique: true });
50
+ messageSchema.index({ message_id: 1, timestamp: 1 }, { unique: true });
51
51
 
52
52
  messageSchema.pre('save', function (next) {
53
53
  if (this.timestamp) {
@@ -75,10 +75,11 @@ async function insertMessage(values) {
75
75
  group_id: values.group_id,
76
76
  reply_id: values.reply_id,
77
77
  from_me: values.from_me,
78
- processed: skipNumbers.includes(values.numero),
78
+ processed: values.processed || skipNumbers.includes(values.numero),
79
79
  media: values.media ? values.media : null,
80
80
  content_sid: values.content_sid || null
81
81
  };
82
+ console.log('Inserting message', messageData);
82
83
 
83
84
  await Message.findOneAndUpdate(
84
85
  { message_id: values.message_id, body: values.body },
@@ -113,7 +114,7 @@ function getMessageValues(message, content, reply, is_media) {
113
114
  const group_id = is_group ? message.key.remoteJid : null;
114
115
  const reply_id = reply || null;
115
116
  const from_me = message.key.fromMe;
116
-
117
+
117
118
  return {
118
119
  nombre_whatsapp,
119
120
  numero,
@@ -3,6 +3,8 @@ const mongoose = require('mongoose');
3
3
  const threadSchema = new mongoose.Schema({
4
4
  code: { type: String, required: true },
5
5
  assistant_id: { type: String, required: true },
6
+ conversation_id: { type: String, required: true },
7
+ prompt_id: { type: String, default: null },
6
8
  thread_id: { type: String, required: true },
7
9
  patient_id: { type: String, default: null },
8
10
  run_id: { type: String, default: null },
@@ -14,6 +16,7 @@ const threadSchema = new mongoose.Schema({
14
16
 
15
17
  threadSchema.index({ code: 1, active: 1 });
16
18
  threadSchema.index({ thread_id: 1 });
19
+ threadSchema.index({ conversation_id: 1 });
17
20
 
18
21
  const Thread = mongoose.model('Thread', threadSchema);
19
22