@peopl-health/nexus 1.6.3 → 1.6.5

This diff compares the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
@@ -17,7 +17,27 @@ class BaseAssistant {
      this.lastMessages = null;
      this.createdAt = new Date();
 
-     this.client = options.client || llmConfig.openaiClient || null;
+     const existingProvider = llmConfig.getOpenAIProvider({ instantiate: false });
+     this.provider = options.provider || existingProvider || null;
+     this.client = options.client
+       || (this.provider && this.provider.getClient ? this.provider.getClient() : null)
+       || llmConfig.openaiClient
+       || null;
+
+     if (!this.provider && this.client) {
+       try {
+         const { OpenAIProvider } = require('../providers/OpenAIProvider');
+         const provider = new OpenAIProvider({ client: this.client });
+         this.provider = provider;
+         if (typeof llmConfig.setOpenAIProvider === 'function') {
+           llmConfig.setOpenAIProvider(provider);
+         }
+       } catch (err) {
+         if (process.env.NODE_ENV !== 'production') {
+           console.warn('[BaseAssistant] Failed to initialise OpenAIProvider from client:', err?.message || err);
+         }
+       }
+     }
 
      this.tools = new Map();
 
      if (Array.isArray(options.tools)) {
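With this change the constructor resolves `this.provider` from an injected option first, then from the shared instance registered in `llmConfig`, and derives `this.client` from whichever is available. A minimal construction sketch, assuming the package-level exports added later in this diff and a placeholder assistant id:

    const { OpenAIProvider, BaseAssistant } = require('@peopl-health/nexus');

    const provider = new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY });
    // 'asst_123' is a placeholder Assistants API id.
    const assistant = new BaseAssistant({ assistantId: 'asst_123', provider });
    // With no raw `client` option, assistant.client falls back to provider.getClient().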
@@ -67,7 +87,7 @@ class BaseAssistant {
      this.thread = thread;
    }
 
-   set_replies(replies) {
+   setReplies(replies) {
      this.replies = replies;
    }
 
@@ -1,6 +1,53 @@
  const runtimeConfig = require('./runtimeConfig');
+ const { OpenAIProvider } = require('../providers/OpenAIProvider');
 
  let anthropicClient = null;
+ let openaiClient = null;
+ let openaiProviderInstance = null;
+
+ const setOpenAIClient = (client) => {
+   openaiClient = client || null;
+   module.exports.openaiClient = openaiClient;
+   if (!client) {
+     openaiProviderInstance = null;
+     module.exports.openaiProvider = null;
+   }
+ };
+
+ const setOpenAIProvider = (provider) => {
+   openaiProviderInstance = provider || null;
+   module.exports.openaiProvider = openaiProviderInstance;
+
+   if (!provider) {
+     setOpenAIClient(null);
+     return;
+   }
+
+   const client = typeof provider.getClient === 'function'
+     ? provider.getClient()
+     : provider.client;
+
+   if (client) {
+     setOpenAIClient(client);
+   }
+ };
+
+ const getOpenAIProvider = ({ instantiate = true } = {}) => {
+   if (openaiProviderInstance) return openaiProviderInstance;
+   if (!instantiate) return null;
+   if (!openaiClient) return null;
+   const provider = new OpenAIProvider({ client: openaiClient });
+   setOpenAIProvider(provider);
+   return provider;
+ };
+
+ const requireOpenAIProvider = (options) => {
+   const provider = getOpenAIProvider(options);
+   if (!provider) {
+     throw new Error('OpenAI provider not configured. Call configureLLMProvider first.');
+   }
+   return provider;
+ };
 
  const resolveAnthropicClient = () => {
    if (anthropicClient) return anthropicClient;
@@ -21,7 +68,12 @@ const resolveAnthropicClient = () => {
  };
 
  module.exports = {
-   openaiClient: null,
+   openaiClient,
+   openaiProvider: openaiProviderInstance,
+   setOpenAIClient,
+   setOpenAIProvider,
+   getOpenAIProvider,
+   requireOpenAIProvider,
    get anthropicClient() {
      return resolveAnthropicClient();
    }
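Together these exports let an integrator register either a provider or a raw client once and resolve it lazily elsewhere. A minimal sketch, assuming the in-package module path shown:

    const { OpenAI } = require('openai');
    const llmConfig = require('./lib/config/llmConfig');

    // Registering a raw client is enough; getOpenAIProvider() wraps it
    // in an OpenAIProvider on first use and caches the instance.
    llmConfig.setOpenAIClient(new OpenAI({ apiKey: process.env.OPENAI_API_KEY }));

    const provider = llmConfig.requireOpenAIProvider(); // throws if nothing was registered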
@@ -16,9 +16,8 @@ const mode = process.env.NODE_ENV || 'dev';
 
  async function checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxRetries = 30) {
    try {
-     const client = llmConfig.openaiClient;
-     if (!client) throw new Error('OpenAI client not configured');
-     const run = await client.beta.threads.runs.retrieve(thread_id, run_id);
+     const provider = llmConfig.requireOpenAIProvider();
+     const run = await provider.getRun({ conversationId: thread_id, runId: run_id });
      console.log(`Status: ${run.status} ${thread_id} ${run_id} (attempt ${retryCount + 1})`);
 
      if (run.status === 'failed' || run.status === 'expired' || run.status === 'incomplete') {
@@ -49,9 +48,8 @@ async function checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxR
 
  async function checkIfFinished(text) {
    try {
-     const client = llmConfig.openaiClient;
-     if (!client) throw new Error('OpenAI client not configured');
-     const completion = await client.chat.completions.create({
+     const provider = llmConfig.requireOpenAIProvider();
+     const completion = await provider.createChatCompletion({
        model: 'gpt-4o-mini',
        messages: [
          {
@@ -189,6 +187,7 @@ async function downloadMediaAndCreateFile(code, reply) {
 
  async function processMessage(code, reply, thread) {
    try {
+     const provider = llmConfig.requireOpenAIProvider();
      const formattedMessage = formatMessage(reply);
      const isNotAssistant = !reply.from_me;
      let messagesChat = [];
@@ -234,9 +233,7 @@ async function processMessage(code, reply, thread) {
          });
        } else {
          console.log('Add attachment');
-         const client = llmConfig.openaiClient;
-         if (!client) throw new Error('OpenAI client not configured');
-         const file = await client.files.create({
+         const file = await provider.uploadFile({
            file: fs.createReadStream(fileName),
            purpose: 'vision',
          });
@@ -246,18 +243,16 @@ async function processMessage(code, reply, thread) {
          });
        }
      } else if (fileName.includes('audio')) {
-       const client = llmConfig.openaiClient;
-       if (!client) throw new Error('OpenAI client not configured');
-       const audioTranscript = await client.audio.transcriptions.create({
-         model: 'whisper-1',
+       const audioTranscript = await provider.transcribeAudio({
          file: fs.createReadStream(fileName),
-         response_format: 'text',
+         responseFormat: 'text',
          language: 'es'
        });
-       console.log('Inside AUDIO', audioTranscript);
+       const transcriptText = audioTranscript?.text || audioTranscript;
+       console.log('Inside AUDIO', transcriptText);
        messagesChat.push({
          type: 'text',
-         text: audioTranscript,
+         text: transcriptText,
        });
      }
    }
@@ -266,9 +261,8 @@ async function processMessage(code, reply, thread) {
      console.log('messagesChat', messagesChat);
      console.log('attachments', attachments);
 
-     const client = llmConfig.openaiClient;
-     if (!client) throw new Error('OpenAI client not configured');
-     await client.beta.threads.messages.create(thread.thread_id, {
+     await provider.addMessage({
+       conversationId: thread.thread_id,
        role: 'user',
        content: messagesChat,
        attachments: attachments
package/lib/index.js CHANGED
@@ -131,11 +131,17 @@ class Nexus {
      if (llm === 'openai') {
        this.llmProvider = new DefaultLLMProvider(llmConfig);
        try {
-         if (typeof this.llmProvider.getClient === 'function') {
-           llmConfigModule.openaiClient = this.llmProvider.getClient();
+         const providerInstance = typeof this.llmProvider.getProvider === 'function'
+           ? this.llmProvider.getProvider()
+           : null;
+
+         if (providerInstance && typeof llmConfigModule.setOpenAIProvider === 'function') {
+           llmConfigModule.setOpenAIProvider(providerInstance);
+         } else if (typeof this.llmProvider.getClient === 'function' && typeof llmConfigModule.setOpenAIClient === 'function') {
+           llmConfigModule.setOpenAIClient(this.llmProvider.getClient());
          }
        } catch (err) {
-         console.warn('[Nexus] Failed to expose OpenAI client:', err?.message || err);
+         console.warn('[Nexus] Failed to expose OpenAI provider:', err?.message || err);
        }
      }
 
@@ -163,9 +169,16 @@ class Nexus {
      // Configure Assistants (registry + overrides)
      const assistantsConfig = assistantsOpt || assistantOpt;
      try {
-       if (this.llmProvider && typeof configureAssistantsLLM === 'function' && typeof this.llmProvider.getClient === 'function') {
-         // Provide the raw OpenAI client to the assistant service
-         configureAssistantsLLM(this.llmProvider.getClient());
+       if (this.llmProvider && typeof configureAssistantsLLM === 'function') {
+         const providerInstance = typeof this.llmProvider.getProvider === 'function'
+           ? this.llmProvider.getProvider()
+           : null;
+
+         if (providerInstance) {
+           configureAssistantsLLM(providerInstance);
+         } else if (typeof this.llmProvider.getClient === 'function') {
+           configureAssistantsLLM(this.llmProvider.getClient());
+         }
        }
       if (assistantsConfig) {
         if (assistantsConfig.registry && typeof assistantsConfig.registry === 'object') {
@@ -313,6 +326,7 @@ module.exports = {
    MongoStorage,
    MessageParser,
    DefaultLLMProvider,
+   OpenAIProvider: require('./providers/OpenAIProvider').OpenAIProvider,
    BaseAssistant: CoreBaseAssistant,
    registerAssistant,
    configureAssistantsLLM,
@@ -0,0 +1,307 @@
+ const { OpenAI } = require('openai');
+
+ /**
+  * Wrapper around the OpenAI SDK that exposes a higher level interface for
+  * common operations while remaining compatible with existing callers that
+  * expect the raw SDK surface (e.g. `.beta`).
+  */
+ class OpenAIProvider {
+   constructor(options = {}) {
+     const {
+       apiKey = process.env.OPENAI_API_KEY,
+       organization,
+       client,
+       defaultModels = {},
+     } = options;
+
+     if (!client && !apiKey) {
+       throw new Error('OpenAIProvider requires an API key or a preconfigured client');
+     }
+
+     this.client = client || new OpenAI({ apiKey, organization });
+     this.defaults = {
+       responseModel: 'o4-mini',
+       chatModel: 'gpt-4o-mini',
+       transcriptionModel: 'whisper-1',
+       reasoningEffort: 'medium',
+       ...defaultModels,
+     };
+
+     // Expose raw SDK sub-clients for backward compatibility
+     this.beta = this.client.beta;
+     this.responses = this.client.responses;
+     this.chat = this.client.chat;
+     this.audio = this.client.audio;
+     this.files = this.client.files;
+   }
+
+   getClient() {
+     return this.client;
+   }
+
+   async createConversation({ metadata, messages = [], toolResources } = {}) {
+     const thread = await this.client.beta.threads.create({
+       metadata,
+       tool_resources: toolResources,
+     });
+
+     if (Array.isArray(messages) && messages.length > 0) {
+       for (const message of messages) {
+         await this.addMessage({ conversationId: thread.id, ...message });
+       }
+     }
+
+     return thread;
+   }
+
+   async deleteConversation(conversationId) {
+     await this.client.beta.threads.del(this._ensureId(conversationId));
+   }
+
+   async addMessage({ conversationId, role = 'user', content, attachments = [], metadata }) {
+     const formattedContent = this._normalizeContent(content);
+
+     const payload = {
+       role,
+       content: formattedContent,
+       attachments,
+       metadata,
+     };
+
+     if (payload.metadata && Object.keys(payload.metadata).length === 0) {
+       delete payload.metadata;
+     }
+
+     if (!payload.attachments || payload.attachments.length === 0) {
+       delete payload.attachments;
+     }
+
+     return this.client.beta.threads.messages.create(this._ensureId(conversationId), payload);
+   }
+
+   async listMessages({ conversationId, runId, order = 'desc', limit } = {}) {
+     return this.client.beta.threads.messages.list(this._ensureId(conversationId), {
+       run_id: runId,
+       order,
+       limit,
+     });
+   }
+
+   async getRunText({
+     conversationId,
+     runId,
+     messageIndex = 0,
+     contentIndex = 0,
+     fallback = '',
+   } = {}) {
+     const messages = await this.listMessages({ conversationId, runId });
+     const message = messages?.data?.[messageIndex];
+     const content = message?.content?.[contentIndex];
+
+     if (!content) {
+       return fallback;
+     }
+
+     if (content?.text?.value) {
+       return content.text.value;
+     }
+
+     if (content?.text && typeof content.text === 'string') {
+       return content.text;
+     }
+
+     return fallback;
+   }
+
+   async runConversation({
+     conversationId,
+     assistantId,
+     instructions,
+     additionalMessages = [],
+     additionalInstructions,
+     metadata,
+     topP,
+     temperature,
+     maxOutputTokens,
+     truncationStrategy,
+     tools = [],
+   } = {}) {
+     const payload = {
+       assistant_id: assistantId,
+       instructions,
+       additional_messages: additionalMessages,
+       additional_instructions: additionalInstructions,
+       metadata,
+       top_p: topP,
+       temperature,
+       max_output_tokens: maxOutputTokens,
+       truncation_strategy: truncationStrategy,
+       tools,
+     };
+
+     if (Array.isArray(payload.additional_messages) && payload.additional_messages.length === 0) {
+       delete payload.additional_messages;
+     }
+
+     if (Array.isArray(payload.tools) && payload.tools.length === 0) {
+       delete payload.tools;
+     }
+
+     if (payload.metadata && Object.keys(payload.metadata).length === 0) {
+       delete payload.metadata;
+     }
+
+     Object.keys(payload).forEach((key) => {
+       if (payload[key] === undefined || payload[key] === null) {
+         delete payload[key];
+       }
+     });
+
+     return this.client.beta.threads.runs.create(this._ensureId(conversationId), payload);
+   }
+
+   async getRun({ conversationId, runId }) {
+     return this.client.beta.threads.runs.retrieve(this._ensureId(conversationId), this._ensureId(runId));
+   }
+
+   async listRuns({ conversationId, limit, order = 'desc', activeOnly = false } = {}) {
+     const runs = await this.client.beta.threads.runs.list(this._ensureId(conversationId), {
+       limit,
+       order,
+     });
+
+     if (activeOnly) {
+       const activeStatuses = ['in_progress', 'queued', 'requires_action'];
+       return {
+         ...runs,
+         data: runs.data.filter(run => activeStatuses.includes(run.status))
+       };
+     }
+
+     return runs;
+   }
+
+   async submitToolOutputs({ conversationId, runId, toolOutputs }) {
+     return this.client.beta.threads.runs.submitToolOutputs(
+       this._ensureId(conversationId),
+       this._ensureId(runId),
+       { tool_outputs: toolOutputs }
+     );
+   }
+
+   async cancelRun({ conversationId, runId }) {
+     return this.client.beta.threads.runs.cancel(
+       this._ensureId(conversationId),
+       this._ensureId(runId)
+     );
+   }
+
+   async runPrompt({
+     model,
+     input,
+     instructions,
+     reasoningEffort = this.defaults.reasoningEffort,
+     responseFormat,
+     metadata,
+     temperature,
+     maxOutputTokens,
+     tools,
+   } = {}) {
+     const payload = {
+       model: model || this.defaults.responseModel,
+       input,
+       instructions,
+       reasoning: reasoningEffort ? { effort: reasoningEffort } : undefined,
+       text: responseFormat ? { format: responseFormat } : undefined,
+       metadata,
+       temperature,
+       max_output_tokens: maxOutputTokens,
+       tools,
+     };
+
+     if (Array.isArray(payload.tools) && payload.tools.length === 0) {
+       delete payload.tools;
+     }
+
+     if (payload.metadata && Object.keys(payload.metadata).length === 0) {
+       delete payload.metadata;
+     }
+
+     Object.keys(payload).forEach((key) => {
+       if (payload[key] === undefined || payload[key] === null) {
+         delete payload[key];
+       }
+     });
+
+     return this.client.responses.create(payload);
+   }
+
+   async createChatCompletion({ model, messages, temperature, maxTokens, topP, metadata, responseFormat } = {}) {
+     return this.client.chat.completions.create({
+       model: model || this.defaults.chatModel,
+       messages,
+       temperature,
+       max_tokens: maxTokens,
+       top_p: topP,
+       metadata,
+       response_format: responseFormat,
+     });
+   }
+
+   async uploadFile({ file, purpose }) {
+     if (!file) {
+       throw new Error('uploadFile requires a readable file stream or object');
+     }
+
+     return this.client.files.create({ file, purpose: purpose || 'assistants' });
+   }
+
+   async transcribeAudio({ file, model, language, responseFormat, temperature, prompt } = {}) {
+     return this.client.audio.transcriptions.create({
+       model: model || this.defaults.transcriptionModel,
+       file,
+       language,
+       response_format: responseFormat,
+       temperature,
+       prompt,
+     });
+   }
+
+   _normalizeContent(content) {
+     if (content === undefined || content === null) {
+       return [{ type: 'text', text: '' }];
+     }
+
+     if (Array.isArray(content)) {
+       return content.map((item) => {
+         if (typeof item === 'string') {
+           return { type: 'text', text: item };
+         }
+         return item;
+       });
+     }
+
+     if (typeof content === 'string') {
+       return [{ type: 'text', text: content }];
+     }
+
+     return [content];
+   }
+
+   _ensureId(value) {
+     if (!value) {
+       throw new Error('Identifier value is required');
+     }
+     if (typeof value === 'string') {
+       return value;
+     }
+     if (typeof value === 'object' && value.id) {
+       return value.id;
+     }
+     throw new Error('Unable to resolve identifier value');
+   }
+ }
+
+ module.exports = {
+   OpenAIProvider,
+ };
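The `@@ -0,0` header marks this as a newly added file. A short usage sketch of the wrapper's surface, inside an async function and with a placeholder assistant id:

    const { OpenAIProvider } = require('@peopl-health/nexus');

    const provider = new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY });

    const thread = await provider.createConversation({
      messages: [{ role: 'user', content: 'Hola' }],
    });
    const run = await provider.runConversation({
      conversationId: thread.id,
      assistantId: 'asst_123', // placeholder Assistants API id
    });
    // Poll provider.getRun({ conversationId: thread.id, runId: run.id })
    // until run.status === 'completed', then read the reply:
    const answer = await provider.getRunText({ conversationId: thread.id, runId: run.id });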
@@ -5,10 +5,21 @@ const { addRecord } = require('../services/airtableService.js');
  const runtimeConfig = require('../config/runtimeConfig');
  const llmConfig = require('../config/llmConfig');
  const { BaseAssistant } = require('../assistants/BaseAssistant');
+ const { OpenAIProvider } = require('../providers/OpenAIProvider');
 
- let llmProvider = null;
  const configureLLMProvider = (provider) => {
-   llmProvider = provider;
+   if (!provider) {
+     throw new Error('configureLLMProvider requires an OpenAI provider or raw client');
+   }
+
+   if (provider instanceof OpenAIProvider || typeof provider.runConversation === 'function') {
+     llmConfig.setOpenAIProvider(provider);
+     return provider;
+   }
+
+   const wrappedProvider = new OpenAIProvider({ client: provider });
+   llmConfig.setOpenAIProvider(wrappedProvider);
+   return wrappedProvider;
  };
 
  let assistantConfig = null;
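`configureLLMProvider` now normalizes its input by duck-typing: anything exposing `runConversation` is registered as-is, while a raw SDK client is wrapped first. A sketch of both accepted call shapes:

    const { OpenAI } = require('openai');
    const { OpenAIProvider } = require('@peopl-health/nexus');

    configureLLMProvider(new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY })); // stored as-is
    configureLLMProvider(new OpenAI({ apiKey: process.env.OPENAI_API_KEY }));         // wrapped in OpenAIProvider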
@@ -29,6 +40,60 @@ const configureAssistants = (config) => {
    assistantConfig = config;
  };
 
+ const runAssistantAndWait = async ({
+   thread,
+   assistant,
+   runConfig = {}
+ }) => {
+   if (!thread || !thread.thread_id) {
+     throw new Error('runAssistantAndWait requires a thread with a valid thread_id');
+   }
+
+   if (!assistant) {
+     throw new Error('runAssistantAndWait requires an assistant instance');
+   }
+
+   const provider = llmConfig.requireOpenAIProvider();
+   const { polling, ...conversationConfig } = runConfig || {};
+
+   const run = await provider.runConversation({
+     conversationId: thread.thread_id,
+     assistantId: thread.assistant_id,
+     ...conversationConfig,
+   });
+
+   const filter = thread.code ? { code: thread.code, active: true } : null;
+   if (filter) {
+     await Thread.updateOne(filter, { $set: { run_id: run.id } });
+   }
+
+   const maxRetries = polling?.maxRetries ?? 30;
+   let completed = false;
+
+   try {
+     completed = await checkRunStatus(assistant, run.thread_id, run.id, 0, maxRetries);
+   } finally {
+     if (filter) {
+       await Thread.updateOne(filter, { $set: { run_id: null } });
+     }
+   }
+
+   let finalRun = run;
+   try {
+     finalRun = await provider.getRun({ conversationId: run.thread_id, runId: run.id });
+   } catch (error) {
+     console.warn('Warning: unable to retrieve final run state:', error?.message || error);
+   }
+
+   if (!completed) {
+     return { run: finalRun, completed: false, output: '' };
+   }
+
+   const output = await provider.getRunText({ conversationId: run.thread_id, runId: run.id, fallback: '' });
+
+   return { run: finalRun, completed: true, output };
+ };
+
  const registerAssistant = (assistantId, definition) => {
    if (!assistantId || typeof assistantId !== 'string') {
      throw new Error('registerAssistant requires a string assistantId');
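This helper centralizes the create-run, poll, and clear-`run_id` cycle that the callers refactored below previously duplicated. A minimal usage sketch, assuming `thread` is a stored Thread document and `assistant` came from `getAssistantById`:

    const { run, completed, output } = await runAssistantAndWait({
      thread,
      assistant,
      runConfig: {
        additionalInstructions: 'Answer briefly.', // forwarded to provider.runConversation
        polling: { maxRetries: 10 },               // caps checkRunStatus attempts (default 30)
      },
    });
    if (completed) console.log(output); // run text, or '' when the run never finished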
@@ -49,10 +114,17 @@ const registerAssistant = (assistantId, definition) => {
 
  class ConfiguredAssistant extends ParentClass {
    constructor(options = {}) {
+     const provider = options.provider || llmConfig.getOpenAIProvider({ instantiate: false });
+     const sharedClient = options.client
+       || provider?.getClient?.()
+       || llmConfig.openaiClient
+       || null;
+
      super({
        ...options,
        assistantId,
-       client: options.client || llmProvider || llmConfig.openaiClient || null,
+       client: sharedClient,
+       provider,
        tools: [...tools, ...(options.tools || [])]
      });
 
@@ -100,20 +172,27 @@ const getAssistantById = (assistant_id, thread) => {
      throw new Error(`Assistant '${assistant_id}' not found. Available assistants: ${Object.keys(assistantRegistry).join(', ')}`);
    }
 
-   const sharedClient = llmProvider || llmConfig.openaiClient || null;
+   const provider = llmConfig.getOpenAIProvider({ instantiate: false });
+   const sharedClient = provider?.getClient?.() || llmConfig.openaiClient || null;
 
    if (AssistantClass.prototype instanceof BaseAssistant) {
      return new AssistantClass({
        assistantId: assistant_id,
        thread,
-       client: sharedClient
+       client: sharedClient,
+       provider
      });
    }
 
    try {
      return new AssistantClass(thread);
    } catch (error) {
-     return new AssistantClass({ thread, assistantId: assistant_id, client: sharedClient });
+     return new AssistantClass({
+       thread,
+       assistantId: assistant_id,
+       client: sharedClient,
+       provider
+     });
    }
  };
 
@@ -135,10 +214,13 @@ const createAssistant = async (code, assistant_id, messages=[], prevThread=null)
    const initialThread = await assistant.create(code, curRow[0]);
 
    // Add new messages to memory
+   const provider = llmConfig.requireOpenAIProvider();
    for (const message of messages) {
-     await llmProvider.beta.threads.messages.create(
-       initialThread.id, { role: 'assistant', content: message }
-     );
+     await provider.addMessage({
+       conversationId: initialThread.id,
+       role: 'assistant',
+       content: message
+     });
    }
 
    // Define new thread data
@@ -159,7 +241,7 @@ const createAssistant = async (code, assistant_id, messages=[], prevThread=null)
 
    // Delete previous thread
    if (prevThread) {
-     await llmProvider.beta.threads.del(prevThread.thread_id);
+     await provider.deleteConversation(prevThread.thread_id);
    }
 
    return thread;
@@ -171,32 +253,24 @@ const addMsgAssistant = async (code, inMessages, reply = false) => {
    console.log(thread);
    if (thread === null) return null;
 
+   const provider = llmConfig.requireOpenAIProvider();
+
    for (const message of inMessages) {
      console.log(message);
-     await llmProvider.beta.threads.messages.create(
-       thread.thread_id, { role: 'assistant', content: message }
-     );
+     await provider.addMessage({
+       conversationId: thread.thread_id,
+       role: 'assistant',
+       content: message
+     });
    }
 
    if (!reply) return null;
 
    const assistant = getAssistantById(thread.assistant_id, thread);
-   const run = await llmProvider.beta.threads.runs.create(
-     thread.thread_id,
-     {
-       assistant_id: thread.assistant_id
-     }
-   );
-
-   await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: run.id } });
-   await checkRunStatus(assistant, run.thread_id, run.id);
-   await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: null } });
+   const { output } = await runAssistantAndWait({ thread, assistant });
+   console.log('THE ANS IS', output);
 
-   const messages = await llmProvider.beta.threads.messages.list(run.thread_id, { run_id: run.id });
-   const ans = messages.data[0].content[0].text.value;
-   console.log('THE ANS IS', ans);
-
-   return ans;
+   return output;
  } catch (error) {
    console.log(error);
    return null;
@@ -210,25 +284,19 @@ const addInsAssistant = async (code, instruction) => {
    if (thread === null) return null;
 
    const assistant = getAssistantById(thread.assistant_id, thread);
-   const run = await llmProvider.beta.threads.runs.create(
-     thread.thread_id, {
-       assistant_id: thread.assistant_id,
-       additional_instructions: instruction,
-       additional_messages: [
+   const { output } = await runAssistantAndWait({
+     thread,
+     assistant,
+     runConfig: {
+       additionalInstructions: instruction,
+       additionalMessages: [
        { role: 'user', content: instruction }
      ]
    }
-   );
-
-   await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: run.id } });
-   await checkRunStatus(assistant, run.thread_id, run.id);
-   await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: null } });
+   });
+   console.log('RUN RESPONSE', output);
 
-   const messages = await llmProvider.beta.threads.messages.list(run.thread_id, { run_id: run.id });
-   console.log(messages.data[0].content);
-   const ans = messages.data[0].content[0].text.value;
-
-   return ans;
+   return output;
  } catch (error) {
    console.log(error);
    return null;
@@ -250,9 +318,11 @@ const getThread = async (code, message = null) => {
      return null;
    }
 
+   const provider = llmConfig.getOpenAIProvider({ instantiate: false });
    while (thread && thread.run_id) {
      console.log(`Wait for ${thread.run_id} to be executed`);
-     const run = await llmProvider.beta.threads.runs.retrieve(thread.thread_id, thread.run_id);
+     const activeProvider = provider || llmConfig.requireOpenAIProvider();
+     const run = await activeProvider.getRun({ conversationId: thread.thread_id, runId: thread.run_id });
      if (run.status === 'cancelled' || run.status === 'expired' || run.status === 'completed') {
        await Thread.updateOne({ code: code }, { $set: { run_id: null } });
      }
@@ -283,18 +353,22 @@ const replyAssistant = async function (code, message_ = null, thread_ = null, ru
    console.log('THREAD STOPPED', code, thread?.active);
    if (!thread || !thread.active) return null;
 
-   const patientReply = await getLastMessages(code);
+   const patientReply = message_ ? [message_] : await getLastMessages(code);
    console.log('UNREAD DATA', patientReply);
    if (!patientReply) {
      console.log('No relevant data found for this assistant.');
      return null;
    }
 
-   let activeRuns = await llmProvider.beta.threads.runs.list(thread.thread_id);
-   console.log('ACTIVE RUNS:', activeRuns.length);
-   while (activeRuns.length > 0) {
-     console.log(`ACTIVE RUNS ${thread.thread_id}`);
-     activeRuns = await llmProvider.beta.threads.runs.list(thread.thread_id);
+   const provider = llmConfig.requireOpenAIProvider();
+
+   let activeRuns = await provider.listRuns({ conversationId: thread.thread_id, activeOnly: true });
+   let activeRunsCount = activeRuns?.data?.length || 0;
+   console.log('ACTIVE RUNS:', activeRunsCount, activeRuns?.data?.map(run => ({ id: run.id, status: run.status })));
+   while (activeRunsCount > 0) {
+     console.log(`WAITING FOR ${activeRunsCount} ACTIVE RUNS TO COMPLETE - ${thread.thread_id}`);
+     activeRuns = await provider.listRuns({ conversationId: thread.thread_id, activeOnly: true });
+     activeRunsCount = activeRuns?.data?.length || 0;
      await delay(5000);
    }
 
@@ -335,27 +409,18 @@ const replyAssistant = async function (code, message_ = null, thread_ = null, ru
    if (!patientMsg || !thread || thread?.stopped) return null;
 
    const assistant = getAssistantById(thread.assistant_id, thread);
-   const run = await llmProvider.beta.threads.runs.create(
-     thread.thread_id,
-     {
-       assistant_id: thread.assistant_id,
-       ...runOptions
-     }
-   );
-   console.log('RUN LAST ERROR:', run.last_error);
-
-   assistant.set_replies(patientReply);
-
-   await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: run.id } });
-   const runStatus = await checkRunStatus(assistant, run.thread_id, run.id);
-   console.log('RUN STATUS', runStatus);
-   await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: null } });
+   assistant.setReplies(patientReply);
 
-   const messages = await llmProvider.beta.threads.messages.list(run.thread_id, { run_id: run.id });
-   const reply = messages.data?.[0]?.content?.[0]?.text?.value || '';
-   console.log(reply);
+   const { run, output, completed } = await runAssistantAndWait({
+     thread,
+     assistant,
+     runConfig: runOptions
+   });
+   console.log('RUN LAST ERROR:', run?.last_error);
+   console.log('RUN STATUS', completed);
+   console.log(output);
 
-   return reply;
+   return output;
  } catch (err) {
    console.log(`Error inside reply assistant ${err} ${code}`);
  }
@@ -386,5 +451,6 @@ module.exports = {
    configureAssistants,
    registerAssistant,
    configureLLMProvider,
-   overrideGetAssistantById
+   overrideGetAssistantById,
+   runAssistantAndWait
  };
@@ -1,21 +1,19 @@
- const OpenAI = require('openai');
+ const { OpenAIProvider } = require('../providers/OpenAIProvider');
 
  /**
   * Default LLM Provider using OpenAI
   */
  class DefaultLLMProvider {
    constructor(config = {}) {
-     const apiKey = config.apiKey || process.env.OPENAI_API_KEY;
-
-     if (!apiKey) {
-       throw new Error('OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass apiKey in config.');
-     }
+     this.provider = new OpenAIProvider(config);
+   }
 
-     this.client = new OpenAI({ apiKey });
+   getProvider() {
+     return this.provider;
    }
 
    getClient() {
-     return this.client;
+     return this.provider.getClient();
    }
  }
 
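`DefaultLLMProvider` keeps its public contract while delegating construction to the new wrapper; a brief sketch:

    const llm = new DefaultLLMProvider({ apiKey: process.env.OPENAI_API_KEY });
    llm.getProvider(); // the OpenAIProvider wrapper (new in this release)
    llm.getClient();   // still the raw `openai` SDK client, unchanged for existing callers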
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@peopl-health/nexus",
-   "version": "1.6.3",
+   "version": "1.6.5",
    "description": "Core messaging and assistant library for WhatsApp communication platforms",
    "keywords": [
      "whatsapp",