@peopl-health/nexus 1.6.3 → 1.6.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/assistants/BaseAssistant.js +22 -2
- package/lib/config/llmConfig.js +53 -1
- package/lib/helpers/assistantHelper.js +13 -19
- package/lib/index.js +20 -6
- package/lib/providers/OpenAIProvider.js +297 -0
- package/lib/services/assistantService.js +135 -69
- package/lib/utils/defaultLLMProvider.js +6 -8
- package/package.json +1 -1
package/lib/assistants/BaseAssistant.js
CHANGED

@@ -17,7 +17,27 @@ class BaseAssistant {
     this.lastMessages = null;
     this.createdAt = new Date();

-
+    const existingProvider = llmConfig.getOpenAIProvider({ instantiate: false });
+    this.provider = options.provider || existingProvider || null;
+    this.client = options.client
+      || (this.provider && this.provider.getClient ? this.provider.getClient() : null)
+      || llmConfig.openaiClient
+      || null;
+
+    if (!this.provider && this.client) {
+      try {
+        const { OpenAIProvider } = require('../providers/OpenAIProvider');
+        const provider = new OpenAIProvider({ client: this.client });
+        this.provider = provider;
+        if (typeof llmConfig.setOpenAIProvider === 'function') {
+          llmConfig.setOpenAIProvider(provider);
+        }
+      } catch (err) {
+        if (process.env.NODE_ENV !== 'production') {
+          console.warn('[BaseAssistant] Failed to initialise OpenAIProvider from client:', err?.message || err);
+        }
+      }
+    }
     this.tools = new Map();

     if (Array.isArray(options.tools)) {

@@ -67,7 +87,7 @@ class BaseAssistant {
     this.thread = thread;
   }

-  set_replies(replies) {
+  setReplies(replies) {
     this.replies = replies;
   }

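The constructor now resolves its provider in priority order: an explicit options.provider, then the shared instance registered in llmConfig, and finally a wrapper built from a bare client. A minimal sketch of the three paths, assuming the package's root exports (the subclass name is illustrative):

const { BaseAssistant, OpenAIProvider } = require('@peopl-health/nexus');

class SupportAssistant extends BaseAssistant {}

const provider = new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY });

// 1. An explicit provider always wins.
const a = new SupportAssistant({ provider });

// 2. A bare SDK client is wrapped in an OpenAIProvider and shared back
//    through llmConfig.setOpenAIProvider for later callers.
const b = new SupportAssistant({ client: provider.getClient() });

// 3. With neither option set, whatever provider llmConfig already holds
//    is picked up ({ instantiate: false } means none is built here).
const c = new SupportAssistant({});
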
package/lib/config/llmConfig.js
CHANGED

@@ -1,6 +1,53 @@
 const runtimeConfig = require('./runtimeConfig');
+const { OpenAIProvider } = require('../providers/OpenAIProvider');

 let anthropicClient = null;
+let openaiClient = null;
+let openaiProviderInstance = null;
+
+const setOpenAIClient = (client) => {
+  openaiClient = client || null;
+  module.exports.openaiClient = openaiClient;
+  if (!client) {
+    openaiProviderInstance = null;
+    module.exports.openaiProvider = null;
+  }
+};
+
+const setOpenAIProvider = (provider) => {
+  openaiProviderInstance = provider || null;
+  module.exports.openaiProvider = openaiProviderInstance;
+
+  if (!provider) {
+    setOpenAIClient(null);
+    return;
+  }
+
+  const client = typeof provider.getClient === 'function'
+    ? provider.getClient()
+    : provider.client;
+
+  if (client) {
+    setOpenAIClient(client);
+  }
+};
+
+const getOpenAIProvider = ({ instantiate = true } = {}) => {
+  if (openaiProviderInstance) return openaiProviderInstance;
+  if (!instantiate) return null;
+  if (!openaiClient) return null;
+  const provider = new OpenAIProvider({ client: openaiClient });
+  setOpenAIProvider(provider);
+  return provider;
+};
+
+const requireOpenAIProvider = (options) => {
+  const provider = getOpenAIProvider(options);
+  if (!provider) {
+    throw new Error('OpenAI provider not configured. Call configureLLMProvider first.');
+  }
+  return provider;
+};

 const resolveAnthropicClient = () => {
   if (anthropicClient) return anthropicClient;

@@ -21,7 +68,12 @@ const resolveAnthropicClient = () => {
 };

 module.exports = {
-  openaiClient
+  openaiClient,
+  openaiProvider: openaiProviderInstance,
+  setOpenAIClient,
+  setOpenAIProvider,
+  getOpenAIProvider,
+  requireOpenAIProvider,
   get anthropicClient() {
     return resolveAnthropicClient();
   }
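llmConfig now doubles as a small registry: setOpenAIProvider caches the instance and mirrors its raw client onto openaiClient for legacy callers, getOpenAIProvider({ instantiate: false }) peeks without constructing, and requireOpenAIProvider throws when nothing is configured. A sketch, assuming the deep require path resolves (llmConfig itself is not re-exported in this diff):

const llmConfig = require('@peopl-health/nexus/lib/config/llmConfig');
const { OpenAIProvider } = require('@peopl-health/nexus');

llmConfig.setOpenAIProvider(
  new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY })
);

// Peek without side effects: returns null instead of building a provider.
const maybe = llmConfig.getOpenAIProvider({ instantiate: false });

// Hard dependency: throws 'OpenAI provider not configured...' when unset.
const provider = llmConfig.requireOpenAIProvider();

Note that module.exports.openaiProvider captures the value once at load time, which is why both setters also reassign it on module.exports to keep the exported value current.
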
package/lib/helpers/assistantHelper.js
CHANGED

@@ -16,9 +16,8 @@ const mode = process.env.NODE_ENV || 'dev';

 async function checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxRetries = 30) {
   try {
-    const
-
-    const run = await client.beta.threads.runs.retrieve(thread_id, run_id);
+    const provider = llmConfig.requireOpenAIProvider();
+    const run = await provider.getRun({ conversationId: thread_id, runId: run_id });
     console.log(`Status: ${run.status} ${thread_id} ${run_id} (attempt ${retryCount + 1})`);

     if (run.status === 'failed' || run.status === 'expired' || run.status === 'incomplete') {

@@ -49,9 +48,8 @@ async function checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxRetries = 30) {

 async function checkIfFinished(text) {
   try {
-    const
-
-    const completion = await client.chat.completions.create({
+    const provider = llmConfig.requireOpenAIProvider();
+    const completion = await provider.createChatCompletion({
       model: 'gpt-4o-mini',
       messages: [
         {

@@ -189,6 +187,7 @@ async function downloadMediaAndCreateFile(code, reply) {

 async function processMessage(code, reply, thread) {
   try {
+    const provider = llmConfig.requireOpenAIProvider();
     const formattedMessage = formatMessage(reply);
     const isNotAssistant = !reply.from_me;
     let messagesChat = [];

@@ -234,9 +233,7 @@ async function processMessage(code, reply, thread) {
           });
         } else {
           console.log('Add attachment');
-          const
-          if (!client) throw new Error('OpenAI client not configured');
-          const file = await client.files.create({
+          const file = await provider.uploadFile({
             file: fs.createReadStream(fileName),
             purpose: 'vision',
           });

@@ -246,18 +243,16 @@ async function processMessage(code, reply, thread) {
         });
       }
     } else if (fileName.includes('audio')) {
-      const
-      if (!client) throw new Error('OpenAI client not configured');
-      const audioTranscript = await client.audio.transcriptions.create({
-        model: 'whisper-1',
+      const audioTranscript = await provider.transcribeAudio({
         file: fs.createReadStream(fileName),
-
+        responseFormat: 'text',
         language: 'es'
       });
-
+      const transcriptText = audioTranscript?.text || audioTranscript;
+      console.log('Inside AUDIO', transcriptText);
       messagesChat.push({
         type: 'text',
-        text:
+        text: transcriptText,
       });
     }
   }

@@ -266,9 +261,8 @@ async function processMessage(code, reply, thread) {
     console.log('messagesChat', messagesChat);
     console.log('attachments', attachments);

-
-
-    await client.beta.threads.messages.create(thread.thread_id, {
+    await provider.addMessage({
+      conversationId: thread.thread_id,
       role: 'user',
       content: messagesChat,
       attachments: attachments
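Every raw client call in this helper now goes through the shared provider facade. A sketch of the before/after call shapes (the function and its arguments are illustrative):

const fs = require('fs');
const llmConfig = require('@peopl-health/nexus/lib/config/llmConfig');

async function demo(threadId, runId, fileName) {
  const provider = llmConfig.requireOpenAIProvider();

  // was: client.beta.threads.runs.retrieve(threadId, runId)
  const run = await provider.getRun({ conversationId: threadId, runId });

  // was: client.chat.completions.create({ model: 'gpt-4o-mini', messages })
  const completion = await provider.createChatCompletion({
    messages: [{ role: 'user', content: 'Is this conversation finished?' }],
  });

  // was: client.audio.transcriptions.create({ model: 'whisper-1', ... });
  // the provider supplies the default model and snake_cases the options.
  const transcript = await provider.transcribeAudio({
    file: fs.createReadStream(fileName),
    responseFormat: 'text',
    language: 'es',
  });

  return { status: run.status, completion, transcript };
}

With responseFormat: 'text' the SDK returns a plain string rather than an object, which is why processMessage now guards with audioTranscript?.text || audioTranscript.
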
package/lib/index.js
CHANGED

@@ -131,11 +131,17 @@ class Nexus {
     if (llm === 'openai') {
       this.llmProvider = new DefaultLLMProvider(llmConfig);
       try {
-
-
+        const providerInstance = typeof this.llmProvider.getProvider === 'function'
+          ? this.llmProvider.getProvider()
+          : null;
+
+        if (providerInstance && typeof llmConfigModule.setOpenAIProvider === 'function') {
+          llmConfigModule.setOpenAIProvider(providerInstance);
+        } else if (typeof this.llmProvider.getClient === 'function' && typeof llmConfigModule.setOpenAIClient === 'function') {
+          llmConfigModule.setOpenAIClient(this.llmProvider.getClient());
         }
       } catch (err) {
-        console.warn('[Nexus] Failed to expose OpenAI
+        console.warn('[Nexus] Failed to expose OpenAI provider:', err?.message || err);
       }
     }

@@ -163,9 +169,16 @@ class Nexus {
     // Configure Assistants (registry + overrides)
     const assistantsConfig = assistantsOpt || assistantOpt;
     try {
-      if (this.llmProvider && typeof configureAssistantsLLM === 'function'
-
-
+      if (this.llmProvider && typeof configureAssistantsLLM === 'function') {
+        const providerInstance = typeof this.llmProvider.getProvider === 'function'
+          ? this.llmProvider.getProvider()
+          : null;
+
+        if (providerInstance) {
+          configureAssistantsLLM(providerInstance);
+        } else if (typeof this.llmProvider.getClient === 'function') {
+          configureAssistantsLLM(this.llmProvider.getClient());
+        }
       }
       if (assistantsConfig) {
         if (assistantsConfig.registry && typeof assistantsConfig.registry === 'object') {

@@ -313,6 +326,7 @@ module.exports = {
   MongoStorage,
   MessageParser,
   DefaultLLMProvider,
+  OpenAIProvider: require('./providers/OpenAIProvider').OpenAIProvider,
   BaseAssistant: CoreBaseAssistant,
   registerAssistant,
   configureAssistantsLLM,
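Because module.exports now re-exports the class, consumers no longer need a deep require to construct a provider themselves. A sketch (assuming OPENAI_API_KEY is set):

const { OpenAIProvider, DefaultLLMProvider } = require('@peopl-health/nexus');

// Construct directly, or bring a preconfigured SDK client.
const provider = new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY });

// DefaultLLMProvider delegates to the same class internally, so both
// paths yield the same facade.
const shim = new DefaultLLMProvider({});
console.log(shim.getProvider() instanceof OpenAIProvider); // true
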
package/lib/providers/OpenAIProvider.js
ADDED

@@ -0,0 +1,297 @@
+const { OpenAI } = require('openai');
+
+/**
+ * Wrapper around the OpenAI SDK that exposes a higher level interface for
+ * common operations while remaining compatible with existing callers that
+ * expect the raw SDK surface (e.g. `.beta`).
+ */
+class OpenAIProvider {
+  constructor(options = {}) {
+    const {
+      apiKey = process.env.OPENAI_API_KEY,
+      organization,
+      client,
+      defaultModels = {},
+    } = options;
+
+    if (!client && !apiKey) {
+      throw new Error('OpenAIProvider requires an API key or a preconfigured client');
+    }
+
+    this.client = client || new OpenAI({ apiKey, organization });
+    this.defaults = {
+      responseModel: 'o4-mini',
+      chatModel: 'gpt-4o-mini',
+      transcriptionModel: 'whisper-1',
+      reasoningEffort: 'medium',
+      ...defaultModels,
+    };
+
+    // Expose raw SDK sub-clients for backward compatibility
+    this.beta = this.client.beta;
+    this.responses = this.client.responses;
+    this.chat = this.client.chat;
+    this.audio = this.client.audio;
+    this.files = this.client.files;
+  }
+
+  getClient() {
+    return this.client;
+  }
+
+  async createConversation({ metadata, messages = [], toolResources } = {}) {
+    const thread = await this.client.beta.threads.create({
+      metadata,
+      tool_resources: toolResources,
+    });
+
+    if (Array.isArray(messages) && messages.length > 0) {
+      for (const message of messages) {
+        await this.addMessage({ conversationId: thread.id, ...message });
+      }
+    }
+
+    return thread;
+  }
+
+  async deleteConversation(conversationId) {
+    await this.client.beta.threads.del(this._ensureId(conversationId));
+  }
+
+  async addMessage({ conversationId, role = 'user', content, attachments = [], metadata }) {
+    const formattedContent = this._normalizeContent(content);
+
+    const payload = {
+      role,
+      content: formattedContent,
+      attachments,
+      metadata,
+    };
+
+    if (payload.metadata && Object.keys(payload.metadata).length === 0) {
+      delete payload.metadata;
+    }
+
+    if (!payload.attachments || payload.attachments.length === 0) {
+      delete payload.attachments;
+    }
+
+    return this.client.beta.threads.messages.create(this._ensureId(conversationId), payload);
+  }
+
+  async listMessages({ conversationId, runId, order = 'desc', limit } = {}) {
+    return this.client.beta.threads.messages.list(this._ensureId(conversationId), {
+      run_id: runId,
+      order,
+      limit,
+    });
+  }
+
+  async getRunText({
+    conversationId,
+    runId,
+    messageIndex = 0,
+    contentIndex = 0,
+    fallback = '',
+  } = {}) {
+    const messages = await this.listMessages({ conversationId, runId });
+    const message = messages?.data?.[messageIndex];
+    const content = message?.content?.[contentIndex];
+
+    if (!content) {
+      return fallback;
+    }
+
+    if (content?.text?.value) {
+      return content.text.value;
+    }
+
+    if (content?.text && typeof content.text === 'string') {
+      return content.text;
+    }
+
+    return fallback;
+  }
+
+  async runConversation({
+    conversationId,
+    assistantId,
+    instructions,
+    additionalMessages = [],
+    additionalInstructions,
+    metadata,
+    topP,
+    temperature,
+    maxOutputTokens,
+    truncationStrategy,
+    tools = [],
+  } = {}) {
+    const payload = {
+      assistant_id: assistantId,
+      instructions,
+      additional_messages: additionalMessages,
+      additional_instructions: additionalInstructions,
+      metadata,
+      top_p: topP,
+      temperature,
+      max_output_tokens: maxOutputTokens,
+      truncation_strategy: truncationStrategy,
+      tools,
+    };
+
+    if (Array.isArray(payload.additional_messages) && payload.additional_messages.length === 0) {
+      delete payload.additional_messages;
+    }
+
+    if (Array.isArray(payload.tools) && payload.tools.length === 0) {
+      delete payload.tools;
+    }
+
+    if (payload.metadata && Object.keys(payload.metadata).length === 0) {
+      delete payload.metadata;
+    }
+
+    Object.keys(payload).forEach((key) => {
+      if (payload[key] === undefined || payload[key] === null) {
+        delete payload[key];
+      }
+    });
+
+    return this.client.beta.threads.runs.create(this._ensureId(conversationId), payload);
+  }
+
+  async getRun({ conversationId, runId }) {
+    return this.client.beta.threads.runs.retrieve(this._ensureId(conversationId), this._ensureId(runId));
+  }
+
+  async listRuns({ conversationId, limit, order = 'desc' } = {}) {
+    return this.client.beta.threads.runs.list(this._ensureId(conversationId), {
+      limit,
+      order,
+    });
+  }
+
+  async submitToolOutputs({ conversationId, runId, toolOutputs }) {
+    return this.client.beta.threads.runs.submitToolOutputs(
+      this._ensureId(conversationId),
+      this._ensureId(runId),
+      { tool_outputs: toolOutputs }
+    );
+  }
+
+  async cancelRun({ conversationId, runId }) {
+    return this.client.beta.threads.runs.cancel(
+      this._ensureId(conversationId),
+      this._ensureId(runId)
+    );
+  }
+
+  async runPrompt({
+    model,
+    input,
+    instructions,
+    reasoningEffort = this.defaults.reasoningEffort,
+    responseFormat,
+    metadata,
+    temperature,
+    maxOutputTokens,
+    tools,
+  } = {}) {
+    const payload = {
+      model: model || this.defaults.responseModel,
+      input,
+      instructions,
+      reasoning: reasoningEffort ? { effort: reasoningEffort } : undefined,
+      text: responseFormat ? { format: responseFormat } : undefined,
+      metadata,
+      temperature,
+      max_output_tokens: maxOutputTokens,
+      tools,
+    };
+
+    if (Array.isArray(payload.tools) && payload.tools.length === 0) {
+      delete payload.tools;
+    }
+
+    if (payload.metadata && Object.keys(payload.metadata).length === 0) {
+      delete payload.metadata;
+    }
+
+    Object.keys(payload).forEach((key) => {
+      if (payload[key] === undefined || payload[key] === null) {
+        delete payload[key];
+      }
+    });
+
+    return this.client.responses.create(payload);
+  }
+
+  async createChatCompletion({ model, messages, temperature, maxTokens, topP, metadata, responseFormat } = {}) {
+    return this.client.chat.completions.create({
+      model: model || this.defaults.chatModel,
+      messages,
+      temperature,
+      max_tokens: maxTokens,
+      top_p: topP,
+      metadata,
+      response_format: responseFormat,
+    });
+  }
+
+  async uploadFile({ file, purpose }) {
+    if (!file) {
+      throw new Error('uploadFile requires a readable file stream or object');
+    }
+
+    return this.client.files.create({ file, purpose: purpose || 'assistants' });
+  }
+
+  async transcribeAudio({ file, model, language, responseFormat, temperature, prompt } = {}) {
+    return this.client.audio.transcriptions.create({
+      model: model || this.defaults.transcriptionModel,
+      file,
+      language,
+      response_format: responseFormat,
+      temperature,
+      prompt,
+    });
+  }
+
+  _normalizeContent(content) {
+    if (content === undefined || content === null) {
+      return [{ type: 'text', text: '' }];
+    }
+
+    if (Array.isArray(content)) {
+      return content.map((item) => {
+        if (typeof item === 'string') {
+          return { type: 'text', text: item };
+        }
+        return item;
+      });
+    }
+
+    if (typeof content === 'string') {
+      return [{ type: 'text', text: content }];
+    }
+
+    return [content];
+  }
+
+  _ensureId(value) {
+    if (!value) {
+      throw new Error('Identifier value is required');
+    }
+    if (typeof value === 'string') {
+      return value;
+    }
+    if (typeof value === 'object' && value.id) {
+      return value.id;
+    }
+    throw new Error('Unable to resolve identifier value');
+  }
+}
+
+module.exports = {
+  OpenAIProvider,
+};
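Taken together, the facade covers the whole Assistants flow: create a conversation, run it, poll, and read the text back. A minimal end-to-end sketch (the assistant id is a placeholder; checkRunStatus in assistantHelper does this polling with retries and failure handling):

const { OpenAIProvider } = require('@peopl-health/nexus');

async function ask(question) {
  const provider = new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY });

  // Plain strings are normalized to [{ type: 'text', text }] by _normalizeContent.
  const thread = await provider.createConversation({
    messages: [{ role: 'user', content: question }],
  });

  const run = await provider.runConversation({
    conversationId: thread.id,
    assistantId: 'asst_XXXX', // placeholder
  });

  let current = run;
  while (current.status === 'queued' || current.status === 'in_progress') {
    await new Promise((resolve) => setTimeout(resolve, 1000));
    current = await provider.getRun({ conversationId: thread.id, runId: run.id });
  }

  return provider.getRunText({ conversationId: thread.id, runId: run.id, fallback: '' });
}
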
package/lib/services/assistantService.js
CHANGED

@@ -5,10 +5,21 @@ const { addRecord } = require('../services/airtableService.js');
 const runtimeConfig = require('../config/runtimeConfig');
 const llmConfig = require('../config/llmConfig');
 const { BaseAssistant } = require('../assistants/BaseAssistant');
+const { OpenAIProvider } = require('../providers/OpenAIProvider');

-let llmProvider = null;
 const configureLLMProvider = (provider) => {
-
+  if (!provider) {
+    throw new Error('configureLLMProvider requires an OpenAI provider or raw client');
+  }
+
+  if (provider instanceof OpenAIProvider || typeof provider.runConversation === 'function') {
+    llmConfig.setOpenAIProvider(provider);
+    return provider;
+  }
+
+  const wrappedProvider = new OpenAIProvider({ client: provider });
+  llmConfig.setOpenAIProvider(wrappedProvider);
+  return wrappedProvider;
 };

 let assistantConfig = null;

@@ -29,6 +40,60 @@ const configureAssistants = (config) => {
   assistantConfig = config;
 };

+const runAssistantAndWait = async ({
+  thread,
+  assistant,
+  runConfig = {}
+}) => {
+  if (!thread || !thread.thread_id) {
+    throw new Error('runAssistantAndWait requires a thread with a valid thread_id');
+  }
+
+  if (!assistant) {
+    throw new Error('runAssistantAndWait requires an assistant instance');
+  }
+
+  const provider = llmConfig.requireOpenAIProvider();
+  const { polling, ...conversationConfig } = runConfig || {};
+
+  const run = await provider.runConversation({
+    conversationId: thread.thread_id,
+    assistantId: thread.assistant_id,
+    ...conversationConfig,
+  });
+
+  const filter = thread.code ? { code: thread.code, active: true } : null;
+  if (filter) {
+    await Thread.updateOne(filter, { $set: { run_id: run.id } });
+  }
+
+  const maxRetries = polling?.maxRetries ?? 30;
+  let completed = false;
+
+  try {
+    completed = await checkRunStatus(assistant, run.thread_id, run.id, 0, maxRetries);
+  } finally {
+    if (filter) {
+      await Thread.updateOne(filter, { $set: { run_id: null } });
+    }
+  }
+
+  let finalRun = run;
+  try {
+    finalRun = await provider.getRun({ conversationId: run.thread_id, runId: run.id });
+  } catch (error) {
+    console.warn('Warning: unable to retrieve final run state:', error?.message || error);
+  }
+
+  if (!completed) {
+    return { run: finalRun, completed: false, output: '' };
+  }
+
+  const output = await provider.getRunText({ conversationId: run.thread_id, runId: run.id, fallback: '' });
+
+  return { run: finalRun, completed: true, output };
+};
+
 const registerAssistant = (assistantId, definition) => {
   if (!assistantId || typeof assistantId !== 'string') {
     throw new Error('registerAssistant requires a string assistantId');

@@ -49,10 +114,17 @@ const registerAssistant = (assistantId, definition) => {

     class ConfiguredAssistant extends ParentClass {
       constructor(options = {}) {
+        const provider = options.provider || llmConfig.getOpenAIProvider({ instantiate: false });
+        const sharedClient = options.client
+          || provider?.getClient?.()
+          || llmConfig.openaiClient
+          || null;
+
         super({
           ...options,
           assistantId,
-          client:
+          client: sharedClient,
+          provider,
           tools: [...tools, ...(options.tools || [])]
         });

@@ -100,20 +172,27 @@ const getAssistantById = (assistant_id, thread) => {
     throw new Error(`Assistant '${assistant_id}' not found. Available assistants: ${Object.keys(assistantRegistry).join(', ')}`);
   }

-  const
+  const provider = llmConfig.getOpenAIProvider({ instantiate: false });
+  const sharedClient = provider?.getClient?.() || llmConfig.openaiClient || null;

   if (AssistantClass.prototype instanceof BaseAssistant) {
     return new AssistantClass({
       assistantId: assistant_id,
       thread,
-      client: sharedClient
+      client: sharedClient,
+      provider
     });
   }

   try {
     return new AssistantClass(thread);
   } catch (error) {
-    return new AssistantClass({
+    return new AssistantClass({
+      thread,
+      assistantId: assistant_id,
+      client: sharedClient,
+      provider
+    });
   }
 };

@@ -135,10 +214,13 @@ const createAssistant = async (code, assistant_id, messages=[], prevThread=null)
   const initialThread = await assistant.create(code, curRow[0]);

   // Add new messages to memory
+  const provider = llmConfig.requireOpenAIProvider();
   for (const message of messages) {
-    await
-      initialThread.id,
-
+    await provider.addMessage({
+      conversationId: initialThread.id,
+      role: 'assistant',
+      content: message
+    });
   }

   // Define new thread data

@@ -159,7 +241,7 @@ const createAssistant = async (code, assistant_id, messages=[], prevThread=null)

   // Delete previous thread
   if (prevThread) {
-    await
+    await provider.deleteConversation(prevThread.thread_id);
   }

   return thread;

@@ -171,32 +253,24 @@ const addMsgAssistant = async (code, inMessages, reply = false) => {
     console.log(thread);
     if (thread === null) return null;

+    const provider = llmConfig.requireOpenAIProvider();
+
     for (const message of inMessages) {
       console.log(message);
-      await
-        thread.thread_id,
-
+      await provider.addMessage({
+        conversationId: thread.thread_id,
+        role: 'assistant',
+        content: message
+      });
     }

     if (!reply) return null;

     const assistant = getAssistantById(thread.assistant_id, thread);
-    const
-
-      {
-        assistant_id: thread.assistant_id
-      }
-    );
-
-    await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: run.id } });
-    await checkRunStatus(assistant, run.thread_id, run.id);
-    await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: null } });
+    const { output } = await runAssistantAndWait({ thread, assistant });
+    console.log('THE ANS IS', output);

-
-    const ans = messages.data[0].content[0].text.value;
-    console.log('THE ANS IS', ans);
-
-    return ans;
+    return output;
   } catch (error) {
     console.log(error);
     return null;

@@ -210,25 +284,19 @@ const addInsAssistant = async (code, instruction) => {
     if (thread === null) return null;

     const assistant = getAssistantById(thread.assistant_id, thread);
-    const
-      thread
-
-
-
+    const { output } = await runAssistantAndWait({
+      thread,
+      assistant,
+      runConfig: {
+        additionalInstructions: instruction,
+        additionalMessages: [
           { role: 'user', content: instruction }
         ]
       }
-    );
-
-    await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: run.id } });
-    await checkRunStatus(assistant, run.thread_id, run.id);
-    await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: null } });
+    });
+    console.log('RUN RESPONSE', output);

-
-    console.log(messages.data[0].content);
-    const ans = messages.data[0].content[0].text.value;
-
-    return ans;
+    return output;
   } catch (error) {
     console.log(error);
     return null;

@@ -250,9 +318,11 @@ const getThread = async (code, message = null) => {
     return null;
   }

+  const provider = llmConfig.getOpenAIProvider({ instantiate: false });
   while (thread && thread.run_id) {
     console.log(`Wait for ${thread.run_id} to be executed`);
-    const
+    const activeProvider = provider || llmConfig.requireOpenAIProvider();
+    const run = await activeProvider.getRun({ conversationId: thread.thread_id, runId: thread.run_id });
     if (run.status === 'cancelled' || run.status === 'expired' || run.status === 'completed') {
       await Thread.updateOne({ code: code }, { $set: { run_id: null } });
     }

@@ -283,18 +353,22 @@ const replyAssistant = async function (code, message_ = null, thread_ = null, runOptions = {}) {
     console.log('THREAD STOPPED', code, thread?.active);
     if (!thread || !thread.active) return null;

-    const patientReply = await getLastMessages(code);
+    const patientReply = message_ ? [message_] : await getLastMessages(code);
     console.log('UNREAD DATA', patientReply);
     if (!patientReply) {
       console.log('No relevant data found for this assistant.');
       return null;
     }

-
-
-
+    const provider = llmConfig.requireOpenAIProvider();
+
+    let activeRuns = await provider.listRuns({ conversationId: thread.thread_id });
+    let activeRunsCount = activeRuns?.data?.length || 0;
+    console.log('ACTIVE RUNS:', activeRunsCount);
+    while (activeRunsCount > 0) {
       console.log(`ACTIVE RUNS ${thread.thread_id}`);
-      activeRuns = await
+      activeRuns = await provider.listRuns({ conversationId: thread.thread_id });
+      activeRunsCount = activeRuns?.data?.length || 0;
       await delay(5000);
     }

@@ -335,27 +409,18 @@ const replyAssistant = async function (code, message_ = null, thread_ = null, runOptions = {}) {
     if (!patientMsg || !thread || thread?.stopped) return null;

     const assistant = getAssistantById(thread.assistant_id, thread);
-
-      thread.thread_id,
-      {
-        assistant_id: thread.assistant_id,
-        ...runOptions
-      }
-    );
-    console.log('RUN LAST ERROR:', run.last_error);
-
-    assistant.set_replies(patientReply);
-
-    await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: run.id } });
-    const runStatus = await checkRunStatus(assistant, run.thread_id, run.id);
-    console.log('RUN STATUS', runStatus);
-    await Thread.updateOne({ code: thread.code, active: true }, { $set: { run_id: null } });
+    assistant.setReplies(patientReply);

-    const
-
-
+    const { run, output, completed } = await runAssistantAndWait({
+      thread,
+      assistant,
+      runConfig: runOptions
+    });
+    console.log('RUN LAST ERROR:', run?.last_error);
+    console.log('RUN STATUS', completed);
+    console.log(output);

-    return
+    return output;
   } catch (err) {
     console.log(`Error inside reply assistant ${err} ${code}`);
   }

@@ -386,5 +451,6 @@ module.exports = {
   configureAssistants,
   registerAssistant,
   configureLLMProvider,
-  overrideGetAssistantById
+  overrideGetAssistantById,
+  runAssistantAndWait
 };
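runAssistantAndWait is the consolidation that lets addMsgAssistant, addInsAssistant and replyAssistant shed their copy-pasted run/poll/cleanup blocks. A usage sketch, assuming the deep require path resolves (the service module exports it, but the package root does not in this diff):

const { runAssistantAndWait } = require('@peopl-health/nexus/lib/services/assistantService');

async function replyWithInstruction(thread, assistant, instruction) {
  const { run, completed, output } = await runAssistantAndWait({
    thread,
    assistant,
    runConfig: {
      additionalInstructions: instruction,
      // 'polling' is split off and fed to checkRunStatus, not the run payload.
      polling: { maxRetries: 10 },
    },
  });

  if (!completed) {
    console.warn('Run did not finish:', run?.last_error);
    return null;
  }
  return output;
}

The finally block guarantees the thread's run_id is cleared even when polling throws, which the old inline sequences did not.
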
package/lib/utils/defaultLLMProvider.js
CHANGED

@@ -1,21 +1,19 @@
-const
+const { OpenAIProvider } = require('../providers/OpenAIProvider');

 /**
  * Default LLM Provider using OpenAI
  */
 class DefaultLLMProvider {
   constructor(config = {}) {
-
-
-    if (!apiKey) {
-      throw new Error('OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass apiKey in config.');
-    }
+    this.provider = new OpenAIProvider(config);
+  }

-
+  getProvider() {
+    return this.provider;
   }

   getClient() {
-    return this.
+    return this.provider.getClient();
   }
 }

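The shim keeps the old getClient() contract while delegating construction, so the missing-key validation now lives in OpenAIProvider's constructor rather than here. A sketch:

const { DefaultLLMProvider } = require('@peopl-health/nexus');

const llm = new DefaultLLMProvider({ apiKey: process.env.OPENAI_API_KEY });

const provider = llm.getProvider(); // the wrapped OpenAIProvider
const client = llm.getClient();     // the raw openai SDK client, as before

console.log(client === provider.getClient()); // true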