@peopl-health/nexus 1.7.12 → 2.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,11 +1,11 @@
 const { OpenAI } = require('openai');
 
 /**
- * Wrapper around the OpenAI SDK that exposes a higher level interface for
- * common operations while remaining compatible with existing callers that
- * expect the raw SDK surface (e.g. `.beta`).
+ * Provider wrapper that targets the legacy Assistants (threads/runs) API surface.
+ * Consumers can continue using the existing helper methods while we gradually
+ * migrate to the new Responses API.
  */
-class OpenAIProvider {
+class OpenAIAssistantsProvider {
   constructor(options = {}) {
     const {
       apiKey = process.env.OPENAI_API_KEY,
@@ -15,7 +15,7 @@ class OpenAIProvider {
     } = options;
 
     if (!client && !apiKey) {
-      throw new Error('OpenAIProvider requires an API key or a preconfigured client');
+      throw new Error('OpenAIAssistantsProvider requires an API key or a preconfigured client');
     }
 
     this.client = client || new OpenAI({ apiKey, organization });
@@ -27,12 +27,11 @@
       ...defaultModels,
     };
 
-    // Expose raw SDK sub-clients for backward compatibility
-    this.beta = this.client.beta;
-    this.responses = this.client.responses;
-    this.chat = this.client.chat;
-    this.audio = this.client.audio;
-    this.files = this.client.files;
+    this.variant = 'assistants';
+  }
+
+  getVariant() {
+    return this.variant;
   }
 
   getClient() {
@@ -47,24 +46,23 @@
 
     if (Array.isArray(messages) && messages.length > 0) {
       for (const message of messages) {
-        await this.addMessage({ conversationId: thread.id, ...message });
+        await this.addMessage({ threadId: thread.id, ...message });
       }
     }
 
     return thread;
   }
 
-  async deleteConversation(conversationId) {
-    await this.client.beta.threads.del(this._ensureId(conversationId));
+  async deleteConversation(threadId) {
+    await this.client.beta.threads.del(this._ensureId(threadId));
   }
 
-  async addMessage({ conversationId, role = 'user', content, attachments = [], metadata }) {
+  async addMessage({ threadId, role = 'user', content, metadata }) {
    const formattedContent = this._normalizeContent(content);
 
    const payload = {
      role,
      content: formattedContent,
-      attachments,
      metadata,
    };
 
@@ -72,15 +70,11 @@ class OpenAIProvider {
       delete payload.metadata;
     }
 
-    if (!payload.attachments || payload.attachments.length === 0) {
-      delete payload.attachments;
-    }
-
-    return this.client.beta.threads.messages.create(this._ensureId(conversationId), payload);
+    return this.client.beta.threads.messages.create(this._ensureId(threadId), payload);
   }
 
-  async listMessages({ conversationId, runId, order = 'desc', limit } = {}) {
-    return this.client.beta.threads.messages.list(this._ensureId(conversationId), {
+  async listMessages({ threadId, runId, order = 'desc', limit } = {}) {
+    return this.client.beta.threads.messages.list(this._ensureId(threadId), {
       run_id: runId,
       order,
       limit,
@@ -88,13 +82,13 @@
   }
 
   async getRunText({
-    conversationId,
+    threadId,
     runId,
     messageIndex = 0,
     contentIndex = 0,
     fallback = '',
   } = {}) {
-    const messages = await this.listMessages({ conversationId, runId });
+    const messages = await this.listMessages({ threadId, runId });
     const message = messages?.data?.[messageIndex];
     const content = message?.content?.[contentIndex];
 
@@ -114,7 +108,7 @@
   }
 
   async runConversation({
-    conversationId,
+    threadId,
     assistantId,
     instructions,
     additionalMessages = [],
@@ -157,85 +151,48 @@
       }
     });
 
-    return this.client.beta.threads.runs.create(this._ensureId(conversationId), payload);
+    return this.client.beta.threads.runs.create(this._ensureId(threadId), payload);
   }
 
-  async getRun({ conversationId, runId }) {
-    return this.client.beta.threads.runs.retrieve(this._ensureId(conversationId), this._ensureId(runId));
+  async getRun({ threadId, runId }) {
+    const run = await this.client.beta.threads.runs.retrieve(this._ensureId(runId), { thread_id: this._ensureId(threadId) });
+    return run;
   }
 
-  async listRuns({ conversationId, limit, order = 'desc', activeOnly = false } = {}) {
-    const runs = await this.client.beta.threads.runs.list(this._ensureId(conversationId), {
+  async listRuns({ threadId, limit, order = 'desc', activeOnly = false } = {}) {
+    const runs = await this.client.beta.threads.runs.list(this._ensureId(threadId), {
       limit,
       order,
     });
-
+
     if (activeOnly) {
       const activeStatuses = ['in_progress', 'queued', 'requires_action'];
       return {
         ...runs,
-        data: runs.data.filter(run => activeStatuses.includes(run.status))
+        data: runs.data.filter((run) => activeStatuses.includes(run.status)),
       };
     }
-
+
     return runs;
   }
 
-  async submitToolOutputs({ conversationId, runId, toolOutputs }) {
+  async submitToolOutputs({ threadId, runId, toolOutputs }) {
     return this.client.beta.threads.runs.submitToolOutputs(
-      this._ensureId(conversationId),
       this._ensureId(runId),
-      { tool_outputs: toolOutputs }
+      {
+        thread_id: this._ensureId(threadId),
+        tool_outputs: toolOutputs
+      }
     );
   }
 
-  async cancelRun({ conversationId, runId }) {
+  async cancelRun({ threadId, runId }) {
     return this.client.beta.threads.runs.cancel(
-      this._ensureId(conversationId),
-      this._ensureId(runId)
+      this._ensureId(runId),
+      { thread_id: this._ensureId(threadId) }
     );
   }
 
-  async runPrompt({
-    model,
-    input,
-    instructions,
-    reasoningEffort = this.defaults.reasoningEffort,
-    responseFormat,
-    metadata,
-    temperature,
-    maxOutputTokens,
-    tools,
-  } = {}) {
-    const payload = {
-      model: model || this.defaults.responseModel,
-      input,
-      instructions,
-      reasoning: reasoningEffort ? { effort: reasoningEffort } : undefined,
-      text: responseFormat ? { format: responseFormat } : undefined,
-      metadata,
-      temperature,
-      max_output_tokens: maxOutputTokens,
-      tools,
-    };
-
-    if (Array.isArray(payload.tools) && payload.tools.length === 0) {
-      delete payload.tools;
-    }
-
-    if (payload.metadata && Object.keys(payload.metadata).length === 0) {
-      delete payload.metadata;
-    }
-
-    Object.keys(payload).forEach((key) => {
-      if (payload[key] === undefined || payload[key] === null) {
-        delete payload[key];
-      }
-    });
-
-    return this.client.responses.create(payload);
-  }
-
   async createChatCompletion({ model, messages, temperature, maxTokens, topP, metadata, responseFormat } = {}) {
     return this.client.chat.completions.create({
       model: model || this.defaults.chatModel,
@@ -303,5 +260,5 @@
 }
 
 module.exports = {
-  OpenAIProvider,
+  OpenAIAssistantsProvider,
 };
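
For 1.x consumers this is a breaking rename twice over: the exported class becomes `OpenAIAssistantsProvider`, and every helper now takes `threadId` where it previously took `conversationId`. The run helpers also switch to the newer openai-node calling convention (run id first, with `thread_id` carried in the params object), and `runPrompt` plus the raw sub-client passthroughs (`.beta`, `.responses`, `.chat`, `.audio`, `.files`) are gone. A minimal migration sketch, assuming the class is exported from the package root (the require path is illustrative):

```js
// Migration sketch for 2.0.1 — the require path is an assumption for illustration.
const { OpenAIAssistantsProvider } = require('@peopl-health/nexus');

const provider = new OpenAIAssistantsProvider({ apiKey: process.env.OPENAI_API_KEY });

async function inspectRun(threadId, runId) {
  // 1.7.x: provider.getRun({ conversationId, runId })
  // 2.0.x: the same helper, keyed by `threadId`.
  const run = await provider.getRun({ threadId, runId });

  if (run.status === 'completed') {
    // getRunText resolves the first text part of the newest run message.
    return provider.getRunText({ threadId, runId, fallback: '(no text)' });
  }
  return null;
}
```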
@@ -0,0 +1,380 @@
+const { OpenAI } = require('openai');
+
+const CONVERSATION_PREFIX = 'conv_';
+const RESPONSE_PREFIX = 'resp_';
+
+/**
+ * Provider wrapper that targets the Conversations + Responses API surface.
+ */
+class OpenAIResponsesProvider {
+  constructor(options = {}) {
+    const {
+      apiKey = process.env.OPENAI_API_KEY,
+      organization,
+      client,
+      defaultModels = {},
+    } = options;
+
+    if (!client && !apiKey) {
+      throw new Error('OpenAIResponsesProvider requires an API key or a preconfigured client');
+    }
+
+    this.client = client || new OpenAI({ apiKey, organization });
+    this.defaults = {
+      responseModel: 'o4-mini',
+      chatModel: 'gpt-4o-mini',
+      transcriptionModel: 'whisper-1',
+      reasoningEffort: 'medium',
+      ...defaultModels,
+    };
+
+    this.variant = 'responses';
+
+    this.responses = this.client.responses;
+    this.conversations = this.client.conversations;
+  }
+
+  getVariant() {
+    return this.variant;
+  }
+
+  getClient() {
+    return this.client;
+  }
+
+  /**
+   * Conversations helpers
+   */
+  async createConversation({ metadata, messages = [], toolResources } = {}) {
+    const payload = this._cleanObject({
+      metadata,
+      items: this._conversationItems(messages),
+      tool_resources: toolResources,
+    });
+
+    if (this.conversations && typeof this.conversations.create === 'function') {
+      return await this.conversations.create(payload);
+    }
+
+    return await this._post('/conversations', payload);
+  }
+
+  async deleteConversation(threadId) {
+    const id = this._ensurethreadId(threadId);
+    if (this.conversations && typeof this.conversations.del === 'function') {
+      return await this.conversations.del(id);
+    }
+    return await this._delete(`/conversations/${id}`);
+  }
+
+  async addMessage({ threadId, role = 'user', content, metadata }) {
+    const id = this._ensurethreadId(threadId);
+    const payload = this._cleanObject({
+      role,
+      content: this._normalizeContent(role, content),
+      type: 'message',
+    });
+    console.log('payload', payload);
+
+    if (payload.content) {
+      if (this.conversations?.items?.create) {
+        return await this.conversations.items.create(id, { items: [payload] });
+      }
+      return await this._post(`/conversations/${id}/items`, { items: [payload] });
+    }
+  }
+
+  async listMessages({ threadId, order = 'desc', limit } = {}) {
+    const id = this._ensurethreadId(threadId);
+    const query = this._cleanObject({ order, limit });
+
+    if (this.conversations?.items && typeof this.conversations.items.list === 'function') {
+      return await this.conversations.items.list(id, query);
+    }
+
+    return await this._get(`/conversations/${id}/items`, query);
+  }
+
+  async runConversation({
+    assistantId,
+    threadId,
+    conversationId,
+    promptId,
+    instructions,
+    additionalMessages = [],
+    additionalInstructions,
+    metadata,
+    topP,
+    temperature,
+    maxOutputTokens,
+    truncationStrategy,
+    tools = [],
+  } = {}) {
+    const id = this._ensurethreadId(conversationId || threadId);
+
+    const payload = this._cleanObject({
+      conversation: id,
+      prompt: { id: promptId },
+      instructions: additionalInstructions || instructions,
+      input: this._responseInput(additionalMessages),
+      metadata,
+      top_p: topP,
+      temperature,
+      max_output_tokens: maxOutputTokens,
+      truncation_strategy: truncationStrategy,
+      tools: Array.isArray(tools) && tools.length ? tools : undefined,
+    });
+
+    const response = await this.client.responses.create(payload);
+    return {
+      ...response,
+      thread_id: id,
+      assistant_id: assistantId,
+      object: response.object || 'response',
+    };
+  }
+
+  async getRun({ threadId, runId }) {
+    const id = this._ensureResponseId(runId);
+    return await this.client.responses.retrieve(id);
+  }
+
+  async listRuns({ threadId, limit, order = 'desc', activeOnly = false } = {}) {
+    const id = this._ensurethreadId(threadId);
+    const query = this._cleanObject({ conversation: id, limit, order });
+
+    let responseList;
+    try {
+      responseList = await this._get('/responses', query);
+    } catch (error) {
+      console.warn('[OpenAIResponsesProvider] Failed to list responses:', error?.message || error);
+      responseList = { data: [] };
+    }
+
+    if (!activeOnly) {
+      return responseList;
+    }
+
+    const activeStatuses = ['queued', 'in_progress', 'processing', 'requires_action'];
+    const data = Array.isArray(responseList?.data)
+      ? responseList.data.filter((run) => activeStatuses.includes(run.status))
+      : [];
+
+    return { ...responseList, data };
+  }
+
+  async submitToolOutputs({ threadId, runId, toolOutputs }) {
+    const responseId = this._ensureResponseId(runId);
+    return await this._post(`/responses/${responseId}/submit_tool_outputs`, {
+      tool_outputs: toolOutputs,
+    });
+  }
+
+  async cancelRun({ threadId, runId }) {
+    const responseId = this._ensureResponseId(runId);
+    return this._post(`/responses/${responseId}/cancel`);
+  }
+
+  async getRunText({
+    threadId,
+    runId,
+    messageIndex = 0,
+    contentIndex = 0,
+    fallback = '',
+  } = {}) {
+    const response = await this.client.responses.retrieve(this._ensureResponseId(runId));
+    if (!response) return fallback;
+
+    if (response.output_text) {
+      return response.output_text;
+    }
+
+    const output = Array.isArray(response.output) ? response.output : [];
+    const item = output[messageIndex];
+    if (!item || !Array.isArray(item.content)) {
+      return fallback;
+    }
+    const content = item.content[contentIndex];
+    if (content?.type === 'output_text' && typeof content.text === 'string') {
+      return content.text;
+    }
+
+    return fallback;
+  }
+
+  /**
+   * Generic helpers
+   */
+
+  async createChatCompletion({ model, messages, temperature, maxTokens, topP, metadata, responseFormat } = {}) {
+    return this.client.chat.completions.create({
+      model: model || this.defaults.chatModel,
+      messages,
+      temperature,
+      max_tokens: maxTokens,
+      top_p: topP,
+      metadata,
+      response_format: responseFormat,
+    });
+  }
+
+  async uploadFile({ file, purpose }) {
+    if (!file) {
+      throw new Error('uploadFile requires a readable file stream or object');
+    }
+
+    return this.client.files.create({ file, purpose: purpose || 'assistants' });
+  }
+
+  async transcribeAudio({ file, model, language, responseFormat, temperature, prompt } = {}) {
+    return this.client.audio.transcriptions.create({
+      model: model || this.defaults.transcriptionModel,
+      file,
+      language,
+      response_format: responseFormat,
+      temperature,
+      prompt,
+    });
+  }
+
+  /**
+   * Internal helpers
+   */
+  _ensurethreadId(value) {
+    const id = this._ensureId(value);
+    if (!id.startsWith(CONVERSATION_PREFIX)) {
+      throw new Error(`Expected conversation id to start with '${CONVERSATION_PREFIX}'`);
+    }
+    return id;
+  }
+
+  _ensureResponseId(value) {
+    const id = this._ensureId(value);
+    if (!id.startsWith(RESPONSE_PREFIX)) {
+      throw new Error(`Expected response id to start with '${RESPONSE_PREFIX}'`);
+    }
+    return id;
+  }
+
+  _ensureId(value) {
+    if (!value) {
+      throw new Error('Identifier value is required');
+    }
+    if (typeof value === 'string') {
+      return value;
+    }
+    if (typeof value === 'object' && value.id) {
+      return value.id;
+    }
+    throw new Error('Unable to resolve identifier value');
+  }
+
+  _cleanObject(payload) {
+    if (payload === undefined || payload === null) {
+      return undefined;
+    }
+
+    if (Array.isArray(payload)) {
+      return payload
+        .map((entry) => this._cleanObject(entry))
+        .filter((entry) => !(entry === undefined || entry === null || (typeof entry === 'object' && !Array.isArray(entry) && Object.keys(entry).length === 0)));
+    }
+
+    if (typeof payload !== 'object') {
+      return payload;
+    }
+
+    const cleaned = { ...payload };
+    Object.keys(cleaned).forEach((key) => {
+      const value = cleaned[key];
+      if (value === undefined || value === null) {
+        delete cleaned[key];
+        return;
+      }
+
+      if (typeof value === 'object') {
+        const nested = this._cleanObject(value);
+        if (nested === undefined || nested === null || (typeof nested === 'object' && !Array.isArray(nested) && Object.keys(nested).length === 0)) {
+          delete cleaned[key];
+        } else {
+          cleaned[key] = nested;
+        }
+      }
+    });
+    return cleaned;
+  }
+
+  _normalizeContent(role, content) {
+    const normalized = this._normalizeContentParts(content);
+    return normalized.map((part) => {
+      if (part.type === 'text') {
+        return { type: role === 'assistant' ? 'output_text' : 'input_text', text: part.text };
+      } else if (part.type === 'image_file') {
+        return { type: 'input_image', file_id: part?.image_file?.file_id };
+      }
+      return part;
+    });
+  }
+
+  _normalizeContentParts(content) {
+    if (content === undefined || content === null) {
+      return [{ type: 'text', text: '' }];
+    }
+
+    if (Array.isArray(content)) {
+      return content.map((item) => {
+        if (typeof item === 'string') {
+          return { type: 'text', text: item };
+        }
+        return item;
+      });
+    }
+
+    if (typeof content === 'string') {
+      return [{ type: 'text', text: content }];
+    }
+
+    return [content];
+  }
+
+  _conversationItems(messages = []) {
+    if (!Array.isArray(messages)) return [];
+    return messages
+      .map((message) => {
+        if (!message) return null;
+        const role = message.role || 'user';
+        return {
+          role,
+          content: this._normalizeContent(role, message.content),
+        };
+      })
+      .filter(Boolean);
+  }
+
+  _responseInput(messages = []) {
+    const items = this._conversationItems(messages);
+    return items.length ? items : undefined;
+  }
+
+  async _post(path, body, options = {}) {
+    return this.client.post(path, {
+      ...options,
+      body: this._cleanObject(body),
+    });
+  }
+
+  async _get(path, query, options = {}) {
+    return this.client.get(path, {
+      ...options,
+      query: this._cleanObject(query),
+    });
+  }
+
+  async _delete(path, options = {}) {
+    return this.client.delete(path, options);
+  }
+}
+
+module.exports = {
+  OpenAIResponsesProvider,
+};
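
The new provider keeps the Assistants-era method names (`createConversation`, `addMessage`, `listRuns`, `getRunText`, …) but maps them onto the `conversations.*` and `responses.*` surfaces, validating the `conv_`/`resp_` id prefixes along the way. Note that `runConversation` builds its payload without a `model` field, so a `promptId` (or some server-side default) has to supply one. A minimal usage sketch under those assumptions (require path and prompt id are illustrative):

```js
// Usage sketch — the require path and prompt id are assumptions for illustration.
const { OpenAIResponsesProvider } = require('@peopl-health/nexus');

const provider = new OpenAIResponsesProvider({ apiKey: process.env.OPENAI_API_KEY });

async function askOnce(question) {
  // Creates a conversation; ids must carry the enforced 'conv_' prefix.
  const conversation = await provider.createConversation({
    messages: [{ role: 'user', content: question }],
  });

  // Wraps client.responses.create({ conversation, prompt, ... }).
  const run = await provider.runConversation({
    threadId: conversation.id,
    promptId: 'pmpt_example', // hypothetical stored-prompt id
  });

  // Prefers response.output_text, then falls back to scanning output parts.
  return provider.getRunText({ runId: run.id, fallback: '(no output)' });
}
```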
@@ -0,0 +1,24 @@
+const { OpenAIAssistantsProvider } = require('./OpenAIAssistantsProvider');
+const { OpenAIResponsesProvider } = require('./OpenAIResponsesProvider');
+
+const PROVIDER_VARIANTS = {
+  assistants: OpenAIAssistantsProvider,
+  responses: OpenAIResponsesProvider,
+};
+
+/**
+ * Returns the appropriate OpenAI provider implementation for the requested variant.
+ */
+function createProvider(config = {}) {
+  const variant = (config.variant || config.providerVariant || config.llmVariant || 'assistants')
+    .toString()
+    .toLowerCase();
+
+  const ProviderClass = PROVIDER_VARIANTS[variant] || OpenAIAssistantsProvider;
+  return new ProviderClass({ ...config, variant });
+}
+
+module.exports = {
+  createProvider,
+  PROVIDER_VARIANTS,
+};
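
A quick sketch of the factory's variant selection: unrecognized variants fall back to the Assistants implementation, and the resolved variant string is passed through to the constructor (require path assumed):

```js
// Factory sketch — the require path is an assumption for illustration.
const { createProvider } = require('@peopl-health/nexus');

// Both constructors still require OPENAI_API_KEY or a preconfigured client.
const legacy = createProvider();                         // OpenAIAssistantsProvider
const modern = createProvider({ variant: 'responses' }); // OpenAIResponsesProvider

console.log(legacy.getVariant());  // 'assistants'
console.log(modern.getVariant());  // 'responses'
```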