@peopl-health/nexus 2.5.5-message-tracking → 2.5.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/assistants/BaseAssistant.js +11 -5
- package/lib/controllers/assistantController.js +1 -4
- package/lib/controllers/conversationController.js +1 -2
- package/lib/helpers/assistantHelper.js +30 -117
- package/lib/providers/OpenAIAssistantsProvider.js +122 -121
- package/lib/providers/OpenAIResponsesProvider.js +168 -398
- package/lib/providers/OpenAIResponsesProviderTools.js +49 -96
- package/lib/services/airtableService.js +1 -1
- package/lib/services/assistantServiceCore.js +61 -27
- package/lib/templates/templateStructure.js +1 -1
- package/lib/utils/retryHelper.js +3 -2
- package/package.json +1 -1
package/lib/providers/OpenAIResponsesProvider.js

@@ -1,15 +1,15 @@
 const { OpenAI } = require('openai');
+const { Thread } = require('../models/threadModel');
 const { retryWithBackoff } = require('../utils/retryHelper');
 const {
+  handleFunctionCalls: handleFunctionCallsUtil,
   handlePendingFunctionCalls: handlePendingFunctionCallsUtil,
-  handleRequiresAction: handleRequiresActionUtil,
   transformToolsForResponsesAPI: transformToolsForResponsesAPIUtil
 } = require('./OpenAIResponsesProviderTools');
 const { logger } = require('../utils/logger');

 const CONVERSATION_PREFIX = 'conv_';
 const RESPONSE_PREFIX = 'resp_';
-const DEFAULT_MAX_RETRIES = parseInt(process.env.MAX_RETRIES || '10', 10);
 const MAX_ITEMS_ON_CREATE = 20;
 const MAX_ITEMS_PER_BATCH = 20;
 const DEFAULT_MAX_HISTORICAL_MESSAGES = parseInt(process.env.MAX_HISTORICAL_MESSAGES || '50', 10);
@@ -55,26 +55,7 @@ class OpenAIResponsesProvider {
   }

   /**
-   *
-   * @private
-   */
-  async _retryWithBackoff(operation, options = {}) {
-    return retryWithBackoff(operation, {
-      ...options,
-      providerName: PROVIDER_NAME,
-    });
-  }
-
-  /**
-   * @deprecated Use _retryWithBackoff instead
-   * Kept for backward compatibility
-   */
-  async _retryWithRateLimit(operation, maxRetries = 3, retryCount = 0) {
-    return this._retryWithBackoff(operation, { maxRetries, retryCount });
-  }
-
-  /**
-   * Conversations helpers
+   * Create Conversation
    */
   async createConversation({ metadata, messages = [], toolResources } = {}) {
     const messagesToProcess = messages.length > DEFAULT_MAX_HISTORICAL_MESSAGES
@@ -85,52 +66,16 @@
       logger.warn(`[OpenAIResponsesProvider] Capped ${messages.length} → ${DEFAULT_MAX_HISTORICAL_MESSAGES} messages`);
     }

-    const allItems = this.
-    const totalItems = allItems.length;
-
-    // Create empty conversation if no messages
-    if (totalItems === 0) {
-      const payload = this._cleanObject({
-        metadata,
-        tool_resources: toolResources,
-      });
-
-      return this._retryWithRateLimit(async () => {
-        if (this.conversations && typeof this.conversations.create === 'function') {
-          return await this.conversations.create(payload);
-        }
-        return await this._post('/conversations', payload);
-      });
-    }
-
-    // Split items: first batch for initial creation, rest to add in batches
+    const allItems = this._convertItemsToApiFormat(messagesToProcess);
     const initialItems = allItems.slice(0, MAX_ITEMS_ON_CREATE);
     const remainingItems = allItems.slice(MAX_ITEMS_ON_CREATE);
-    const totalBatches = Math.ceil(remainingItems.length / MAX_ITEMS_PER_BATCH);

-
-
-
-    const payload = this._cleanObject({
-      metadata,
-      items: initialItems,
-      tool_resources: toolResources,
-    });
-
-    let conversation;
-    try {
-      conversation = await this._retryWithRateLimit(async () => {
-        if (this.conversations && typeof this.conversations.create === 'function') {
-          return await this.conversations.create(payload);
-        }
-        return await this._post('/conversations', payload);
-      });
-    } catch (error) {
-      logger.error('[OpenAIResponsesProvider] Failed to create conversation:', error?.message || error);
-      throw error;
+    if (remainingItems.length > 0) {
+      logger.info(`[OpenAIResponsesProvider] Batching ${allItems.length} messages: ${initialItems.length} + ${remainingItems.length}`);
     }

-
+    const conversation = await this._createConversationWithItems(metadata, initialItems, toolResources);
+
     if (remainingItems.length > 0) {
       try {
         await this._addItemsInBatches(conversation.id, remainingItems);
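The new createConversation path caps incoming history at DEFAULT_MAX_HISTORICAL_MESSAGES (50 by default), sends at most MAX_ITEMS_ON_CREATE items with the initial create, and adds the rest in batches of MAX_ITEMS_PER_BATCH. A minimal sketch of that split, using the same constants and hypothetical sample items:

// Illustrative sketch only: how 50 capped messages are split for the Conversations API.
const MAX_ITEMS_ON_CREATE = 20;
const MAX_ITEMS_PER_BATCH = 20;

const allItems = Array.from({ length: 50 }, (_, i) => ({ role: 'user', content: `msg ${i}`, type: 'message' }));
const initialItems = allItems.slice(0, MAX_ITEMS_ON_CREATE);   // 20 items sent with the initial create
const remainingItems = allItems.slice(MAX_ITEMS_ON_CREATE);    // 30 items added afterwards
const totalBatches = Math.ceil(remainingItems.length / MAX_ITEMS_PER_BATCH); // 2 follow-up batches (20 + 10)

console.log(initialItems.length, remainingItems.length, totalBatches); // 20 30 2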
@@ -142,95 +87,78 @@ class OpenAIResponsesProvider {
     return conversation;
   }

-
-
-
-
-
-
-
-
+  async _createConversationWithItems(metadata, items, toolResources) {
+    const payload = {
+      metadata,
+      items: items.length > 0 ? items : undefined,
+      tool_resources: toolResources,
+    };
+
+    try {
+      const { result } = await retryWithBackoff(async () => {
+        return await this.conversations.create(payload);
+      }, { providerName: PROVIDER_NAME });
+      return result;
+    } catch (error) {
+      logger.error('[OpenAIResponsesProvider] Failed to create conversation:', error?.message || error);
+      throw error;
     }
+  }
+
+  async _addItemsInBatches(threadId, items, batchSize = MAX_ITEMS_PER_BATCH) {
+    if (!items?.length) return;

     const id = this._ensurethreadId(threadId);
     const totalBatches = Math.ceil(items.length / batchSize);

     for (let i = 0; i < items.length; i += batchSize) {
       const batch = items.slice(i, i + batchSize);
-      const batchNumber = Math.floor(i / batchSize) + 1;
-
       const batchPayload = this._convertItemsToApiFormat(batch);

-
-
-
-        return await this.conversations.items.create(id, { items: batchPayload });
-        }
-        return await this._post(`/conversations/${id}/items`, { items: batchPayload });
-      });
-      } catch (error) {
-        logger.error(`[OpenAIResponsesProvider] Batch ${batchNumber}/${totalBatches} failed:`, error?.message || error);
-        throw error;
-      }
+      await retryWithBackoff(() =>
+        this.conversations.items.create(id, { items: batchPayload })
+      , { providerName: PROVIDER_NAME });
     }

     logger.info(`[OpenAIResponsesProvider] Successfully added ${items.length} messages in ${totalBatches} batches`);
   }

-  /**
-   * Convert conversation items to API format for items.create endpoint
-   * Adds type: 'message' which is required by the items.create API
-   * @private
-   */
   _convertItemsToApiFormat(items) {
     return items.map(item => ({
-      role: item.role,
-      content: item.content,
-      type: 'message',
+      role: item.role || 'user',
+      content: this._normalizeContent(item.content),
+      type: item.type || 'message',
     }));
   }

-
-
-    if (
-
+  _normalizeContent(content) {
+    if (typeof content === 'string') return content;
+    if (Array.isArray(content)) return content;
+    if (content && typeof content === 'object') {
+      if (content.text) return content.text;
+      if (content.type === 'text' && content.text) return content.text;
+      return JSON.stringify(content);
     }
-    return
+    return content || '';
   }

+  /**
+   * Add message to thread/conversation
+   */
   async addMessage({ threadId, messages, role = 'user', content, metadata }) {
     const id = this._ensurethreadId(threadId);
     const messagesToAdd = messages || [{ role, content, metadata }];

-    const payloads = messagesToAdd.
-      this._cleanObject({
-        role: msg.role || 'user',
-        content: this._normalizeContent(msg.role || 'user', msg.content),
-        type: 'message',
-      })
-    ).filter(p => p.content);
-
+    const payloads = this._convertItemsToApiFormat(messagesToAdd).filter(p => p.content);
     if (payloads.length === 0) return null;

-
-
-      await this._addItemsInBatches(id, payloads, MAX_ITEMS_PER_BATCH);
-      return { batched: true, count: payloads.length };
-    }
-
-    return this._retryWithRateLimit(async () => {
-      if (this.conversations?.items?.create) {
-        return await this.conversations.items.create(id, { items: payloads });
-      }
-      return await this._post(`/conversations/${id}/items`, { items: payloads });
-    });
+    await this._addItemsInBatches(id, payloads);
+    return { success: true, count: payloads.length };
   }

   async listMessages({ threadId, order = 'desc', limit } = {}) {
     const id = this._ensurethreadId(threadId);
-
-
-    return await this.client.conversations.items.list(id, query);
+    return await this.client.conversations.items.list(id, { order, limit });
   }

   async cleanupOrphanedFunctionCalls(threadId, deleteAll = false) {
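Both addMessage and the batching path now funnel through _convertItemsToApiFormat, which fills in default role and type and normalizes content to a string or array before items.create. A condensed, standalone sketch of that normalization, reproduced outside the class purely for illustration:

// Sketch mirroring the new _normalizeContent behavior shown above (not the packaged module itself).
function normalizeContent(content) {
  if (typeof content === 'string') return content;
  if (Array.isArray(content)) return content;
  if (content && typeof content === 'object') {
    if (content.text) return content.text;   // covers { text: '...' } and { type: 'text', text: '...' }
    return JSON.stringify(content);           // any other object is stringified
  }
  return content || '';                       // null/undefined become ''
}

console.log(normalizeContent('hola'));                        // 'hola'
console.log(normalizeContent({ type: 'text', text: 'hi' }));  // 'hi'
console.log(normalizeContent({ foo: 1 }));                    // '{"foo":1}'
console.log(normalizeContent(undefined));                     // ''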
@@ -243,36 +171,90 @@ class OpenAIResponsesProvider {

       if (deleteAll) {
         logger.info(`[OpenAIResponsesProvider] Deleting all ${items.length} items from conversation`);
-
-
-
+        await Promise.all(items.map(item =>
+          this.conversations.items.delete(item.id, {conversation_id: id})
+        ));
         return;
       }

-      const
-      const
+      const outputCallIds = new Set(items.filter(i => i.type === 'function_call_output').map(o => o.call_id));
+      const orphanedCalls = items.filter(i => i.type === 'function_call' && !outputCallIds.has(i.call_id));

-
-
-
-        if (!hasOutput) {
-          logger.info(`[OpenAIResponsesProvider] Deleting orphaned function_call: ${functionCall.id} (${functionCall.call_id})`);
-          await this.conversations.items.delete(functionCall.id, {conversation_id: id});
-        }
+      if (orphanedCalls.length > 0) {
+        await Promise.all(orphanedCalls.map(call => this.conversations.items.delete(call.id, {conversation_id: id})));
       }
     } catch (error) {
       logger.warn('[OpenAIResponsesProvider] Failed to cleanup conversation:', error?.message);
     }
   }

+  _normalizeThread(thread) {
+    return {
+      conversationId: thread.conversation_id || thread.getConversationId?.(),
+      assistantId: thread.prompt_id || thread.getAssistantId?.()
+    };
+  }
+
+  /**
+   * Main entry point for running assistant
+   */
+  async executeRun({ thread, assistant, tools = [], config = {} }) {
+    const { conversationId, assistantId } = this._normalizeThread(thread);
+    const preparedConfig = {
+      ...config,
+      assistant,
+      toolMetadata: {
+        numero: thread.code,
+        assistant_id: assistantId
+      }
+    };
+
+    logger.info('[OpenAIResponsesProvider] Starting run', {
+      conversationId,
+      assistantId
+    });
+
+    const filter = thread.code ? { code: thread.code, active: true } : null;
+    const result = await this.runConversation({
+      threadId: conversationId,
+      assistantId,
+      tools,
+      ...preparedConfig
+    });
+
+    const completed = result.status === 'completed';
+    const output = await this.getRunText({
+      runId: result.id,
+      fallback: ''
+    });
+
+    if (filter) {
+      await Thread.updateOne(filter, { $set: { run_id: null } });
+    }
+
+    logger.info('[OpenAIResponsesProvider] Run complete', {
+      runId: result.id,
+      completed,
+      toolsExecuted: result.tools_executed?.length || 0
+    });
+
+    return {
+      run: result,
+      completed,
+      output,
+      tools_executed: result.tools_executed || [],
+      retries: result.retries || 0
+    };
+  }
+
   async runConversation({
-    assistantId,
     threadId,
-
+    assistantId,
     additionalMessages = [],
+    instructions = null,
+    additionalInstructions = null,
     toolOutputs = [],
-
-    metadata,
+    metadata = {},
     topP,
     temperature,
     maxOutputTokens,
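cleanupOrphanedFunctionCalls now builds a Set of call_ids that have a function_call_output and deletes any function_call without a match, all in parallel. A small sketch of just the matching step, on made-up items:

// Sketch of the orphan-detection step with hypothetical items; the real method deletes the orphans afterwards.
const items = [
  { id: 'item_1', type: 'function_call', call_id: 'call_a' },
  { id: 'item_2', type: 'function_call_output', call_id: 'call_a' },
  { id: 'item_3', type: 'function_call', call_id: 'call_b' }, // no matching output, so it is orphaned
];

const outputCallIds = new Set(items.filter(i => i.type === 'function_call_output').map(o => o.call_id));
const orphanedCalls = items.filter(i => i.type === 'function_call' && !outputCallIds.has(i.call_id));

console.log(orphanedCalls.map(c => c.id)); // ['item_3']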
@@ -284,56 +266,57 @@ class OpenAIResponsesProvider {
   } = {}) {
     try {
       const id = this._ensurethreadId(threadId);
-
-
-
-      let toolsExecuted = [];
+      let input = this._convertItemsToApiFormat(additionalMessages);
+      let allToolsExecuted = [];
+      let totalRetries = 0;

+      // Handle pending function calls
       if (assistant && toolOutputs.length === 0) {
-
-
-
-
-        if (result.outputs && result.outputs.length > 0) {
-          toolOutputs = result.outputs;
-          toolsExecuted = result.toolsExecuted || [];
-        }
-      } catch (error) {
-        logger.warn('[OpenAIResponsesProvider] Error checking for pending function calls:', error?.message);
-      }
+        const conversationMessages = await this.listMessages({ threadId: id, order: 'desc', limit: 50 });
+        const result = await handlePendingFunctionCallsUtil(assistant, conversationMessages?.data || [], toolMetadata || { thread_id: id, assistant_id: assistantId });
+        toolOutputs = result.outputs || [];
+        allToolsExecuted = result.toolsExecuted || [];
       }

-
-      const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      input = [...input, ...toolOutputs];
+      const makeAPICall = (inputData) => retryWithBackoff(() =>
+        this.client.responses.create({
+          conversation: id,
+          prompt: { id: assistantId },
+          model: model || this.defaults.responseModel,
+          instructions: additionalInstructions || instructions,
+          input: inputData,
+          metadata, top_p: topP, temperature, max_output_tokens: maxOutputTokens,
+          truncation_strategy: truncationStrategy,
+          tools: transformToolsForResponsesAPIUtil(this.variant, tools),
+        }), { providerName: PROVIDER_NAME });
+
+      const { result: response, retries } = await makeAPICall(input);
+      totalRetries += retries;
+      let finalResponse = response;
+
+      if (assistant && response.output) {
+        const functionCalls = response.output.filter(item => item.type === 'function_call');
+
+        if (functionCalls.length > 0) {
+          const { outputs, toolsExecuted } = await handleFunctionCallsUtil(functionCalls, assistant, toolMetadata || { thread_id: id, assistant_id: assistantId });
+
+          input.push(...outputs);
+          allToolsExecuted.push(...toolsExecuted);
+
+          const { result: followUp, retries: followUpRetries } = await makeAPICall(input);
+          totalRetries += followUpRetries;
+          finalResponse = followUp;
+        }
       }

       return {
-        ...
+        ...finalResponse,
         thread_id: id,
         assistant_id: assistantId,
-        object:
-        tools_executed:
+        object: finalResponse.object || 'response',
+        tools_executed: allToolsExecuted,
+        retries: totalRetries,
       };
     } catch (error) {
       logger.error('[OpenAIResponsesProvider] Error running conversation:', error);
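runConversation now consumes retryWithBackoff as a helper that resolves to an object with result and retries, and it makes at most one follow-up responses.create call when the first response contains function_call items. A hedged sketch of that consumption pattern; the retry helper body below is a stand-in with the same { result, retries } contract, not the packaged retryHelper, and runOnce, callApi, and runTools are hypothetical names:

// Stand-in helper: same { result, retries } return shape the provider destructures.
async function retryWithBackoff(operation, { maxRetries = 3, providerName = '' } = {}) {
  let retries = 0;
  for (;;) {
    try {
      return { result: await operation(), retries };
    } catch (err) {
      if (retries >= maxRetries) throw err;
      retries += 1;
      // providerName would feed log context in the real helper
      await new Promise(r => setTimeout(r, 2 ** retries * 100)); // exponential backoff
    }
  }
}

// Usage mirroring the new flow: one API call, then a single follow-up if tools ran.
async function runOnce(callApi, runTools) {
  let totalRetries = 0;
  const { result: response, retries } = await retryWithBackoff(() => callApi([]));
  totalRetries += retries;

  const functionCalls = (response.output || []).filter(item => item.type === 'function_call');
  if (functionCalls.length === 0) return { response, totalRetries };

  const toolOutputs = await runTools(functionCalls);
  const { result: followUp, retries: more } = await retryWithBackoff(() => callApi(toolOutputs));
  return { response: followUp, totalRetries: totalRetries + more };
}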
@@ -346,73 +329,19 @@ class OpenAIResponsesProvider {
     return await this.client.responses.retrieve(id);
   }

-  async
-    return { data: [] };
-  }
-
-  async submitToolOutputs({ threadId, runId, toolOutputs }) {
-    const responseId = this._ensureResponseId(runId);
-    return await this._post(`/responses/${responseId}/submit_tool_outputs`, {
-      tool_outputs: toolOutputs,
-    });
-  }
-
-  async cancelRun({ threadId, runId }) {
-    const responseId = this._ensureResponseId(runId);
-    return this._post(`/responses/${responseId}/cancel`);
-  }
-
-  async getRunText({
-    threadId,
-    runId,
-    messageIndex = 0,
-    contentIndex = 0,
-    fallback = '',
-  } = {}) {
+  async getRunText({ runId, messageIndex = 0, contentIndex = 0, fallback = '' } = {}) {
     const response = await this.client.responses.retrieve(this._ensureResponseId(runId));
     if (!response) return fallback;
-
-    if (response.output_text)
-
-
-
-    const output = Array.isArray(response.output) ? response.output : [];
-    const item = output[messageIndex];
-    if (!item || !Array.isArray(item.content)) {
-      return fallback;
-    }
-    const content = item.content[contentIndex];
-    if (content?.type === 'output_text' && typeof content.text === 'string') {
-      return content.text;
-    }
-
-    return fallback;
+
+    if (response.output_text) return response.output_text;
+
+    const text = response.output?.[messageIndex]?.content?.[contentIndex]?.text;
+    return text || fallback;
   }

   /**
    * Generic helpers
    */
-
-  async createChatCompletion({ model, messages, temperature, maxTokens, topP, metadata, responseFormat } = {}) {
-    return this.client.chat.completions.create({
-      model: model || this.defaults.chatModel,
-      messages,
-      temperature,
-      max_tokens: maxTokens,
-      top_p: topP,
-      metadata,
-      response_format: responseFormat,
-    });
-  }
-
-  async uploadFile({ file, purpose }) {
-    if (!file) {
-      throw new Error('uploadFile requires a readable file stream or object');
-    }
-
-    return this.client.files.create({ file, purpose: purpose || 'assistants' });
-  }
-
   async transcribeAudio({ file, model, language, responseFormat, temperature, prompt } = {}) {
     return this.client.audio.transcriptions.create({
       model: model || this.defaults.transcriptionModel,
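getRunText collapses the old index-walking into optional chaining: it prefers response.output_text and otherwise looks up output[messageIndex].content[contentIndex].text. A tiny sketch of that lookup on a hypothetical Responses payload:

// Sketch of the simplified text extraction, with a made-up response shape.
function getRunText(response, { messageIndex = 0, contentIndex = 0, fallback = '' } = {}) {
  if (!response) return fallback;
  if (response.output_text) return response.output_text;
  const text = response.output?.[messageIndex]?.content?.[contentIndex]?.text;
  return text || fallback;
}

const sample = { output: [{ content: [{ type: 'output_text', text: 'Hello!' }] }] };
console.log(getRunText(sample));                   // 'Hello!'
console.log(getRunText({}, { fallback: 'n/a' }));  // 'n/a'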
@@ -424,78 +353,6 @@ class OpenAIResponsesProvider {
     });
   }

-  async handleRequiresAction(assistant, run, threadId) {
-    return await handleRequiresActionUtil(assistant, run);
-  }
-
-  async checkRunStatus(assistant, thread_id, run_id, retryCount = 0, maxRetries = DEFAULT_MAX_RETRIES, actionHandled = false, toolMetadata = {}, accumulatedTools = []) {
-    try {
-      let run = await this.getRun({ threadId: thread_id, runId: run_id });
-      logger.info(`Status: ${run.status} ${thread_id} ${run_id} (attempt ${retryCount + 1})`);
-
-      if (run.status === 'completed') {
-        return {run, completed: true, tools_executed: accumulatedTools};
-      }
-
-      if (run.status === 'failed' || run.status === 'cancelled' || run.status === 'expired') {
-        return {run, completed: false, tools_executed: accumulatedTools};
-      }
-
-      const needsFunctionCall = run.output?.some(item => item.type === 'function_call');
-      if (needsFunctionCall && !actionHandled) {
-        if (retryCount >= maxRetries) {
-          logger.warn('[OpenAIResponsesProvider] Max retries reached while handling function calls');
-          return {run, completed: false, tools_executed: accumulatedTools};
-        }
-
-        const execMetadata = { ...toolMetadata, thread_id, run_id };
-        const result = await handleRequiresActionUtil(assistant, run, execMetadata);
-        const outputs = result.outputs || [];
-        const toolsExecuted = result.toolsExecuted || [];
-
-        logger.info('[OpenAIResponsesProvider] Function call outputs:', outputs);
-
-        if (outputs.length > 0) {
-          try {
-            await this.submitToolOutputs({
-              threadId: thread_id,
-              runId: run_id,
-              toolOutputs: outputs
-            });
-
-            await new Promise(resolve => setTimeout(resolve, 1000));
-
-            return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, true, toolMetadata, [...accumulatedTools, ...toolsExecuted]);
-          } catch (submitError) {
-            logger.error('[OpenAIResponsesProvider] Error submitting tool outputs:', submitError);
-            if (retryCount < maxRetries) {
-              await new Promise(resolve => setTimeout(resolve, 2000));
-              return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, false, toolMetadata, accumulatedTools);
-            }
-            return {run, completed: false, tools_executed: accumulatedTools};
-          }
-        } else {
-          logger.warn('[OpenAIResponsesProvider] Function calls detected but no outputs generated');
-          return {run, completed: false, tools_executed: accumulatedTools};
-        }
-      }
-
-      if (retryCount < maxRetries) {
-        await new Promise(resolve => setTimeout(resolve, 1000));
-        return this.checkRunStatus(assistant, thread_id, run_id, retryCount + 1, maxRetries, actionHandled, toolMetadata, accumulatedTools);
-      }
-
-      return {run, completed: false, tools_executed: accumulatedTools};
-    } catch (error) {
-      logger.error('[OpenAIResponsesProvider] Error checking run status:', error);
-      return {run: null, completed: false, tools_executed: accumulatedTools};
-    }
-  }
-
-
-  /**
-   * Internal helpers
-   */
   _ensurethreadId(value) {
     const id = this._ensureId(value);
     if (!id.startsWith(CONVERSATION_PREFIX)) {
@@ -525,104 +382,17 @@ class OpenAIResponsesProvider {
       throw new Error('Unable to resolve identifier value');
     }
   }
-  _cleanObject(payload) {
-    if (payload === undefined || payload === null) {
-      return undefined;
-    }
-
-    if (Array.isArray(payload)) {
-      return payload
-        .map((entry) => this._cleanObject(entry))
-        .filter((entry) => !(entry === undefined || entry === null || (typeof entry === 'object' && !Array.isArray(entry) && Object.keys(entry).length === 0)));
-    }
-
-    if (typeof payload !== 'object') {
-      return payload;
-    }
-
-    const cleaned = { ...payload };
-    Object.keys(cleaned).forEach((key) => {
-      const value = cleaned[key];
-      if (value === undefined || value === null) {
-        delete cleaned[key];
-        return;
-      }
-
-      if (typeof value === 'object') {
-        const nested = this._cleanObject(value);
-        if (nested === undefined || nested === null || (typeof nested === 'object' && !Array.isArray(nested) && Object.keys(nested).length === 0)) {
-          delete cleaned[key];
-        } else {
-          cleaned[key] = nested;
-        }
-      }
-    });
-    return cleaned;
-  }
-
-  _normalizeContent(role, content) {
-    const normalized = this._normalizeContentParts(content);
-    return normalized.map((part) => {
-      if (part.type === 'text') {
-        return { type: role === 'assistant' ? 'output_text' : 'input_text', text: part.text };
-      } else if (part.type === 'image_file') {
-        return { type: 'input_image', file_id: part?.image_file?.file_id };
-      }
-      return part;
-    });
-  }
-
-  _normalizeContentParts(content) {
-    if (content === undefined || content === null) {
-      return [{ type: 'text', text: '' }];
-    }
-
-    if (Array.isArray(content)) {
-      return content.map((item) => {
-        if (typeof item === 'string') {
-          return { type: 'text', text: item };
-        }
-        return item;
-      });
-    }
-
-    if (typeof content === 'string') {
-      return [{ type: 'text', text: content }];
-    }
-
-    return [content];
-  }
-
-  _conversationItems(messages = []) {
-    if (!Array.isArray(messages)) return [];
-    return messages
-      .map((message) => {
-        if (!message) return null;
-        const role = message.role || 'user';
-        return {
-          role,
-          content: this._normalizeContent(role, message.content),
-        };
-      })
-      .filter(Boolean);
-  }
-
-  _responseInput(messages = []) {
-    const items = this._conversationItems(messages);
-    return items.length ? items : undefined;
-  }
-
   async _post(path, body, options = {}) {
     return this.client.post(path, {
       ...options,
-      body
+      body,
     });
   }

   async _get(path, query, options = {}) {
     return this.client.get(path, {
       ...options,
-      query
+      query,
     });
   }
