@pipedream/openai 0.1.9 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/actions/cancel-run/cancel-run.mjs +33 -0
- package/actions/chat/chat.mjs +9 -2
- package/actions/classify-items-into-categories/classify-items-into-categories.mjs +1 -1
- package/actions/common/common-helper.mjs +1 -1
- package/actions/common/common.mjs +25 -11
- package/actions/common/lang.mjs +16 -16
- package/actions/create-assistant/create-assistant.mjs +71 -0
- package/actions/create-embeddings/create-embeddings.mjs +2 -2
- package/actions/create-image/create-image.mjs +55 -2
- package/actions/create-message/create-message.mjs +64 -0
- package/actions/create-run/create-run.mjs +64 -0
- package/actions/create-thread/create-thread.mjs +36 -0
- package/actions/create-thread-and-run/create-thread-and-run.mjs +61 -0
- package/actions/create-transcription/create-transcription.mjs +2 -3
- package/actions/delete-file/delete-file.mjs +26 -0
- package/actions/list-files/list-files.mjs +26 -0
- package/actions/list-messages/list-messages.mjs +54 -0
- package/actions/list-run-steps/list-run-steps.mjs +65 -0
- package/actions/list-runs/list-runs.mjs +68 -0
- package/actions/modify-assistant/modify-assistant.mjs +82 -0
- package/actions/modify-message/modify-message.mjs +42 -0
- package/actions/modify-run/modify-run.mjs +42 -0
- package/actions/retrieve-file/retrieve-file.mjs +26 -0
- package/actions/retrieve-file-content/retrieve-file-content.mjs +25 -0
- package/actions/retrieve-run/retrieve-run.mjs +33 -0
- package/actions/retrieve-run-step/retrieve-run-step.mjs +40 -0
- package/actions/send-prompt/send-prompt.mjs +2 -2
- package/actions/submit-tool-outputs-to-run/submit-tool-outputs-to-run.mjs +41 -0
- package/actions/summarize/summarize.mjs +1 -1
- package/actions/translate-text/translate-text.mjs +1 -1
- package/actions/upload-file/upload-file.mjs +41 -0
- package/openai.app.mjs +627 -0
- package/package.json +1 -1
- package/app/openai.app.mjs +0 -180
package/openai.app.mjs
ADDED
|
@@ -0,0 +1,627 @@
|
|
|
1
|
+
import { axios } from "@pipedream/platform";

// Pipedream app definition for the OpenAI REST API (completions, chat,
// images, embeddings, audio, and the beta Assistants API).
export default {
  type: "app",
  app: "openai",
  propDefinitions: {
    completionModelId: {
      label: "Model",
      description: "The ID of the model to use for completions. **This action doesn't support the ChatGPT `turbo` models**. Use the **Chat** action for those, instead.",
      type: "string",
      async options() {
        return (await this.getCompletionModels({})).map((model) => model.id);
      },
      default: "text-davinci-003",
    },
    chatCompletionModelId: {
      label: "Model",
      description: "The ID of the model to use for chat completions",
      type: "string",
      async options() {
        return (await this.getChatCompletionModels({})).map((model) => model.id);
      },
      default: "gpt-3.5-turbo",
    },
    embeddingsModelId: {
      label: "Model",
      description: "The ID of the embeddings model to use. OpenAI recommends using `text-embedding-ada-002` for nearly all use cases: \"It's better, cheaper, and simpler to use. [Read the blog post announcement](https://openai.com/blog/new-and-improved-embedding-model)\".",
      type: "string",
      async options() {
        return (await this.getEmbeddingsModels({})).map((model) => model.id);
      },
      default: "text-embedding-ada-002",
    },
    assistantModel: {
      type: "string",
      label: "Model",
      description: "The ID of the model to use for the assistant",
      async options() {
        const models = await this.models({});
        return models.map((model) => ({
          label: model.id,
          value: model.id,
        }));
      },
    },
    assistant: {
      type: "string",
      label: "Assistant",
      description: "Select an assistant to modify",
      async options() {
        const assistants = await this.listAssistants({});
        return assistants.map((assistant) => ({
          label: assistant.name || assistant.id,
          value: assistant.id,
        }));
      },
    },
    name: {
      type: "string",
      label: "Name",
      description: "The name of the assistant.",
      optional: true,
    },
    description: {
      type: "string",
      label: "Description",
      description: "The description of the assistant.",
    },
    threadId: {
      type: "string",
      label: "Thread ID",
      description: "The unique identifier for the thread.",
    },
    runId: {
      type: "string",
      label: "Run ID",
      description: "The unique identifier for the run.",
    },
    assistantId: {
      type: "string",
      label: "Assistant ID",
      description: "The unique identifier for the assistant.",
    },
    model: {
      type: "string",
      label: "Model",
      description: "The ID of the model to use.",
      optional: true,
    },
    instructions: {
      type: "string",
      label: "Instructions",
      description: "The system instructions that the assistant uses.",
      optional: true,
    },
    tools: {
      type: "string[]",
      label: "Tools",
      description: "A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant",
      options: [
        "code_interpreter",
        "retrieval",
      ],
      optional: true,
    },
    file_ids: {
      type: "string[]",
      label: "File IDs",
      description: "A list of [file](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant.",
      optional: true,
    },
    metadata: {
      type: "object",
      label: "Metadata",
      description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.",
      optional: true,
    },
    messages: {
      type: "string[]",
      label: "Messages",
      description: "An array of messages to start the thread with.",
    },
    messageId: {
      type: "string",
      label: "Message ID",
      description: "The ID of the message to modify",
    },
    content: {
      type: "string",
      label: "Content",
      description: "The content of the message",
    },
    role: {
      type: "string",
      label: "Role",
      description: "The role of the entity creating the message",
      options: [
        {
          label: "User",
          value: "user",
        },
      ],
      default: "user",
    },
    fileIds: {
      type: "string[]",
      label: "File IDs",
      description: "List of file IDs to attach to the message",
      optional: true,
    },
    toolOutputs: {
      type: "string[]",
      label: "Tool Outputs",
      description: "The outputs from the tool calls.",
    },
    limit: {
      type: "integer",
      label: "Limit",
      description: "Number of items to retrieve.",
      optional: true,
    },
    order: {
      type: "string",
      label: "Order",
      description: "Sort order by the created_at timestamp of the objects.",
      options: [
        {
          label: "Ascending",
          value: "asc",
        },
        {
          label: "Descending",
          value: "desc",
        },
      ],
      optional: true,
    },
    after: {
      type: "string",
      label: "After",
      description: "A cursor for use in pagination to fetch the next set of items.",
      optional: true,
    },
    before: {
      type: "string",
      label: "Before",
      description: "A cursor for use in pagination, identifying the message ID to end the list before",
      optional: true,
    },
    file_id: {
      type: "string",
      label: "File ID",
      description: "The ID of the file to use for this request.",
      async options({ prevContext }) {
        // listFiles resolves to the raw list response ({ object, data }),
        // so unwrap the `data` array before building the options — mapping
        // the response object directly would throw.
        const { data: files = [] } = await this.listFiles({
          purpose: prevContext
            ? prevContext.purpose
            : undefined,
        });
        return files.map((file) => ({
          label: file.filename,
          value: file.id,
        }));
      },
    },
    purpose: {
      type: "string",
      label: "Purpose",
      description: "The intended purpose of the file.",
      optional: true,
    },
    file: {
      type: "string",
      label: "File",
      description: "The file content to be uploaded, represented as a string. The size of individual files can be a maximum of 512mb.",
    },
  },
  methods: {
    _apiKey() {
      return this.$auth.api_key;
    },
    _baseApiUrl() {
      return "https://api.openai.com/v1";
    },
    // Headers sent on every request
    _commonHeaders() {
      return {
        "Authorization": `Bearer ${this._apiKey()}`,
        "Accept": "application/json",
        "User-Agent": "@PipedreamHQ/pipedream v1.0",
      };
    },
    // The Assistants API is in beta and requires an explicit opt-in header
    _betaHeaders() {
      return {
        ...this._commonHeaders(),
        "OpenAI-Beta": "assistants=v1",
      };
    },
    // Accepts metadata/tool-output values as either a JSON string (parsed)
    // or an already-parsed object (returned as-is); undefined passes through.
    _parseJsonInput(value) {
      return typeof value === "string"
        ? JSON.parse(value)
        : value;
    },
    /**
     * Core request helper. `$` is the Pipedream step context (defaults to the
     * app itself), `path` is appended to the base API URL, and any other axios
     * options (method, data, params, headers) are spread in last so callers
     * can override the defaults.
     */
    async _makeRequest({
      $ = this,
      path,
      ...args
    } = {}) {
      return axios($, {
        url: `${this._baseApiUrl()}${path}`,
        headers: {
          ...this._commonHeaders(),
        },
        maxBodyLength: Infinity,
        ...args,
      });
    },
    // Returns all models available to the account, sorted by ID
    async models({ $ }) {
      const { data: models } = await this._makeRequest({
        $,
        path: "/models",
      });
      return models.sort((a, b) => a?.id.localeCompare(b?.id));
    },
    async getChatCompletionModels({ $ }) {
      const models = await this.models({
        $,
      });
      return models.filter((model) => model.id.match(/turbo|gpt/gi));
    },
    async getCompletionModels({ $ }) {
      const models = await this.models({
        $,
      });
      return models.filter((model) => {
        const { id } = model;
        // Legacy completion model families, excluding chat/audio/embedding variants
        return (
          id.match(/^(?=.*\b(babbage|davinci|ada|curie)\b)(?!.*\b(whisper|turbo|edit|insert|search|embedding|similarity|001)\b).*$/gm)
        );
      });
    },
    async getEmbeddingsModels({ $ }) {
      const models = await this.models({
        $,
      });
      return models.filter((model) => {
        const { id } = model;
        return (
          id.match(/^(text-embedding-ada-002|.*-(davinci|curie|babbage|ada)-.*-001)$/gm)
        );
      });
    },
    /**
     * Shared POST helper for /completions and /chat/completions. Hoists the
     * first choice to a top-level `generated_text` (completions) or
     * `generated_message` (chat) for convenient downstream use.
     */
    async _makeCompletion({
      $, path, args,
    }) {
      const data = await this._makeRequest({
        $,
        path,
        method: "POST",
        data: args,
      });

      // For completions, return the text of the first choice at the top-level
      let generated_text;
      if (path === "/completions") {
        const { choices } = data;
        generated_text = choices?.[0]?.text;
      }
      // For chat completions, return the assistant message at the top-level
      let generated_message;
      if (path === "/chat/completions") {
        const { choices } = data;
        generated_message = choices?.[0]?.message;
      }

      return {
        generated_text,
        generated_message,
        ...data,
      };
    },
    async createCompletion({
      $, args,
    }) {
      return this._makeCompletion({
        $,
        path: "/completions",
        args,
      });
    },
    async createChatCompletion({
      $, args,
    }) {
      return this._makeCompletion({
        $,
        path: "/chat/completions",
        args,
      });
    },
    async createImage({
      $, args,
    }) {
      return this._makeRequest({
        $,
        path: "/images/generations",
        data: args,
        method: "POST",
      });
    },
    async createEmbeddings({
      $, args,
    }) {
      return this._makeRequest({
        $,
        path: "/embeddings",
        data: args,
        method: "POST",
      });
    },
    // `form` is a prepared multipart form (audio file + params)
    async createTranscription({
      $, form,
    }) {
      return this._makeRequest({
        $,
        path: "/audio/transcriptions",
        method: "POST",
        headers: {
          ...this._commonHeaders(),
          "Content-Type": `multipart/form-data; boundary=${form._boundary}`,
        },
        data: form,
      });
    },
    async listAssistants({ $ }) {
      const { data: assistants } = await this._makeRequest({
        $,
        path: "/assistants",
        headers: this._betaHeaders(),
      });
      return assistants;
    },
    async createAssistant({
      $,
      model,
      name,
      description,
      instructions,
      tools,
      file_ids,
      metadata,
    }) {
      return this._makeRequest({
        $,
        method: "POST",
        path: "/assistants",
        headers: this._betaHeaders(),
        data: {
          model,
          name,
          description,
          instructions,
          tools,
          file_ids,
          metadata,
        },
      });
    },
    async modifyAssistant({
      $,
      assistant,
      model,
      name,
      description,
      instructions,
      tools,
      file_ids,
      metadata,
    }) {
      return this._makeRequest({
        $,
        method: "POST",
        path: `/assistants/${assistant}`,
        headers: this._betaHeaders(),
        data: {
          model,
          name,
          description,
          instructions,
          tools,
          file_ids,
          metadata,
        },
      });
    },
    async createThread({
      $,
      messages,
      metadata,
    }) {
      return this._makeRequest({
        $,
        method: "POST",
        path: "/threads",
        headers: this._betaHeaders(),
        data: {
          messages,
          metadata,
        },
      });
    },
    async createMessage({
      $, threadId, content, role, fileIds, metadata,
    }) {
      return this._makeRequest({
        $,
        method: "POST",
        path: `/threads/${threadId}/messages`,
        headers: this._betaHeaders(),
        data: {
          role,
          content,
          file_ids: fileIds,
          metadata: this._parseJsonInput(metadata),
        },
      });
    },
    async listMessages({
      $, threadId, limit, order, after, before,
    }) {
      return this._makeRequest({
        $,
        path: `/threads/${threadId}/messages`,
        headers: this._betaHeaders(),
        params: {
          limit,
          order,
          after,
          before,
        },
      });
    },
    async modifyMessage({
      $, threadId, messageId, metadata,
    }) {
      return this._makeRequest({
        $,
        // The Assistants API modifies objects via POST, not PATCH
        method: "POST",
        headers: this._betaHeaders(),
        path: `/threads/${threadId}/messages/${messageId}`,
        data: {
          metadata: this._parseJsonInput(metadata),
        },
      });
    },
    async createRun({
      $, threadId, assistantId, ...opts
    }) {
      return this._makeRequest({
        $,
        path: `/threads/${threadId}/runs`,
        method: "POST",
        headers: this._betaHeaders(),
        data: {
          assistant_id: assistantId,
          ...opts,
        },
      });
    },
    async retrieveRun({
      $, threadId, runId,
    }) {
      return this._makeRequest({
        $,
        headers: this._betaHeaders(),
        path: `/threads/${threadId}/runs/${runId}`,
      });
    },
    async modifyRun({
      $, threadId, runId, ...opts
    }) {
      return this._makeRequest({
        $,
        path: `/threads/${threadId}/runs/${runId}`,
        headers: this._betaHeaders(),
        // The Assistants API modifies objects via POST, not PATCH
        method: "POST",
        data: opts,
      });
    },
    async listRuns({
      $, threadId, ...opts
    }) {
      return this._makeRequest({
        $,
        path: `/threads/${threadId}/runs`,
        headers: this._betaHeaders(),
        params: opts,
      });
    },
    async submitToolOutputs({
      $, threadId, runId, toolOutputs,
    }) {
      // Entries may be JSON strings or already-parsed objects; don't pass
      // JSON.parse directly to map (the index would be sent as the reviver).
      const parsedToolOutputs = toolOutputs.map((output) => this._parseJsonInput(output));
      return this._makeRequest({
        $,
        path: `/threads/${threadId}/runs/${runId}/submit_tool_outputs`,
        headers: this._betaHeaders(),
        method: "POST",
        data: {
          tool_outputs: parsedToolOutputs,
        },
      });
    },
    async cancelRun({
      $, threadId, runId,
    }) {
      return this._makeRequest({
        $,
        path: `/threads/${threadId}/runs/${runId}/cancel`,
        headers: this._betaHeaders(),
        method: "POST",
      });
    },
    async createThreadAndRun({
      $, assistantId, ...opts
    }) {
      return this._makeRequest({
        $,
        path: "/threads/runs",
        headers: this._betaHeaders(),
        method: "POST",
        data: {
          assistant_id: assistantId,
          ...opts,
        },
      });
    },
    async retrieveRunStep({
      $, threadId, runId, stepId,
    }) {
      return this._makeRequest({
        $,
        path: `/threads/${threadId}/runs/${runId}/steps/${stepId}`,
        headers: this._betaHeaders(),
      });
    },
    async listRunSteps({
      $, threadId, runId, ...opts
    }) {
      return this._makeRequest({
        $,
        path: `/threads/${threadId}/runs/${runId}/steps`,
        headers: this._betaHeaders(),
        params: opts,
      });
    },
    // Resolves to the raw list response ({ object, data }); callers unwrap `data`
    async listFiles({
      $, purpose,
    } = {}) {
      return this._makeRequest({
        $,
        path: "/files",
        headers: this._betaHeaders(),
        params: {
          purpose,
        },
      });
    },
    async uploadFile({
      $, file, purpose,
    }) {
      // NOTE(review): the /files endpoint expects multipart/form-data
      // (cf. createTranscription); a plain JSON body likely fails — confirm
      // and switch to a form upload.
      return this._makeRequest({
        $,
        method: "POST",
        path: "/files",
        headers: this._betaHeaders(),
        data: {
          file,
          purpose,
        },
      });
    },
    async deleteFile({
      $, file_id,
    }) {
      return this._makeRequest({
        $,
        method: "DELETE",
        headers: this._betaHeaders(),
        path: `/files/${file_id}`,
      });
    },
    async retrieveFile({
      $, file_id,
    }) {
      return this._makeRequest({
        $,
        headers: this._betaHeaders(),
        path: `/files/${file_id}`,
      });
    },
    async retrieveFileContent({
      $, file_id,
    }) {
      return this._makeRequest({
        $,
        headers: this._betaHeaders(),
        path: `/files/${file_id}/content`,
      });
    },
  },
};
|