@pipedream/openai 0.5.0 → 0.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/README.md +89 -20
  2. package/actions/analyze-image-content/analyze-image-content.mjs +131 -0
  3. package/actions/cancel-run/cancel-run.mjs +2 -1
  4. package/actions/chat/chat.mjs +5 -7
  5. package/actions/chat-with-assistant/chat-with-assistant.mjs +129 -0
  6. package/actions/classify-items-into-categories/classify-items-into-categories.mjs +27 -10
  7. package/actions/common/common-assistants.mjs +128 -0
  8. package/actions/common/common-helper.mjs +10 -3
  9. package/actions/{create-speech/create-speech.mjs → convert-text-to-speech/convert-text-to-speech.mjs} +3 -3
  10. package/actions/create-assistant/create-assistant.mjs +14 -22
  11. package/actions/create-batch/create-batch.mjs +78 -0
  12. package/actions/create-embeddings/create-embeddings.mjs +3 -3
  13. package/actions/create-fine-tuning-job/create-fine-tuning-job.mjs +14 -3
  14. package/actions/create-image/create-image.mjs +42 -88
  15. package/actions/create-moderation/create-moderation.mjs +35 -0
  16. package/actions/create-thread/create-thread.mjs +110 -8
  17. package/actions/create-transcription/create-transcription.mjs +4 -4
  18. package/actions/delete-file/delete-file.mjs +6 -5
  19. package/actions/list-files/list-files.mjs +3 -2
  20. package/actions/list-messages/list-messages.mjs +18 -21
  21. package/actions/list-run-steps/list-run-steps.mjs +18 -25
  22. package/actions/list-runs/list-runs.mjs +17 -34
  23. package/actions/modify-assistant/modify-assistant.mjs +13 -23
  24. package/actions/retrieve-file/retrieve-file.mjs +6 -5
  25. package/actions/retrieve-file-content/retrieve-file-content.mjs +29 -6
  26. package/actions/retrieve-run/retrieve-run.mjs +2 -1
  27. package/actions/retrieve-run-step/retrieve-run-step.mjs +8 -1
  28. package/actions/send-prompt/send-prompt.mjs +8 -3
  29. package/actions/submit-tool-outputs-to-run/submit-tool-outputs-to-run.mjs +18 -3
  30. package/actions/summarize/summarize.mjs +11 -10
  31. package/actions/translate-text/translate-text.mjs +9 -5
  32. package/actions/upload-file/upload-file.mjs +1 -1
  33. package/common/constants.mjs +154 -3
  34. package/openai.app.mjs +230 -269
  35. package/package.json +1 -1
  36. package/sources/{common.mjs → common/common.mjs} +11 -9
  37. package/sources/new-batch-completed/new-batch-completed.mjs +46 -0
  38. package/sources/new-batch-completed/test-event.mjs +29 -0
  39. package/sources/new-file-created/new-file-created.mjs +5 -3
  40. package/sources/new-file-created/test-event.mjs +10 -0
  41. package/sources/new-fine-tuning-job-created/new-fine-tuning-job-created.mjs +5 -3
  42. package/sources/new-fine-tuning-job-created/test-event.mjs +19 -0
  43. package/sources/new-run-state-changed/new-run-state-changed.mjs +4 -2
  44. package/sources/new-run-state-changed/test-event.mjs +36 -0
  45. package/actions/common/constants.mjs +0 -14
  46. package/actions/create-message/create-message.mjs +0 -64
  47. package/actions/create-run/create-run.mjs +0 -65
  48. package/actions/create-thread-and-run/create-thread-and-run.mjs +0 -62
  49. package/actions/modify-message/modify-message.mjs +0 -42
  50. package/actions/modify-run/modify-run.mjs +0 -45
package/openai.app.mjs CHANGED
@@ -1,5 +1,5 @@
1
1
  import { axios } from "@pipedream/platform";
2
- import { FINE_TUNING_MODEL_OPTIONS } from "./common/constants.mjs";
2
+ import constants from "./common/constants.mjs";
3
3
 
4
4
  export default {
5
5
  type: "app",
@@ -12,7 +12,7 @@ export default {
12
12
  async options() {
13
13
  return (await this.getCompletionModels({})).map((model) => model.id);
14
14
  },
15
- default: "text-davinci-003",
15
+ default: "davinci-002",
16
16
  },
17
17
  chatCompletionModelId: {
18
18
  label: "Model",
@@ -30,30 +30,41 @@ export default {
30
30
  async options() {
31
31
  return (await this.getEmbeddingsModels({})).map((model) => model.id);
32
32
  },
33
- default: "text-embedding-ada-002",
33
+ default: "text-embedding-3-small",
34
34
  },
35
35
  assistantModel: {
36
36
  type: "string",
37
37
  label: "Model",
38
38
  description: "The ID of the model to use for the assistant",
39
39
  async options() {
40
- const models = await this.models({});
41
- return models.map((model) => ({
42
- label: model.id,
43
- value: model.id,
44
- }));
40
+ const models = (await this.models({})).filter(({ id }) => (id.includes("gpt-3.5-turbo") || id.includes("gpt-4-turbo") || id.includes("gpt-4o")) && (id !== "gpt-3.5-turbo-0301"));
41
+ return models.map(({ id }) => id);
45
42
  },
46
43
  },
47
44
  assistant: {
48
45
  type: "string",
49
46
  label: "Assistant",
50
47
  description: "Select an assistant to modify",
51
- async options() {
52
- const assistants = await this.listAssistants({});
53
- return assistants.map((assistant) => ({
54
- label: assistant.name || assistant.id,
55
- value: assistant.id,
56
- }));
48
+ async options({ prevContext }) {
49
+ const params = prevContext?.after
50
+ ? {
51
+ after: prevContext.after,
52
+ }
53
+ : {};
54
+ const {
55
+ data: assistants, last_id: after,
56
+ } = await this.listAssistants({
57
+ params,
58
+ });
59
+ return {
60
+ options: assistants.map((assistant) => ({
61
+ label: assistant.name || assistant.id,
62
+ value: assistant.id,
63
+ })),
64
+ context: {
65
+ after,
66
+ },
67
+ };
57
68
  },
58
69
  },
59
70
  name: {
@@ -66,41 +77,68 @@ export default {
66
77
  type: "string",
67
78
  label: "Description",
68
79
  description: "The description of the assistant.",
80
+ optional: true,
69
81
  },
70
82
  threadId: {
71
83
  type: "string",
72
84
  label: "Thread ID",
73
- description: "The unique identifier for the thread.",
85
+ description: "The unique identifier for the thread. Example: `thread_abc123`. To locate the thread ID, make sure your OpenAI Threads setting (Settings -> Organization/Personal -> General -> Features and capabilities -> Threads) is set to \"Visible to organization owners\" or \"Visible to everyone\". You can then access the list of threads and click on individual threads to reveal their IDs",
74
86
  },
75
87
  runId: {
76
88
  type: "string",
77
89
  label: "Run ID",
78
90
  description: "The unique identifier for the run.",
79
- async options({ threadId }) {
91
+ async options({
92
+ threadId, prevContext,
93
+ }) {
80
94
  if (!threadId) {
81
95
  return [];
82
96
  }
83
- const { data: runs } = await this.listRuns({
97
+ const params = prevContext?.after
98
+ ? {
99
+ after: prevContext.after,
100
+ }
101
+ : {};
102
+ const {
103
+ data: runs, last_id: after,
104
+ } = await this.listRuns({
84
105
  threadId,
106
+ params,
85
107
  });
86
- return runs.map(({ id }) => id);
108
+ return {
109
+ options: runs.map(({ id }) => id),
110
+ context: {
111
+ after,
112
+ },
113
+ };
87
114
  },
88
115
  },
89
116
  stepId: {
90
117
  type: "string",
91
118
  label: "Step ID",
92
119
  description: "The unique identifier for the step.",
93
- },
94
- assistantId: {
95
- type: "string",
96
- label: "Assistant ID",
97
- description: "The unique identifier for the assistant.",
98
- },
99
- model: {
100
- type: "string",
101
- label: "Model",
102
- description: "The ID of the model to use.",
103
- optional: true,
120
+ async options({
121
+ threadId, runId, prevContext,
122
+ }) {
123
+ const params = prevContext?.after
124
+ ? {
125
+ after: prevContext.after,
126
+ }
127
+ : {};
128
+ const {
129
+ data, last_id: after,
130
+ } = await this.listRunSteps({
131
+ threadId,
132
+ runId,
133
+ params,
134
+ });
135
+ return {
136
+ options: data?.map(({ id }) => id) || [],
137
+ context: {
138
+ after,
139
+ },
140
+ };
141
+ },
104
142
  },
105
143
  instructions: {
106
144
  type: "string",
@@ -108,18 +146,6 @@ export default {
108
146
  description: "The system instructions that the assistant uses.",
109
147
  optional: true,
110
148
  },
111
- tools: {
112
- type: "string[]",
113
- label: "Tools",
114
- description: "Each tool should be a valid JSON object. [See the documentation](https://platform.openai.com/docs/api-reference/assistants/createAssistant#assistants-createassistant-tools) for more information. Examples of function tools [can be found here](https://cookbook.openai.com/examples/how_to_call_functions_with_chat_models#basic-concepts).",
115
- optional: true,
116
- },
117
- file_ids: {
118
- type: "string[]",
119
- label: "File IDs",
120
- description: "A list of [file](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant.",
121
- optional: true,
122
- },
123
149
  metadata: {
124
150
  type: "object",
125
151
  label: "Metadata",
@@ -131,11 +157,6 @@ export default {
131
157
  label: "Messages",
132
158
  description: "An array of messages to start the thread with.",
133
159
  },
134
- messageId: {
135
- type: "string",
136
- label: "Message ID",
137
- description: "The ID of the message to modify",
138
- },
139
160
  content: {
140
161
  type: "string",
141
162
  label: "Content",
@@ -145,24 +166,13 @@ export default {
145
166
  type: "string",
146
167
  label: "Role",
147
168
  description: "The role of the entity creating the message",
148
- options: [
149
- {
150
- label: "User",
151
- value: "user",
152
- },
153
- ],
169
+ options: constants.USER_OPTIONS,
154
170
  default: "user",
155
171
  },
156
- fileIds: {
157
- type: "string[]",
158
- label: "File IDs",
159
- description: "List of file IDs to attach to the message",
160
- optional: true,
161
- },
162
172
  toolOutputs: {
163
173
  type: "string[]",
164
174
  label: "Tool Outputs",
165
- description: "The outputs from the tool calls.",
175
+ description: "The outputs from the tool calls. Each object in the array should contain properties `tool_call_id` and `output`.",
166
176
  },
167
177
  limit: {
168
178
  type: "integer",
@@ -174,44 +184,21 @@ export default {
174
184
  type: "string",
175
185
  label: "Order",
176
186
  description: "Sort order by the created_at timestamp of the objects.",
177
- options: [
178
- {
179
- label: "Ascending",
180
- value: "asc",
181
- },
182
- {
183
- label: "Descending",
184
- value: "desc",
185
- },
186
- ],
187
- optional: true,
188
- },
189
- after: {
190
- type: "string",
191
- label: "After",
192
- description: "A cursor for use in pagination to fetch the next set of items.",
193
- optional: true,
194
- },
195
- before: {
196
- type: "string",
197
- label: "Before",
198
- description: "A cursor for use in pagination, identifying the message ID to end the list before",
187
+ options: constants.ORDER_OPTIONS,
199
188
  optional: true,
200
189
  },
201
- file_id: {
190
+ fileId: {
202
191
  type: "string",
203
192
  label: "File ID",
204
193
  description: "The ID of the file to use for this request.",
205
- async options({ prevContext }) {
206
- const files = await this.listFiles({
207
- purpose: prevContext
208
- ? prevContext.purpose
209
- : undefined,
194
+ async options({ purpose }) {
195
+ const { data: files } = await this.listFiles({
196
+ purpose: purpose || undefined,
210
197
  });
211
- return files.map((file) => ({
198
+ return files?.map((file) => ({
212
199
  label: file.filename,
213
200
  value: file.id,
214
- }));
201
+ })) || [];
215
202
  },
216
203
  },
217
204
  file: {
@@ -223,25 +210,19 @@ export default {
223
210
  type: "string",
224
211
  label: "Purpose",
225
212
  description: "The intended purpose of the uploaded file. Use 'fine-tune' for fine-tuning and 'assistants' for assistants and messages.",
226
- options: [
227
- "fine-tune",
228
- "assistants",
229
- ],
213
+ options: constants.PURPOSES,
230
214
  },
231
215
  ttsModel: {
232
216
  type: "string",
233
217
  label: "Model",
234
218
  description: "One of the available [TTS models](https://platform.openai.com/docs/models/tts). `tts-1` is optimized for speed, while `tts-1-hd` is optimized for quality.",
235
- options: [
236
- "tts-1",
237
- "tts-1-hd",
238
- ],
219
+ options: constants.TTS_MODELS,
239
220
  },
240
221
  fineTuningModel: {
241
222
  type: "string",
242
223
  label: "Fine Tuning Model",
243
224
  description: "The name of the model to fine-tune. [See the supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).",
244
- options: FINE_TUNING_MODEL_OPTIONS,
225
+ options: constants.FINE_TUNING_MODEL_OPTIONS,
245
226
  },
246
227
  input: {
247
228
  type: "string",
@@ -252,25 +233,13 @@ export default {
252
233
  type: "string",
253
234
  label: "Voice",
254
235
  description: "The voice to use when generating the audio.",
255
- options: [
256
- "alloy",
257
- "echo",
258
- "fable",
259
- "onyx",
260
- "nova",
261
- "shimmer",
262
- ],
236
+ options: constants.VOICES,
263
237
  },
264
238
  responseFormat: {
265
239
  type: "string",
266
240
  label: "Response Format",
267
- description: "The format to audio in.",
268
- options: [
269
- "mp3",
270
- "opus",
271
- "aac",
272
- "flac",
273
- ],
241
+ description: "The format to generate audio in. Supported formats are mp3, opus, aac, flac, wav, and pcm.",
242
+ options: constants.AUDIO_RESPONSE_FORMATS,
274
243
  optional: true,
275
244
  },
276
245
  speed: {
@@ -280,11 +249,6 @@ export default {
280
249
  default: "1.0",
281
250
  optional: true,
282
251
  },
283
- trainingFile: {
284
- type: "string",
285
- label: "Training File",
286
- description: "The ID of an uploaded file that contains training data. You can use the **Upload File** action and reference the returned ID here.",
287
- },
288
252
  },
289
253
  methods: {
290
254
  _apiKey() {
@@ -300,22 +264,23 @@ export default {
300
264
  "User-Agent": "@PipedreamHQ/pipedream v1.0",
301
265
  };
302
266
  },
303
- _betaHeaders() {
267
+ _betaHeaders(version = "v1") {
304
268
  return {
305
269
  ...this._commonHeaders(),
306
- "OpenAI-Beta": "assistants=v1",
270
+ "OpenAI-Beta": `assistants=${version}`,
307
271
  };
308
272
  },
309
- async _makeRequest({
273
+ _makeRequest({
310
274
  $ = this,
311
275
  path,
276
+ headers,
312
277
  ...args
313
278
  } = {}) {
314
279
  return axios($, {
315
280
  ...args,
316
281
  url: `${this._baseApiUrl()}${path}`,
317
282
  headers: {
318
- ...args.headers,
283
+ ...headers,
319
284
  ...this._commonHeaders(),
320
285
  },
321
286
  maxBodyLength: Infinity,
@@ -352,18 +317,17 @@ export default {
352
317
  return models.filter((model) => {
353
318
  const { id } = model;
354
319
  return (
355
- id.match(/^(text-embedding-ada-002|.*-(davinci|curie|babbage|ada)-.*-001)$/gm)
320
+ id.match(/^(text-embedding-ada-002|text-embedding-3.*|.*-(davinci|curie|babbage|ada)-.*-001)$/gm)
356
321
  );
357
322
  });
358
323
  },
359
324
  async _makeCompletion({
360
- $, path, args,
325
+ path, ...args
361
326
  }) {
362
327
  const data = await this._makeRequest({
363
- $,
364
328
  path,
365
329
  method: "POST",
366
- data: args,
330
+ ...args,
367
331
  });
368
332
 
369
333
  // For completions, return the text of the first choice at the top-level
@@ -385,45 +349,33 @@ export default {
385
349
  ...data,
386
350
  };
387
351
  },
388
- async createCompletion({
389
- $, args,
390
- }) {
352
+ createCompletion(args = {}) {
391
353
  return this._makeCompletion({
392
- $,
393
354
  path: "/completions",
394
- args,
355
+ ...args,
395
356
  });
396
357
  },
397
- async createChatCompletion({
398
- $, args,
399
- }) {
358
+ createChatCompletion(args = {}) {
400
359
  return this._makeCompletion({
401
- $,
402
360
  path: "/chat/completions",
403
- args,
361
+ ...args,
404
362
  });
405
363
  },
406
- async createImage({
407
- $, args,
408
- }) {
364
+ createImage(args = {}) {
409
365
  return this._makeRequest({
410
- $,
411
366
  path: "/images/generations",
412
- data: args,
413
367
  method: "POST",
368
+ ...args,
414
369
  });
415
370
  },
416
- async createEmbeddings({
417
- $, args,
418
- }) {
371
+ createEmbeddings(args = {}) {
419
372
  return this._makeRequest({
420
- $,
421
373
  path: "/embeddings",
422
- data: args,
423
374
  method: "POST",
375
+ ...args,
424
376
  });
425
377
  },
426
- async createTranscription({
378
+ createTranscription({
427
379
  $, form,
428
380
  }) {
429
381
  return this._makeRequest({
@@ -437,85 +389,41 @@ export default {
437
389
  data: form,
438
390
  });
439
391
  },
440
- async listAssistants({ $ }) {
441
- const { data: assistants } = await this._makeRequest({
442
- $,
392
+ listAssistants(args = {}) {
393
+ return this._makeRequest({
443
394
  path: "/assistants",
444
395
  headers: this._betaHeaders(),
396
+ ...args,
445
397
  });
446
- return assistants;
447
- },
448
- async createAssistant({
449
- $,
450
- model,
451
- name,
452
- description,
453
- instructions,
454
- tools,
455
- file_ids,
456
- metadata,
457
- }) {
398
+ },
399
+ createAssistant(args = {}) {
458
400
  return this._makeRequest({
459
- $,
460
401
  method: "POST",
461
402
  path: "/assistants",
462
- headers: this._betaHeaders(),
463
- data: {
464
- model,
465
- name,
466
- description,
467
- instructions,
468
- tools,
469
- file_ids,
470
- metadata,
471
- },
403
+ headers: this._betaHeaders("v2"),
404
+ ...args,
472
405
  });
473
406
  },
474
- async modifyAssistant({
475
- $,
476
- assistant,
477
- model,
478
- name,
479
- description,
480
- instructions,
481
- tools,
482
- file_ids,
483
- metadata,
407
+ modifyAssistant({
408
+ assistant, ...args
484
409
  }) {
485
410
  return this._makeRequest({
486
- $,
487
411
  method: "POST",
488
412
  path: `/assistants/${assistant}`,
489
- headers: this._betaHeaders(),
490
- data: {
491
- model,
492
- name,
493
- description,
494
- instructions,
495
- tools,
496
- file_ids,
497
- metadata,
498
- },
413
+ headers: this._betaHeaders("v2"),
414
+ ...args,
499
415
  });
500
416
  },
501
- async createThread({
502
- $,
503
- messages,
504
- metadata,
505
- }) {
417
+ createThread(args = {}) {
506
418
  return this._makeRequest({
507
- $,
508
419
  method: "POST",
509
420
  path: "/threads",
510
- headers: this._betaHeaders(),
511
- data: {
512
- messages,
513
- metadata,
514
- },
421
+ headers: this._betaHeaders("v2"),
422
+ ...args,
515
423
  });
516
424
  },
517
- async createMessage({
518
- threadId, content, role, fileIds, metadata,
425
+ createMessage({
426
+ threadId, metadata, ...args
519
427
  }) {
520
428
  const parsedMetadata = metadata
521
429
  ? JSON.parse(metadata)
@@ -525,29 +433,23 @@ export default {
525
433
  path: `/threads/${threadId}/messages`,
526
434
  headers: this._betaHeaders(),
527
435
  data: {
528
- role,
529
- content,
530
- file_ids: fileIds,
436
+ ...args.data,
531
437
  metadata: parsedMetadata,
532
438
  },
439
+ ...args,
533
440
  });
534
441
  },
535
- async listMessages({
536
- threadId, limit, order, after, before,
442
+ listMessages({
443
+ threadId, ...args
537
444
  }) {
538
445
  return this._makeRequest({
539
446
  path: `/threads/${threadId}/messages`,
540
- headers: this._betaHeaders(),
541
- params: {
542
- limit,
543
- order,
544
- after,
545
- before,
546
- },
447
+ headers: this._betaHeaders("v2"),
448
+ ...args,
547
449
  });
548
450
  },
549
- async modifyMessage({
550
- threadId, messageId, metadata,
451
+ modifyMessage({
452
+ threadId, messageId, metadata, ...args
551
453
  }) {
552
454
  const parsedMetadata = metadata
553
455
  ? JSON.parse(metadata)
@@ -559,111 +461,107 @@ export default {
559
461
  data: {
560
462
  metadata: parsedMetadata,
561
463
  },
464
+ ...args,
562
465
  });
563
466
  },
564
- async createRun({
565
- threadId, assistantId, ...opts
467
+ createRun({
468
+ threadId, ...args
566
469
  }) {
567
470
  return this._makeRequest({
568
471
  path: `/threads/${threadId}/runs`,
569
472
  method: "POST",
570
- headers: this._betaHeaders(),
571
- data: {
572
- assistant_id: assistantId,
573
- ...opts,
574
- },
473
+ headers: this._betaHeaders("v2"),
474
+ ...args,
575
475
  });
576
476
  },
577
- async retrieveRun({
578
- threadId, runId,
477
+ retrieveRun({
478
+ threadId, runId, ...args
579
479
  }) {
580
480
  return this._makeRequest({
581
- headers: this._betaHeaders(),
481
+ headers: this._betaHeaders("v2"),
582
482
  path: `/threads/${threadId}/runs/${runId}`,
483
+ ...args,
583
484
  });
584
485
  },
585
- async modifyRun({
586
- threadId, runId, ...opts
486
+ modifyRun({
487
+ threadId, runId, data, ...args
587
488
  }) {
588
489
  return this._makeRequest({
589
490
  path: `/threads/${threadId}/runs/${runId}`,
590
491
  headers: this._betaHeaders(),
591
492
  method: "POST",
592
- data: opts,
493
+ data,
494
+ ...args,
593
495
  });
594
496
  },
595
- async listRuns({
596
- threadId, ...opts
497
+ listRuns({
498
+ threadId, ...args
597
499
  }) {
598
500
  return this._makeRequest({
599
501
  path: `/threads/${threadId}/runs`,
600
502
  headers: this._betaHeaders(),
601
- params: opts,
503
+ ...args,
602
504
  });
603
505
  },
604
- async submitToolOutputs({
605
- threadId, runId, toolOutputs,
506
+ submitToolOutputs({
507
+ threadId, runId, ...args
606
508
  }) {
607
- // Assuming toolOutputs should be parsed as JSON objects
608
- const parsedToolOutputs = toolOutputs.map(JSON.parse);
609
509
  return this._makeRequest({
610
510
  path: `/threads/${threadId}/runs/${runId}/submit_tool_outputs`,
611
511
  headers: this._betaHeaders(),
612
512
  method: "POST",
613
- data: {
614
- tool_outputs: parsedToolOutputs,
615
- },
513
+ ...args,
616
514
  });
617
515
  },
618
- async cancelRun({
619
- threadId, runId,
516
+ cancelRun({
517
+ threadId, runId, ...args
620
518
  }) {
621
519
  return this._makeRequest({
622
520
  path: `/threads/${threadId}/runs/${runId}/cancel`,
623
- headers: this._betaHeaders(),
521
+ headers: this._betaHeaders("v2"),
624
522
  method: "POST",
523
+ ...args,
625
524
  });
626
525
  },
627
- async createThreadAndRun({
628
- assistantId, ...opts
629
- }) {
526
+ createThreadAndRun(args = {}) {
630
527
  return this._makeRequest({
631
528
  path: "/threads/runs",
632
- headers: this._betaHeaders(),
529
+ headers: this._betaHeaders("v2"),
633
530
  method: "POST",
634
- data: {
635
- assistant_id: assistantId,
636
- ...opts,
637
- },
531
+ ...args,
638
532
  });
639
533
  },
640
- async retrieveRunStep({
641
- threadId, runId, stepId,
534
+ retrieveRunStep({
535
+ threadId, runId, stepId, ...args
642
536
  }) {
643
537
  return this._makeRequest({
644
538
  path: `/threads/${threadId}/runs/${runId}/steps/${stepId}`,
645
539
  headers: this._betaHeaders(),
540
+ ...args,
646
541
  });
647
542
  },
648
- async listRunSteps({
649
- threadId, runId, ...opts
543
+ listRunSteps({
544
+ threadId, runId, ...args
650
545
  }) {
651
546
  return this._makeRequest({
652
547
  path: `/threads/${threadId}/runs/${runId}/steps`,
653
548
  headers: this._betaHeaders(),
654
- params: opts,
549
+ ...args,
655
550
  });
656
551
  },
657
- async listFiles({ purpose } = {}) {
552
+ listFiles({
553
+ purpose, ...args
554
+ } = {}) {
658
555
  return this._makeRequest({
659
556
  path: "/files",
660
557
  headers: this._betaHeaders(),
661
558
  params: {
662
559
  purpose,
663
560
  },
561
+ ...args,
664
562
  });
665
563
  },
666
- async uploadFile(args) {
564
+ uploadFile(args) {
667
565
  return this._makeRequest({
668
566
  method: "POST",
669
567
  path: "/files",
@@ -674,44 +572,107 @@ export default {
674
572
  },
675
573
  });
676
574
  },
677
- async deleteFile({ file_id }) {
575
+ deleteFile({
576
+ file_id, ...args
577
+ }) {
678
578
  return this._makeRequest({
679
579
  method: "DELETE",
680
580
  headers: this._betaHeaders(),
681
581
  path: `/files/${file_id}`,
582
+ ...args,
682
583
  });
683
584
  },
684
- async retrieveFile({ file_id }) {
585
+ retrieveFile({
586
+ file_id, ...args
587
+ }) {
685
588
  return this._makeRequest({
686
589
  headers: this._betaHeaders(),
687
590
  path: `/files/${file_id}`,
591
+ ...args,
688
592
  });
689
593
  },
690
- async retrieveFileContent({ file_id }) {
594
+ retrieveFileContent({
595
+ file_id, ...args
596
+ }) {
691
597
  return this._makeRequest({
692
- headers: this._betaHeaders(),
598
+ headers: this._betaHeaders("v2"),
693
599
  path: `/files/${file_id}/content`,
600
+ ...args,
694
601
  });
695
602
  },
696
- async listFineTuningJobs(args) {
603
+ listFineTuningJobs(args = {}) {
697
604
  return this._makeRequest({
698
605
  path: "/fine_tuning/jobs",
699
606
  ...args,
700
607
  });
701
608
  },
702
- async createSpeech(args) {
609
+ createSpeech(args = {}) {
703
610
  return this._makeRequest({
704
611
  path: "/audio/speech",
705
612
  method: "POST",
706
613
  ...args,
707
614
  });
708
615
  },
709
- async createFineTuningJob(args) {
616
+ createFineTuningJob(args = {}) {
710
617
  return this._makeRequest({
711
618
  path: "/fine_tuning/jobs",
712
619
  method: "POST",
713
620
  ...args,
714
621
  });
715
622
  },
623
+ listVectorStores(args = {}) {
624
+ return this._makeRequest({
625
+ path: "/vector_stores",
626
+ headers: this._betaHeaders("v2"),
627
+ ...args,
628
+ });
629
+ },
630
+ createModeration(args = {}) {
631
+ return this._makeRequest({
632
+ method: "POST",
633
+ path: "/moderations",
634
+ ...args,
635
+ });
636
+ },
637
+ createBatch(args = {}) {
638
+ return this._makeRequest({
639
+ method: "POST",
640
+ path: "/batches",
641
+ ...args,
642
+ });
643
+ },
644
+ listBatches(args = {}) {
645
+ return this._makeRequest({
646
+ path: "/batches",
647
+ ...args,
648
+ });
649
+ },
650
+ async *paginate({
651
+ resourceFn,
652
+ args = {},
653
+ max,
654
+ }) {
655
+ args = {
656
+ ...args,
657
+ params: {
658
+ ...args.params,
659
+ },
660
+ };
661
+ let hasMore, count = 0;
662
+ do {
663
+ const {
664
+ data, last_id: after,
665
+ } = await resourceFn(args);
666
+ for (const item of data) {
667
+ yield item;
668
+ count++;
669
+ if (max && count >= max) {
670
+ return;
671
+ }
672
+ }
673
+ hasMore = data?.length;
674
+ args.params.after = after;
675
+ } while (hasMore);
676
+ },
716
677
  },
717
678
  };