openai 2.0.2 → 3.0.0

This diff shows the content of publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only, and reflects the changes between package versions as they appear in their respective public registries.
package/api.ts CHANGED
@@ -4,7 +4,7 @@
4
4
  * OpenAI API
5
5
  * APIs for sampling from and fine-tuning language models
6
6
  *
7
- * The version of the OpenAPI document: 1.0.0
7
+ * The version of the OpenAPI document: 1.0.5
8
8
  *
9
9
  *
10
10
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@@ -28,29 +28,29 @@ import { BASE_PATH, COLLECTION_FORMATS, RequestArgs, BaseAPI, RequiredError } fr
28
28
  */
29
29
  export interface CreateAnswerRequest {
30
30
  /**
31
- * ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
31
+ * ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
32
32
  * @type {string}
33
33
  * @memberof CreateAnswerRequest
34
34
  */
35
- 'model'?: string;
35
+ 'model': string;
36
36
  /**
37
37
  * Question to get answered.
38
38
  * @type {string}
39
39
  * @memberof CreateAnswerRequest
40
40
  */
41
- 'question'?: string;
41
+ 'question': string;
42
42
  /**
43
43
  * List of (question, answer) pairs that will help steer the model towards the tone and answer format you\'d like. We recommend adding 2 to 3 examples.
44
44
  * @type {Array<any>}
45
45
  * @memberof CreateAnswerRequest
46
46
  */
47
- 'examples'?: Array<any>;
47
+ 'examples': Array<any>;
48
48
  /**
49
49
  * A text snippet containing the contextual information used to generate the answers for the `examples` you provide.
50
50
  * @type {string}
51
51
  * @memberof CreateAnswerRequest
52
52
  */
53
- 'examples_context'?: string;
53
+ 'examples_context': string;
54
54
  /**
55
55
  * List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. You should specify either `documents` or a `file`, but not both.
56
56
  * @type {Array<string>}
@@ -64,7 +64,7 @@ export interface CreateAnswerRequest {
64
64
  */
65
65
  'file'?: string | null;
66
66
  /**
67
- * ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
67
+ * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
68
68
  * @type {string}
69
69
  * @memberof CreateAnswerRequest
70
70
  */
@@ -94,11 +94,11 @@ export interface CreateAnswerRequest {
94
94
  */
95
95
  'max_tokens'?: number | null;
96
96
  /**
97
- * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
98
- * @type {string | Array<string>}
97
+ *
98
+ * @type {CreateAnswerRequestStop}
99
99
  * @memberof CreateAnswerRequest
100
100
  */
101
- 'stop'?: string | Array<string> | null;
101
+ 'stop'?: CreateAnswerRequestStop | null;
102
102
  /**
103
103
  * How many answers to generate for each question.
104
104
  * @type {number}
@@ -129,7 +129,20 @@ export interface CreateAnswerRequest {
129
129
  * @memberof CreateAnswerRequest
130
130
  */
131
131
  'expand'?: Array<any> | null;
132
+ /**
133
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
134
+ * @type {string}
135
+ * @memberof CreateAnswerRequest
136
+ */
137
+ 'user'?: string;
132
138
  }
139
+ /**
140
+ * @type CreateAnswerRequestStop
141
+ * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
142
+ * @export
143
+ */
144
+ export type CreateAnswerRequestStop = Array<string> | string;
145
+
133
146
  /**
134
147
  *
135
148
  * @export
@@ -168,27 +181,27 @@ export interface CreateAnswerResponse {
168
181
  'answers'?: Array<string>;
169
182
  /**
170
183
  *
171
- * @type {Array<CreateAnswerResponseSelectedDocuments>}
184
+ * @type {Array<CreateAnswerResponseSelectedDocumentsInner>}
172
185
  * @memberof CreateAnswerResponse
173
186
  */
174
- 'selected_documents'?: Array<CreateAnswerResponseSelectedDocuments>;
187
+ 'selected_documents'?: Array<CreateAnswerResponseSelectedDocumentsInner>;
175
188
  }
176
189
  /**
177
190
  *
178
191
  * @export
179
- * @interface CreateAnswerResponseSelectedDocuments
192
+ * @interface CreateAnswerResponseSelectedDocumentsInner
180
193
  */
181
- export interface CreateAnswerResponseSelectedDocuments {
194
+ export interface CreateAnswerResponseSelectedDocumentsInner {
182
195
  /**
183
196
  *
184
197
  * @type {number}
185
- * @memberof CreateAnswerResponseSelectedDocuments
198
+ * @memberof CreateAnswerResponseSelectedDocumentsInner
186
199
  */
187
200
  'document'?: number;
188
201
  /**
189
202
  *
190
203
  * @type {string}
191
- * @memberof CreateAnswerResponseSelectedDocuments
204
+ * @memberof CreateAnswerResponseSelectedDocumentsInner
192
205
  */
193
206
  'text'?: string;
194
207
  }
@@ -199,17 +212,17 @@ export interface CreateAnswerResponseSelectedDocuments {
199
212
  */
200
213
  export interface CreateClassificationRequest {
201
214
  /**
202
- * ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
215
+ * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
203
216
  * @type {string}
204
217
  * @memberof CreateClassificationRequest
205
218
  */
206
- 'model'?: string;
219
+ 'model': string;
207
220
  /**
208
221
  * Query to be classified.
209
222
  * @type {string}
210
223
  * @memberof CreateClassificationRequest
211
224
  */
212
- 'query'?: string;
225
+ 'query': string;
213
226
  /**
214
227
  * A list of examples with labels, in the following format: `[[\"The movie is so interesting.\", \"Positive\"], [\"It is quite boring.\", \"Negative\"], ...]` All the label strings will be normalized to be capitalized. You should specify either `examples` or `file`, but not both.
215
228
  * @type {Array<any>}
@@ -229,7 +242,7 @@ export interface CreateClassificationRequest {
229
242
  */
230
243
  'labels'?: Array<string> | null;
231
244
  /**
232
- * ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
245
+ * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
233
246
  * @type {string}
234
247
  * @memberof CreateClassificationRequest
235
248
  */
@@ -276,6 +289,12 @@ export interface CreateClassificationRequest {
276
289
  * @memberof CreateClassificationRequest
277
290
  */
278
291
  'expand'?: Array<any> | null;
292
+ /**
293
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
294
+ * @type {string}
295
+ * @memberof CreateClassificationRequest
296
+ */
297
+ 'user'?: string;
279
298
  }
280
299
  /**
281
300
  *
@@ -315,154 +334,62 @@ export interface CreateClassificationResponse {
315
334
  'label'?: string;
316
335
  /**
317
336
  *
318
- * @type {Array<CreateClassificationResponseSelectedExamples>}
337
+ * @type {Array<CreateClassificationResponseSelectedExamplesInner>}
319
338
  * @memberof CreateClassificationResponse
320
339
  */
321
- 'selected_examples'?: Array<CreateClassificationResponseSelectedExamples>;
340
+ 'selected_examples'?: Array<CreateClassificationResponseSelectedExamplesInner>;
322
341
  }
323
342
  /**
324
343
  *
325
344
  * @export
326
- * @interface CreateClassificationResponseSelectedExamples
345
+ * @interface CreateClassificationResponseSelectedExamplesInner
327
346
  */
328
- export interface CreateClassificationResponseSelectedExamples {
347
+ export interface CreateClassificationResponseSelectedExamplesInner {
329
348
  /**
330
349
  *
331
350
  * @type {number}
332
- * @memberof CreateClassificationResponseSelectedExamples
351
+ * @memberof CreateClassificationResponseSelectedExamplesInner
333
352
  */
334
353
  'document'?: number;
335
354
  /**
336
355
  *
337
356
  * @type {string}
338
- * @memberof CreateClassificationResponseSelectedExamples
357
+ * @memberof CreateClassificationResponseSelectedExamplesInner
339
358
  */
340
359
  'text'?: string;
341
360
  /**
342
361
  *
343
362
  * @type {string}
344
- * @memberof CreateClassificationResponseSelectedExamples
363
+ * @memberof CreateClassificationResponseSelectedExamplesInner
345
364
  */
346
365
  'label'?: string;
347
366
  }
348
367
  /**
349
368
  *
350
369
  * @export
351
- * @interface CreateCompletionFromModelRequest
370
+ * @interface CreateCompletionRequest
352
371
  */
353
- export interface CreateCompletionFromModelRequest {
354
- /**
355
- * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
356
- * @type {string | Array<string> | Array<number> | Array<any>}
357
- * @memberof CreateCompletionFromModelRequest
358
- */
359
- 'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
360
- /**
361
- * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
362
- * @type {number}
363
- * @memberof CreateCompletionFromModelRequest
364
- */
365
- 'max_tokens'?: number | null;
366
- /**
367
- * What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.
368
- * @type {number}
369
- * @memberof CreateCompletionFromModelRequest
370
- */
371
- 'temperature'?: number | null;
372
- /**
373
- * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
374
- * @type {number}
375
- * @memberof CreateCompletionFromModelRequest
376
- */
377
- 'top_p'?: number | null;
378
- /**
379
- * How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
380
- * @type {number}
381
- * @memberof CreateCompletionFromModelRequest
382
- */
383
- 'n'?: number | null;
384
- /**
385
- * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
386
- * @type {boolean}
387
- * @memberof CreateCompletionFromModelRequest
388
- */
389
- 'stream'?: boolean | null;
390
- /**
391
- * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact support@openai.com and describe your use case.
392
- * @type {number}
393
- * @memberof CreateCompletionFromModelRequest
394
- */
395
- 'logprobs'?: number | null;
396
- /**
397
- * Echo back the prompt in addition to the completion
398
- * @type {boolean}
399
- * @memberof CreateCompletionFromModelRequest
400
- */
401
- 'echo'?: boolean | null;
402
- /**
403
- * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
404
- * @type {string | Array<string>}
405
- * @memberof CreateCompletionFromModelRequest
406
- */
407
- 'stop'?: string | Array<string> | null;
408
- /**
409
- * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
410
- * @type {number}
411
- * @memberof CreateCompletionFromModelRequest
412
- */
413
- 'presence_penalty'?: number | null;
414
- /**
415
- * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
416
- * @type {number}
417
- * @memberof CreateCompletionFromModelRequest
418
- */
419
- 'frequency_penalty'?: number | null;
420
- /**
421
- * Generates `best_of` completions server-side and returns the \"best\" (the one with the lowest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
422
- * @type {number}
423
- * @memberof CreateCompletionFromModelRequest
424
- */
425
- 'best_of'?: number | null;
426
- /**
427
- * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
428
- * @type {object}
429
- * @memberof CreateCompletionFromModelRequest
430
- */
431
- 'logit_bias'?: object | null;
372
+ export interface CreateCompletionRequest {
432
373
  /**
433
- * ID of the model to use for completion.
374
+ * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
434
375
  * @type {string}
435
- * @memberof CreateCompletionFromModelRequest
376
+ * @memberof CreateCompletionRequest
436
377
  */
437
- 'model'?: string;
438
- }
439
- /**
440
- *
441
- * @export
442
- * @interface CreateCompletionFromModelRequestAllOf
443
- */
444
- export interface CreateCompletionFromModelRequestAllOf {
378
+ 'model': string;
445
379
  /**
446
- * ID of the model to use for completion.
447
- * @type {string}
448
- * @memberof CreateCompletionFromModelRequestAllOf
380
+ *
381
+ * @type {CreateCompletionRequestPrompt}
382
+ * @memberof CreateCompletionRequest
449
383
  */
450
- 'model'?: string;
451
- }
452
- /**
453
- *
454
- * @export
455
- * @interface CreateCompletionRequest
456
- */
457
- export interface CreateCompletionRequest {
384
+ 'prompt'?: CreateCompletionRequestPrompt | null;
458
385
  /**
459
- * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
460
- * @type {string | Array<string> | Array<number> | Array<any>}
386
+ * The suffix that comes after a completion of inserted text.
387
+ * @type {string}
461
388
  * @memberof CreateCompletionRequest
462
389
  */
463
- 'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
390
+ 'suffix'?: string | null;
464
391
  /**
465
- * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
392
+ * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
466
393
  * @type {number}
467
394
  * @memberof CreateCompletionRequest
468
395
  */
@@ -504,11 +431,11 @@ export interface CreateCompletionRequest {
504
431
  */
505
432
  'echo'?: boolean | null;
506
433
  /**
507
- * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
508
- * @type {string | Array<string>}
434
+ *
435
+ * @type {CreateCompletionRequestStop}
509
436
  * @memberof CreateCompletionRequest
510
437
  */
511
- 'stop'?: string | Array<string> | null;
438
+ 'stop'?: CreateCompletionRequestStop | null;
512
439
  /**
513
440
  * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
514
441
  * @type {number}
@@ -522,7 +449,7 @@ export interface CreateCompletionRequest {
522
449
  */
523
450
  'frequency_penalty'?: number | null;
524
451
  /**
525
- * Generates `best_of` completions server-side and returns the \"best\" (the one with the lowest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
452
+ * Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
526
453
  * @type {number}
527
454
  * @memberof CreateCompletionRequest
528
455
  */
@@ -533,7 +460,27 @@ export interface CreateCompletionRequest {
533
460
  * @memberof CreateCompletionRequest
534
461
  */
535
462
  'logit_bias'?: object | null;
463
+ /**
464
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
465
+ * @type {string}
466
+ * @memberof CreateCompletionRequest
467
+ */
468
+ 'user'?: string;
536
469
  }
470
+ /**
471
+ * @type CreateCompletionRequestPrompt
472
+ * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
473
+ * @export
474
+ */
475
+ export type CreateCompletionRequestPrompt = Array<any> | Array<number> | Array<string> | string;
476
+
477
+ /**
478
+ * @type CreateCompletionRequestStop
479
+ * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
480
+ * @export
481
+ */
482
+ export type CreateCompletionRequestStop = Array<string> | string;
483
+
537
484
  /**
538
485
  *
539
486
  * @export
@@ -566,73 +513,153 @@ export interface CreateCompletionResponse {
566
513
  'model'?: string;
567
514
  /**
568
515
  *
569
- * @type {Array<CreateCompletionResponseChoices>}
516
+ * @type {Array<CreateCompletionResponseChoicesInner>}
570
517
  * @memberof CreateCompletionResponse
571
518
  */
572
- 'choices'?: Array<CreateCompletionResponseChoices>;
519
+ 'choices'?: Array<CreateCompletionResponseChoicesInner>;
573
520
  }
574
521
  /**
575
522
  *
576
523
  * @export
577
- * @interface CreateCompletionResponseChoices
524
+ * @interface CreateCompletionResponseChoicesInner
578
525
  */
579
- export interface CreateCompletionResponseChoices {
526
+ export interface CreateCompletionResponseChoicesInner {
580
527
  /**
581
528
  *
582
529
  * @type {string}
583
- * @memberof CreateCompletionResponseChoices
530
+ * @memberof CreateCompletionResponseChoicesInner
584
531
  */
585
532
  'text'?: string;
586
533
  /**
587
534
  *
588
535
  * @type {number}
589
- * @memberof CreateCompletionResponseChoices
536
+ * @memberof CreateCompletionResponseChoicesInner
590
537
  */
591
538
  'index'?: number;
592
539
  /**
593
540
  *
594
- * @type {CreateCompletionResponseLogprobs}
595
- * @memberof CreateCompletionResponseChoices
541
+ * @type {CreateCompletionResponseChoicesInnerLogprobs}
542
+ * @memberof CreateCompletionResponseChoicesInner
596
543
  */
597
- 'logprobs'?: CreateCompletionResponseLogprobs | null;
544
+ 'logprobs'?: CreateCompletionResponseChoicesInnerLogprobs | null;
598
545
  /**
599
546
  *
600
547
  * @type {string}
601
- * @memberof CreateCompletionResponseChoices
548
+ * @memberof CreateCompletionResponseChoicesInner
602
549
  */
603
550
  'finish_reason'?: string;
604
551
  }
605
552
  /**
606
553
  *
607
554
  * @export
608
- * @interface CreateCompletionResponseLogprobs
555
+ * @interface CreateCompletionResponseChoicesInnerLogprobs
609
556
  */
610
- export interface CreateCompletionResponseLogprobs {
557
+ export interface CreateCompletionResponseChoicesInnerLogprobs {
611
558
  /**
612
559
  *
613
560
  * @type {Array<string>}
614
- * @memberof CreateCompletionResponseLogprobs
561
+ * @memberof CreateCompletionResponseChoicesInnerLogprobs
615
562
  */
616
563
  'tokens'?: Array<string>;
617
564
  /**
618
565
  *
619
566
  * @type {Array<number>}
620
- * @memberof CreateCompletionResponseLogprobs
567
+ * @memberof CreateCompletionResponseChoicesInnerLogprobs
621
568
  */
622
569
  'token_logprobs'?: Array<number>;
623
570
  /**
624
571
  *
625
572
  * @type {Array<object>}
626
- * @memberof CreateCompletionResponseLogprobs
573
+ * @memberof CreateCompletionResponseChoicesInnerLogprobs
627
574
  */
628
575
  'top_logprobs'?: Array<object>;
629
576
  /**
630
577
  *
631
578
  * @type {Array<number>}
632
- * @memberof CreateCompletionResponseLogprobs
579
+ * @memberof CreateCompletionResponseChoicesInnerLogprobs
633
580
  */
634
581
  'text_offset'?: Array<number>;
635
582
  }
583
+ /**
584
+ *
585
+ * @export
586
+ * @interface CreateEditRequest
587
+ */
588
+ export interface CreateEditRequest {
589
+ /**
590
+ * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
591
+ * @type {string}
592
+ * @memberof CreateEditRequest
593
+ */
594
+ 'model': string;
595
+ /**
596
+ * The input text to use as a starting point for the edit.
597
+ * @type {string}
598
+ * @memberof CreateEditRequest
599
+ */
600
+ 'input'?: string | null;
601
+ /**
602
+ * The instruction that tells the model how to edit the prompt.
603
+ * @type {string}
604
+ * @memberof CreateEditRequest
605
+ */
606
+ 'instruction': string;
607
+ /**
608
+ * How many edits to generate for the input and instruction.
609
+ * @type {number}
610
+ * @memberof CreateEditRequest
611
+ */
612
+ 'n'?: number | null;
613
+ /**
614
+ * What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.
615
+ * @type {number}
616
+ * @memberof CreateEditRequest
617
+ */
618
+ 'temperature'?: number | null;
619
+ /**
620
+ * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
621
+ * @type {number}
622
+ * @memberof CreateEditRequest
623
+ */
624
+ 'top_p'?: number | null;
625
+ }
626
+ /**
627
+ *
628
+ * @export
629
+ * @interface CreateEditResponse
630
+ */
631
+ export interface CreateEditResponse {
632
+ /**
633
+ *
634
+ * @type {string}
635
+ * @memberof CreateEditResponse
636
+ */
637
+ 'id'?: string;
638
+ /**
639
+ *
640
+ * @type {string}
641
+ * @memberof CreateEditResponse
642
+ */
643
+ 'object'?: string;
644
+ /**
645
+ *
646
+ * @type {number}
647
+ * @memberof CreateEditResponse
648
+ */
649
+ 'created'?: number;
650
+ /**
651
+ *
652
+ * @type {string}
653
+ * @memberof CreateEditResponse
654
+ */
655
+ 'model'?: string;
656
+ /**
657
+ *
658
+ * @type {Array<CreateCompletionResponseChoicesInner>}
659
+ * @memberof CreateEditResponse
660
+ */
661
+ 'choices'?: Array<CreateCompletionResponseChoicesInner>;
662
+ }
636
663
  /**
637
664
  *
638
665
  * @export
@@ -640,12 +667,31 @@ export interface CreateCompletionResponseLogprobs {
640
667
  */
641
668
  export interface CreateEmbeddingRequest {
642
669
  /**
643
- * Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 2048 tokens in length. We suggest replacing newlines (`\\n`) in your input with a single space, as we have observed inferior results when newlines are present.
644
- * @type {string | Array<string> | Array<number> | Array<any>}
670
+ * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
671
+ * @type {string}
672
+ * @memberof CreateEmbeddingRequest
673
+ */
674
+ 'model': string;
675
+ /**
676
+ *
677
+ * @type {CreateEmbeddingRequestInput}
645
678
  * @memberof CreateEmbeddingRequest
646
679
  */
647
- 'input'?: string | Array<string> | Array<number> | Array<any>;
680
+ 'input': CreateEmbeddingRequestInput;
681
+ /**
682
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
683
+ * @type {string}
684
+ * @memberof CreateEmbeddingRequest
685
+ */
686
+ 'user'?: string;
648
687
  }
688
+ /**
689
+ * @type CreateEmbeddingRequestInput
690
+ * Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 2048 tokens in length. Unless your are embedding code, we suggest replacing newlines (`\\n`) in your input with a single space, as we have observed inferior results when newlines are present.
691
+ * @export
692
+ */
693
+ export type CreateEmbeddingRequestInput = Array<any> | Array<number> | Array<string> | string;
694
+
649
695
  /**
650
696
  *
651
697
  * @export
@@ -666,33 +712,33 @@ export interface CreateEmbeddingResponse {
666
712
  'model'?: string;
667
713
  /**
668
714
  *
669
- * @type {Array<CreateEmbeddingResponseData>}
715
+ * @type {Array<CreateEmbeddingResponseDataInner>}
670
716
  * @memberof CreateEmbeddingResponse
671
717
  */
672
- 'data'?: Array<CreateEmbeddingResponseData>;
718
+ 'data'?: Array<CreateEmbeddingResponseDataInner>;
673
719
  }
674
720
  /**
675
721
  *
676
722
  * @export
677
- * @interface CreateEmbeddingResponseData
723
+ * @interface CreateEmbeddingResponseDataInner
678
724
  */
679
- export interface CreateEmbeddingResponseData {
725
+ export interface CreateEmbeddingResponseDataInner {
680
726
  /**
681
727
  *
682
728
  * @type {number}
683
- * @memberof CreateEmbeddingResponseData
729
+ * @memberof CreateEmbeddingResponseDataInner
684
730
  */
685
731
  'index'?: number;
686
732
  /**
687
733
  *
688
734
  * @type {string}
689
- * @memberof CreateEmbeddingResponseData
735
+ * @memberof CreateEmbeddingResponseDataInner
690
736
  */
691
737
  'object'?: string;
692
738
  /**
693
739
  *
694
740
  * @type {Array<number>}
695
- * @memberof CreateEmbeddingResponseData
741
+ * @memberof CreateEmbeddingResponseDataInner
696
742
  */
697
743
  'embedding'?: Array<number>;
698
744
  }
@@ -707,7 +753,7 @@ export interface CreateFineTuneRequest {
707
753
  * @type {string}
708
754
  * @memberof CreateFineTuneRequest
709
755
  */
710
- 'training_file'?: string;
756
+ 'training_file': string;
711
757
  /**
712
758
  * The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). Your train and validation data should be mutually exclusive. Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
713
759
  * @type {string}
@@ -715,7 +761,7 @@ export interface CreateFineTuneRequest {
715
761
  */
716
762
  'validation_file'?: string | null;
717
763
  /**
718
- * The name of the base model to fine-tune. You can select one of \"ada\", \"babbage\", \"curie\", or \"davinci\". To learn more about these models, see the [Engines](https://beta.openai.com/docs/engines) documentation.
764
+ * The name of the base model to fine-tune. You can select one of \"ada\", \"babbage\", \"curie\", or \"davinci\". To learn more about these models, see the [Models](https://beta.openai.com/docs/models) documentation.
719
765
  * @type {string}
720
766
  * @memberof CreateFineTuneRequest
721
767
  */
@@ -768,6 +814,12 @@ export interface CreateFineTuneRequest {
768
814
  * @memberof CreateFineTuneRequest
769
815
  */
770
816
  'classification_betas'?: Array<number> | null;
817
+ /**
818
+ * A string of up to 40 characters that will be added to your fine-tuned model name. For example, a `suffix` of \"custom-model-name\" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
819
+ * @type {string}
820
+ * @memberof CreateFineTuneRequest
821
+ */
822
+ 'suffix'?: string | null;
771
823
  }
772
824
  /**
773
825
  *
@@ -775,6 +827,12 @@ export interface CreateFineTuneRequest {
775
827
  * @interface CreateSearchRequest
776
828
  */
777
829
  export interface CreateSearchRequest {
830
+ /**
831
+ * Query to search against the documents.
832
+ * @type {string}
833
+ * @memberof CreateSearchRequest
834
+ */
835
+ 'query': string;
778
836
  /**
779
837
  * Up to 200 documents to search over, provided as a list of strings. The maximum document length (in tokens) is 2034 minus the number of tokens in the query. You should specify either `documents` or a `file`, but not both.
780
838
  * @type {Array<string>}
@@ -787,12 +845,6 @@ export interface CreateSearchRequest {
787
845
  * @memberof CreateSearchRequest
788
846
  */
789
847
  'file'?: string | null;
790
- /**
791
- * Query to search against the documents.
792
- * @type {string}
793
- * @memberof CreateSearchRequest
794
- */
795
- 'query'?: string;
796
848
  /**
797
849
  * The maximum number of documents to be re-ranked and returned by search. This flag only takes effect when `file` is set.
798
850
  * @type {number}
@@ -805,6 +857,12 @@ export interface CreateSearchRequest {
805
857
  * @memberof CreateSearchRequest
806
858
  */
807
859
  'return_metadata'?: boolean | null;
860
+ /**
861
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
862
+ * @type {string}
863
+ * @memberof CreateSearchRequest
864
+ */
865
+ 'user'?: string;
808
866
  }
809
867
  /**
810
868
  *
@@ -826,33 +884,33 @@ export interface CreateSearchResponse {
826
884
  'model'?: string;
827
885
  /**
828
886
  *
829
- * @type {Array<CreateSearchResponseData>}
887
+ * @type {Array<CreateSearchResponseDataInner>}
830
888
  * @memberof CreateSearchResponse
831
889
  */
832
- 'data'?: Array<CreateSearchResponseData>;
890
+ 'data'?: Array<CreateSearchResponseDataInner>;
833
891
  }
834
892
  /**
835
893
  *
836
894
  * @export
837
- * @interface CreateSearchResponseData
895
+ * @interface CreateSearchResponseDataInner
838
896
  */
839
- export interface CreateSearchResponseData {
897
+ export interface CreateSearchResponseDataInner {
840
898
  /**
841
899
  *
842
900
  * @type {string}
843
- * @memberof CreateSearchResponseData
901
+ * @memberof CreateSearchResponseDataInner
844
902
  */
845
903
  'object'?: string;
846
904
  /**
847
905
  *
848
906
  * @type {number}
849
- * @memberof CreateSearchResponseData
907
+ * @memberof CreateSearchResponseDataInner
850
908
  */
851
909
  'document'?: number;
852
910
  /**
853
911
  *
854
912
  * @type {number}
855
- * @memberof CreateSearchResponseData
913
+ * @memberof CreateSearchResponseDataInner
856
914
  */
857
915
  'score'?: number;
858
916
  }
@@ -881,6 +939,31 @@ export interface DeleteFileResponse {
881
939
  */
882
940
  'deleted'?: boolean;
883
941
  }
942
+ /**
943
+ *
944
+ * @export
945
+ * @interface DeleteModelResponse
946
+ */
947
+ export interface DeleteModelResponse {
948
+ /**
949
+ *
950
+ * @type {string}
951
+ * @memberof DeleteModelResponse
952
+ */
953
+ 'id'?: string;
954
+ /**
955
+ *
956
+ * @type {string}
957
+ * @memberof DeleteModelResponse
958
+ */
959
+ 'object'?: string;
960
+ /**
961
+ *
962
+ * @type {boolean}
963
+ * @memberof DeleteModelResponse
964
+ */
965
+ 'deleted'?: boolean;
966
+ }
884
967
  /**
885
968
  *
886
969
  * @export
@@ -1104,6 +1187,56 @@ export interface ListFineTunesResponse {
1104
1187
  */
1105
1188
  'data'?: Array<FineTune>;
1106
1189
  }
1190
+ /**
1191
+ *
1192
+ * @export
1193
+ * @interface ListModelsResponse
1194
+ */
1195
+ export interface ListModelsResponse {
1196
+ /**
1197
+ *
1198
+ * @type {string}
1199
+ * @memberof ListModelsResponse
1200
+ */
1201
+ 'object'?: string;
1202
+ /**
1203
+ *
1204
+ * @type {Array<Model>}
1205
+ * @memberof ListModelsResponse
1206
+ */
1207
+ 'data'?: Array<Model>;
1208
+ }
1209
+ /**
1210
+ *
1211
+ * @export
1212
+ * @interface Model
1213
+ */
1214
+ export interface Model {
1215
+ /**
1216
+ *
1217
+ * @type {string}
1218
+ * @memberof Model
1219
+ */
1220
+ 'id'?: string;
1221
+ /**
1222
+ *
1223
+ * @type {string}
1224
+ * @memberof Model
1225
+ */
1226
+ 'object'?: string;
1227
+ /**
1228
+ *
1229
+ * @type {number}
1230
+ * @memberof Model
1231
+ */
1232
+ 'created'?: number;
1233
+ /**
1234
+ *
1235
+ * @type {string}
1236
+ * @memberof Model
1237
+ */
1238
+ 'owned_by'?: string;
1239
+ }
1107
1240
  /**
1108
1241
  *
1109
1242
  * @export
@@ -1205,6 +1338,7 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1205
1338
  * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
1206
1339
  * @param {CreateAnswerRequest} createAnswerRequest
1207
1340
  * @param {*} [options] Override http request option.
1341
+ * @deprecated
1208
1342
  * @throws {RequiredError}
1209
1343
  */
1210
1344
  createAnswer: async (createAnswerRequest: CreateAnswerRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
@@ -1241,6 +1375,7 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1241
1375
  * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
1242
1376
  * @param {CreateClassificationRequest} createClassificationRequest
1243
1377
  * @param {*} [options] Override http request option.
1378
+ * @deprecated
1244
1379
  * @throws {RequiredError}
1245
1380
  */
1246
1381
  createClassification: async (createClassificationRequest: CreateClassificationRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
@@ -1274,19 +1409,15 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1274
1409
  },
1275
1410
  /**
1276
1411
  *
1277
- * @summary Creates a new completion for the provided prompt and parameters
1278
- * @param {string} engineId The ID of the engine to use for this request
1412
+ * @summary Creates a completion for the provided prompt and parameters
1279
1413
  * @param {CreateCompletionRequest} createCompletionRequest
1280
1414
  * @param {*} [options] Override http request option.
1281
1415
  * @throws {RequiredError}
1282
1416
  */
1283
- createCompletion: async (engineId: string, createCompletionRequest: CreateCompletionRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
1284
- // verify required parameter 'engineId' is not null or undefined
1285
- assertParamExists('createCompletion', 'engineId', engineId)
1417
+ createCompletion: async (createCompletionRequest: CreateCompletionRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
1286
1418
  // verify required parameter 'createCompletionRequest' is not null or undefined
1287
1419
  assertParamExists('createCompletion', 'createCompletionRequest', createCompletionRequest)
1288
- const localVarPath = `/engines/{engine_id}/completions`
1289
- .replace(`{${"engine_id"}}`, encodeURIComponent(String(engineId)));
1420
+ const localVarPath = `/completions`;
1290
1421
  // use dummy base URL string because the URL constructor only accepts absolute URLs.
1291
1422
  const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
1292
1423
  let baseOptions;
@@ -1314,15 +1445,15 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1314
1445
  },
1315
1446
  /**
1316
1447
  *
1317
- * @summary Creates a completion using a fine-tuned model
1318
- * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
1448
+ * @summary Creates a new edit for the provided input, instruction, and parameters
1449
+ * @param {CreateEditRequest} createEditRequest
1319
1450
  * @param {*} [options] Override http request option.
1320
1451
  * @throws {RequiredError}
1321
1452
  */
1322
- createCompletionFromModel: async (createCompletionFromModelRequest: CreateCompletionFromModelRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
1323
- // verify required parameter 'createCompletionFromModelRequest' is not null or undefined
1324
- assertParamExists('createCompletionFromModel', 'createCompletionFromModelRequest', createCompletionFromModelRequest)
1325
- const localVarPath = `/completions`;
1453
+ createEdit: async (createEditRequest: CreateEditRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
1454
+ // verify required parameter 'createEditRequest' is not null or undefined
1455
+ assertParamExists('createEdit', 'createEditRequest', createEditRequest)
1456
+ const localVarPath = `/edits`;
1326
1457
  // use dummy base URL string because the URL constructor only accepts absolute URLs.
1327
1458
  const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
1328
1459
  let baseOptions;
@@ -1341,7 +1472,7 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1341
1472
  setSearchParams(localVarUrlObj, localVarQueryParameter);
1342
1473
  let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
1343
1474
  localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
1344
- localVarRequestOptions.data = serializeDataIfNeeded(createCompletionFromModelRequest, localVarRequestOptions, configuration)
1475
+ localVarRequestOptions.data = serializeDataIfNeeded(createEditRequest, localVarRequestOptions, configuration)
1345
1476
 
1346
1477
  return {
1347
1478
  url: toPathString(localVarUrlObj),
@@ -1350,19 +1481,15 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1350
1481
  },
1351
1482
  /**
1352
1483
  *
1353
- * @summary Creates an embedding vector representing the input text.
1354
- * @param {string} engineId The ID of the engine to use for this request
1484
+ * @summary Creates an embedding vector representing the input text.
1355
1485
  * @param {CreateEmbeddingRequest} createEmbeddingRequest
1356
1486
  * @param {*} [options] Override http request option.
1357
1487
  * @throws {RequiredError}
1358
1488
  */
1359
- createEmbedding: async (engineId: string, createEmbeddingRequest: CreateEmbeddingRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
1360
- // verify required parameter 'engineId' is not null or undefined
1361
- assertParamExists('createEmbedding', 'engineId', engineId)
1489
+ createEmbedding: async (createEmbeddingRequest: CreateEmbeddingRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
1362
1490
  // verify required parameter 'createEmbeddingRequest' is not null or undefined
1363
1491
  assertParamExists('createEmbedding', 'createEmbeddingRequest', createEmbeddingRequest)
1364
- const localVarPath = `/engines/{engine_id}/embeddings`
1365
- .replace(`{${"engine_id"}}`, encodeURIComponent(String(engineId)));
1492
+ const localVarPath = `/embeddings`;
1366
1493
  // use dummy base URL string because the URL constructor only accepts absolute URLs.
1367
1494
  const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
1368
1495
  let baseOptions;
@@ -1391,8 +1518,8 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1391
1518
  /**
1392
1519
  *
1393
1520
  * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
1394
- * @param {any} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;search\\\&quot; or \\\&quot;answers\\\&quot;, each line is a JSON record with a \\\&quot;text\\\&quot; field and an optional \\\&quot;metadata\\\&quot; field. Only \\\&quot;text\\\&quot; field will be used for search. Specially, when the &#x60;purpose&#x60; is \\\&quot;answers\\\&quot;, \\\&quot;\\\\n\\\&quot; is used as a delimiter to chunk contents in the \\\&quot;text\\\&quot; field into multiple documents for finer-grained matching. If the &#x60;purpose&#x60; is set to \\\&quot;classifications\\\&quot;, each line is a JSON record representing a single training example with \\\&quot;text\\\&quot; and \\\&quot;label\\\&quot; fields along with an optional \\\&quot;metadata\\\&quot; field. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
1395
- * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;search\\\&quot; for [Search](/docs/api-reference/searches), \\\&quot;answers\\\&quot; for [Answers](/docs/api-reference/answers), \\\&quot;classifications\\\&quot; for [Classifications](/docs/api-reference/classifications) and \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
1521
+ * @param {any} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
1522
+ * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
1396
1523
  * @param {*} [options] Override http request option.
1397
1524
  * @throws {RequiredError}
1398
1525
  */
@@ -1478,6 +1605,7 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1478
1605
  * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
1479
1606
  * @param {CreateSearchRequest} createSearchRequest
1480
1607
  * @param {*} [options] Override http request option.
1608
+ * @deprecated
1481
1609
  * @throws {RequiredError}
1482
1610
  */
1483
1611
  createSearch: async (engineId: string, createSearchRequest: CreateSearchRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
@@ -1537,6 +1665,40 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1537
1665
 
1538
1666
 
1539
1667
 
1668
+ setSearchParams(localVarUrlObj, localVarQueryParameter);
1669
+ let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
1670
+ localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
1671
+
1672
+ return {
1673
+ url: toPathString(localVarUrlObj),
1674
+ options: localVarRequestOptions,
1675
+ };
1676
+ },
1677
+ /**
1678
+ *
1679
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
1680
+ * @param {string} model The model to delete
1681
+ * @param {*} [options] Override http request option.
1682
+ * @throws {RequiredError}
1683
+ */
1684
+ deleteModel: async (model: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
1685
+ // verify required parameter 'model' is not null or undefined
1686
+ assertParamExists('deleteModel', 'model', model)
1687
+ const localVarPath = `/models/{model}`
1688
+ .replace(`{${"model"}}`, encodeURIComponent(String(model)));
1689
+ // use dummy base URL string because the URL constructor only accepts absolute URLs.
1690
+ const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
1691
+ let baseOptions;
1692
+ if (configuration) {
1693
+ baseOptions = configuration.baseOptions;
1694
+ }
1695
+
1696
+ const localVarRequestOptions = { method: 'DELETE', ...baseOptions, ...options};
1697
+ const localVarHeaderParameter = {} as any;
1698
+ const localVarQueryParameter = {} as any;
1699
+
1700
+
1701
+
1540
1702
  setSearchParams(localVarUrlObj, localVarQueryParameter);
1541
1703
  let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
1542
1704
  localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
@@ -1582,8 +1744,9 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1582
1744
  },
1583
1745
  /**
1584
1746
  *
1585
- * @summary Lists the currently available engines, and provides basic information about each one such as the owner and availability.
1747
+ * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
1586
1748
  * @param {*} [options] Override http request option.
1749
+ * @deprecated
1587
1750
  * @throws {RequiredError}
1588
1751
  */
1589
1752
  listEngines: async (options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
@@ -1711,9 +1874,40 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1711
1874
  },
1712
1875
  /**
1713
1876
  *
1714
- * @summary Retrieves an engine instance, providing basic information about the engine such as the owner and availability.
1877
+ * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
1878
+ * @param {*} [options] Override http request option.
1879
+ * @throws {RequiredError}
1880
+ */
1881
+ listModels: async (options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
1882
+ const localVarPath = `/models`;
1883
+ // use dummy base URL string because the URL constructor only accepts absolute URLs.
1884
+ const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
1885
+ let baseOptions;
1886
+ if (configuration) {
1887
+ baseOptions = configuration.baseOptions;
1888
+ }
1889
+
1890
+ const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
1891
+ const localVarHeaderParameter = {} as any;
1892
+ const localVarQueryParameter = {} as any;
1893
+
1894
+
1895
+
1896
+ setSearchParams(localVarUrlObj, localVarQueryParameter);
1897
+ let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
1898
+ localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
1899
+
1900
+ return {
1901
+ url: toPathString(localVarUrlObj),
1902
+ options: localVarRequestOptions,
1903
+ };
1904
+ },
1905
+ /**
1906
+ *
1907
+ * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
1715
1908
  * @param {string} engineId The ID of the engine to use for this request
1716
1909
  * @param {*} [options] Override http request option.
1910
+ * @deprecated
1717
1911
  * @throws {RequiredError}
1718
1912
  */
1719
1913
  retrieveEngine: async (engineId: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
@@ -1802,6 +1996,40 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
1802
1996
 
1803
1997
 
1804
1998
 
1999
+ setSearchParams(localVarUrlObj, localVarQueryParameter);
2000
+ let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2001
+ localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2002
+
2003
+ return {
2004
+ url: toPathString(localVarUrlObj),
2005
+ options: localVarRequestOptions,
2006
+ };
2007
+ },
2008
+ /**
2009
+ *
2010
+ * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
2011
+ * @param {string} model The ID of the model to use for this request
2012
+ * @param {*} [options] Override http request option.
2013
+ * @throws {RequiredError}
2014
+ */
2015
+ retrieveModel: async (model: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2016
+ // verify required parameter 'model' is not null or undefined
2017
+ assertParamExists('retrieveModel', 'model', model)
2018
+ const localVarPath = `/models/{model}`
2019
+ .replace(`{${"model"}}`, encodeURIComponent(String(model)));
2020
+ // use dummy base URL string because the URL constructor only accepts absolute URLs.
2021
+ const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2022
+ let baseOptions;
2023
+ if (configuration) {
2024
+ baseOptions = configuration.baseOptions;
2025
+ }
2026
+
2027
+ const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
2028
+ const localVarHeaderParameter = {} as any;
2029
+ const localVarQueryParameter = {} as any;
2030
+
2031
+
2032
+
1805
2033
  setSearchParams(localVarUrlObj, localVarQueryParameter);
1806
2034
  let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
1807
2035
  localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
@@ -1837,6 +2065,7 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
1837
2065
  * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
1838
2066
  * @param {CreateAnswerRequest} createAnswerRequest
1839
2067
  * @param {*} [options] Override http request option.
2068
+ * @deprecated
1840
2069
  * @throws {RequiredError}
1841
2070
  */
1842
2071
  async createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateAnswerResponse>> {
@@ -1848,6 +2077,7 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
1848
2077
  * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
1849
2078
  * @param {CreateClassificationRequest} createClassificationRequest
1850
2079
  * @param {*} [options] Override http request option.
2080
+ * @deprecated
1851
2081
  * @throws {RequiredError}
1852
2082
  */
1853
2083
  async createClassification(createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateClassificationResponse>> {
@@ -1856,44 +2086,42 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
1856
2086
  },
1857
2087
  /**
1858
2088
  *
1859
- * @summary Creates a new completion for the provided prompt and parameters
1860
- * @param {string} engineId The ID of the engine to use for this request
2089
+ * @summary Creates a completion for the provided prompt and parameters
1861
2090
  * @param {CreateCompletionRequest} createCompletionRequest
1862
2091
  * @param {*} [options] Override http request option.
1863
2092
  * @throws {RequiredError}
1864
2093
  */
1865
- async createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>> {
1866
- const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletion(engineId, createCompletionRequest, options);
2094
+ async createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>> {
2095
+ const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletion(createCompletionRequest, options);
1867
2096
  return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
1868
2097
  },
1869
2098
  /**
1870
2099
  *
1871
- * @summary Creates a completion using a fine-tuned model
1872
- * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
2100
+ * @summary Creates a new edit for the provided input, instruction, and parameters
2101
+ * @param {CreateEditRequest} createEditRequest
1873
2102
  * @param {*} [options] Override http request option.
1874
2103
  * @throws {RequiredError}
1875
2104
  */
1876
- async createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>> {
1877
- const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletionFromModel(createCompletionFromModelRequest, options);
2105
+ async createEdit(createEditRequest: CreateEditRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateEditResponse>> {
2106
+ const localVarAxiosArgs = await localVarAxiosParamCreator.createEdit(createEditRequest, options);
1878
2107
  return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
1879
2108
  },
1880
2109
  /**
1881
2110
  *
1882
- * @summary Creates an embedding vector representing the input text.
1883
- * @param {string} engineId The ID of the engine to use for this request
2111
+ * @summary Creates an embedding vector representing the input text.
1884
2112
  * @param {CreateEmbeddingRequest} createEmbeddingRequest
1885
2113
  * @param {*} [options] Override http request option.
1886
2114
  * @throws {RequiredError}
1887
2115
  */
1888
- async createEmbedding(engineId: string, createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateEmbeddingResponse>> {
1889
- const localVarAxiosArgs = await localVarAxiosParamCreator.createEmbedding(engineId, createEmbeddingRequest, options);
2116
+ async createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateEmbeddingResponse>> {
2117
+ const localVarAxiosArgs = await localVarAxiosParamCreator.createEmbedding(createEmbeddingRequest, options);
1890
2118
  return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
1891
2119
  },
1892
2120
  /**
1893
2121
  *
1894
2122
  * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
1895
- * @param {any} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;search\\\&quot; or \\\&quot;answers\\\&quot;, each line is a JSON record with a \\\&quot;text\\\&quot; field and an optional \\\&quot;metadata\\\&quot; field. Only \\\&quot;text\\\&quot; field will be used for search. Specially, when the &#x60;purpose&#x60; is \\\&quot;answers\\\&quot;, \\\&quot;\\\\n\\\&quot; is used as a delimiter to chunk contents in the \\\&quot;text\\\&quot; field into multiple documents for finer-grained matching. If the &#x60;purpose&#x60; is set to \\\&quot;classifications\\\&quot;, each line is a JSON record representing a single training example with \\\&quot;text\\\&quot; and \\\&quot;label\\\&quot; fields along with an optional \\\&quot;metadata\\\&quot; field. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
1896
- * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;search\\\&quot; for [Search](/docs/api-reference/searches), \\\&quot;answers\\\&quot; for [Answers](/docs/api-reference/answers), \\\&quot;classifications\\\&quot; for [Classifications](/docs/api-reference/classifications) and \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
2123
+ * @param {any} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
2124
+ * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
1897
2125
  * @param {*} [options] Override http request option.
1898
2126
  * @throws {RequiredError}
1899
2127
  */
@@ -1918,6 +2146,7 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
1918
2146
  * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
1919
2147
  * @param {CreateSearchRequest} createSearchRequest
1920
2148
  * @param {*} [options] Override http request option.
2149
+ * @deprecated
1921
2150
  * @throws {RequiredError}
1922
2151
  */
1923
2152
  async createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateSearchResponse>> {
@@ -1935,6 +2164,17 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
1935
2164
  const localVarAxiosArgs = await localVarAxiosParamCreator.deleteFile(fileId, options);
1936
2165
  return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
1937
2166
  },
2167
+ /**
2168
+ *
2169
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
2170
+ * @param {string} model The model to delete
2171
+ * @param {*} [options] Override http request option.
2172
+ * @throws {RequiredError}
2173
+ */
2174
+ async deleteModel(model: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<DeleteModelResponse>> {
2175
+ const localVarAxiosArgs = await localVarAxiosParamCreator.deleteModel(model, options);
2176
+ return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
2177
+ },
1938
2178
  /**
1939
2179
  *
1940
2180
  * @summary Returns the contents of the specified file
@@ -1948,8 +2188,9 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
1948
2188
  },
1949
2189
  /**
1950
2190
  *
1951
- * @summary Lists the currently available engines, and provides basic information about each one such as the owner and availability.
2191
+ * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
1952
2192
  * @param {*} [options] Override http request option.
2193
+ * @deprecated
1953
2194
  * @throws {RequiredError}
1954
2195
  */
1955
2196
  async listEngines(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListEnginesResponse>> {
@@ -1990,9 +2231,20 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
1990
2231
  },
1991
2232
  /**
1992
2233
  *
1993
- * @summary Retrieves an engine instance, providing basic information about the engine such as the owner and availability.
2234
+ * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
2235
+ * @param {*} [options] Override http request option.
2236
+ * @throws {RequiredError}
2237
+ */
2238
+ async listModels(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListModelsResponse>> {
2239
+ const localVarAxiosArgs = await localVarAxiosParamCreator.listModels(options);
2240
+ return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
2241
+ },
2242
+ /**
2243
+ *
2244
+ * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
1994
2245
  * @param {string} engineId The ID of the engine to use for this request
1995
2246
  * @param {*} [options] Override http request option.
2247
+ * @deprecated
1996
2248
  * @throws {RequiredError}
1997
2249
  */
1998
2250
  async retrieveEngine(engineId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<Engine>> {
@@ -2021,6 +2273,17 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
2021
2273
  const localVarAxiosArgs = await localVarAxiosParamCreator.retrieveFineTune(fineTuneId, options);
2022
2274
  return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
2023
2275
  },
2276
+ /**
2277
+ *
2278
+ * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
2279
+ * @param {string} model The ID of the model to use for this request
2280
+ * @param {*} [options] Override http request option.
2281
+ * @throws {RequiredError}
2282
+ */
2283
+ async retrieveModel(model: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<Model>> {
2284
+ const localVarAxiosArgs = await localVarAxiosParamCreator.retrieveModel(model, options);
2285
+ return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
2286
+ },
2024
2287
  }
2025
2288
  };
2026
2289
 
@@ -2046,6 +2309,7 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
2046
2309
  * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
2047
2310
  * @param {CreateAnswerRequest} createAnswerRequest
2048
2311
  * @param {*} [options] Override http request option.
2312
+ * @deprecated
2049
2313
  * @throws {RequiredError}
2050
2314
  */
2051
2315
  createAnswer(createAnswerRequest: CreateAnswerRequest, options?: any): AxiosPromise<CreateAnswerResponse> {
@@ -2056,6 +2320,7 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
2056
2320
  * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
2057
2321
  * @param {CreateClassificationRequest} createClassificationRequest
2058
2322
  * @param {*} [options] Override http request option.
2323
+ * @deprecated
2059
2324
  * @throws {RequiredError}
2060
2325
  */
2061
2326
  createClassification(createClassificationRequest: CreateClassificationRequest, options?: any): AxiosPromise<CreateClassificationResponse> {
@@ -2063,41 +2328,39 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
2063
2328
  },
2064
2329
  /**
2065
2330
  *
2066
- * @summary Creates a new completion for the provided prompt and parameters
2067
- * @param {string} engineId The ID of the engine to use for this request
2331
+ * @summary Creates a completion for the provided prompt and parameters
2068
2332
  * @param {CreateCompletionRequest} createCompletionRequest
2069
2333
  * @param {*} [options] Override http request option.
2070
2334
  * @throws {RequiredError}
2071
2335
  */
2072
- createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse> {
2073
- return localVarFp.createCompletion(engineId, createCompletionRequest, options).then((request) => request(axios, basePath));
2336
+ createCompletion(createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse> {
2337
+ return localVarFp.createCompletion(createCompletionRequest, options).then((request) => request(axios, basePath));
2074
2338
  },
2075
2339
  /**
2076
2340
  *
2077
- * @summary Creates a completion using a fine-tuned model
2078
- * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
2341
+ * @summary Creates a new edit for the provided input, instruction, and parameters
2342
+ * @param {CreateEditRequest} createEditRequest
2079
2343
  * @param {*} [options] Override http request option.
2080
2344
  * @throws {RequiredError}
2081
2345
  */
2082
- createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: any): AxiosPromise<CreateCompletionResponse> {
2083
- return localVarFp.createCompletionFromModel(createCompletionFromModelRequest, options).then((request) => request(axios, basePath));
2346
+ createEdit(createEditRequest: CreateEditRequest, options?: any): AxiosPromise<CreateEditResponse> {
2347
+ return localVarFp.createEdit(createEditRequest, options).then((request) => request(axios, basePath));
2084
2348
  },
2085
2349
  /**
2086
2350
  *
2087
- * @summary Creates an embedding vector representing the input text.
2088
- * @param {string} engineId The ID of the engine to use for this request
2351
+ * @summary Creates an embedding vector representing the input text.
2089
2352
  * @param {CreateEmbeddingRequest} createEmbeddingRequest
2090
2353
  * @param {*} [options] Override http request option.
2091
2354
  * @throws {RequiredError}
2092
2355
  */
2093
- createEmbedding(engineId: string, createEmbeddingRequest: CreateEmbeddingRequest, options?: any): AxiosPromise<CreateEmbeddingResponse> {
2094
- return localVarFp.createEmbedding(engineId, createEmbeddingRequest, options).then((request) => request(axios, basePath));
2356
+ createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: any): AxiosPromise<CreateEmbeddingResponse> {
2357
+ return localVarFp.createEmbedding(createEmbeddingRequest, options).then((request) => request(axios, basePath));
2095
2358
  },
2096
2359
  /**
2097
2360
  *
2098
2361
  * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
2099
- * @param {any} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;search\\\&quot; or \\\&quot;answers\\\&quot;, each line is a JSON record with a \\\&quot;text\\\&quot; field and an optional \\\&quot;metadata\\\&quot; field. Only \\\&quot;text\\\&quot; field will be used for search. Specially, when the &#x60;purpose&#x60; is \\\&quot;answers\\\&quot;, \\\&quot;\\\\n\\\&quot; is used as a delimiter to chunk contents in the \\\&quot;text\\\&quot; field into multiple documents for finer-grained matching. If the &#x60;purpose&#x60; is set to \\\&quot;classifications\\\&quot;, each line is a JSON record representing a single training example with \\\&quot;text\\\&quot; and \\\&quot;label\\\&quot; fields along with an optional \\\&quot;metadata\\\&quot; field. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
2100
- * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;search\\\&quot; for [Search](/docs/api-reference/searches), \\\&quot;answers\\\&quot; for [Answers](/docs/api-reference/answers), \\\&quot;classifications\\\&quot; for [Classifications](/docs/api-reference/classifications) and \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
2362
+ * @param {any} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
2363
+ * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
2101
2364
  * @param {*} [options] Override http request option.
2102
2365
  * @throws {RequiredError}
2103
2366
  */
@@ -2120,6 +2383,7 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
2120
2383
  * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
2121
2384
  * @param {CreateSearchRequest} createSearchRequest
2122
2385
  * @param {*} [options] Override http request option.
2386
+ * @deprecated
2123
2387
  * @throws {RequiredError}
2124
2388
  */
2125
2389
  createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: any): AxiosPromise<CreateSearchResponse> {
@@ -2135,6 +2399,16 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
2135
2399
  deleteFile(fileId: string, options?: any): AxiosPromise<DeleteFileResponse> {
2136
2400
  return localVarFp.deleteFile(fileId, options).then((request) => request(axios, basePath));
2137
2401
  },
2402
+ /**
2403
+ *
2404
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
2405
+ * @param {string} model The model to delete
2406
+ * @param {*} [options] Override http request option.
2407
+ * @throws {RequiredError}
2408
+ */
2409
+ deleteModel(model: string, options?: any): AxiosPromise<DeleteModelResponse> {
2410
+ return localVarFp.deleteModel(model, options).then((request) => request(axios, basePath));
2411
+ },
2138
2412
  /**
2139
2413
  *
2140
2414
  * @summary Returns the contents of the specified file
@@ -2147,8 +2421,9 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
2147
2421
  },
2148
2422
  /**
2149
2423
  *
2150
- * @summary Lists the currently available engines, and provides basic information about each one such as the owner and availability.
2424
+ * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
2151
2425
  * @param {*} [options] Override http request option.
2426
+ * @deprecated
2152
2427
  * @throws {RequiredError}
2153
2428
  */
2154
2429
  listEngines(options?: any): AxiosPromise<ListEnginesResponse> {
@@ -2185,9 +2460,19 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
2185
2460
  },
2186
2461
  /**
2187
2462
  *
2188
- * @summary Retrieves an engine instance, providing basic information about the engine such as the owner and availability.
2463
+ * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
2464
+ * @param {*} [options] Override http request option.
2465
+ * @throws {RequiredError}
2466
+ */
2467
+ listModels(options?: any): AxiosPromise<ListModelsResponse> {
2468
+ return localVarFp.listModels(options).then((request) => request(axios, basePath));
2469
+ },
2470
+ /**
2471
+ *
2472
+ * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
2189
2473
  * @param {string} engineId The ID of the engine to use for this request
2190
2474
  * @param {*} [options] Override http request option.
2475
+ * @deprecated
2191
2476
  * @throws {RequiredError}
2192
2477
  */
2193
2478
  retrieveEngine(engineId: string, options?: any): AxiosPromise<Engine> {
@@ -2213,6 +2498,16 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
2213
2498
  retrieveFineTune(fineTuneId: string, options?: any): AxiosPromise<FineTune> {
2214
2499
  return localVarFp.retrieveFineTune(fineTuneId, options).then((request) => request(axios, basePath));
2215
2500
  },
2501
+ /**
2502
+ *
2503
+ * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
2504
+ * @param {string} model The ID of the model to use for this request
2505
+ * @param {*} [options] Override http request option.
2506
+ * @throws {RequiredError}
2507
+ */
2508
+ retrieveModel(model: string, options?: any): AxiosPromise<Model> {
2509
+ return localVarFp.retrieveModel(model, options).then((request) => request(axios, basePath));
2510
+ },
2216
2511
  };
2217
2512
  };
2218
2513
 
@@ -2240,6 +2535,7 @@ export class OpenAIApi extends BaseAPI {
2240
2535
  * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
2241
2536
  * @param {CreateAnswerRequest} createAnswerRequest
2242
2537
  * @param {*} [options] Override http request option.
2538
+ * @deprecated
2243
2539
  * @throws {RequiredError}
2244
2540
  * @memberof OpenAIApi
2245
2541
  */
@@ -2252,6 +2548,7 @@ export class OpenAIApi extends BaseAPI {
2252
2548
  * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
2253
2549
  * @param {CreateClassificationRequest} createClassificationRequest
2254
2550
  * @param {*} [options] Override http request option.
2551
+ * @deprecated
2255
2552
  * @throws {RequiredError}
2256
2553
  * @memberof OpenAIApi
2257
2554
  */
@@ -2261,47 +2558,45 @@ export class OpenAIApi extends BaseAPI {
2261
2558
 
2262
2559
  /**
2263
2560
  *
2264
- * @summary Creates a new completion for the provided prompt and parameters
2265
- * @param {string} engineId The ID of the engine to use for this request
2561
+ * @summary Creates a completion for the provided prompt and parameters
2266
2562
  * @param {CreateCompletionRequest} createCompletionRequest
2267
2563
  * @param {*} [options] Override http request option.
2268
2564
  * @throws {RequiredError}
2269
2565
  * @memberof OpenAIApi
2270
2566
  */
2271
- public createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig) {
2272
- return OpenAIApiFp(this.configuration).createCompletion(engineId, createCompletionRequest, options).then((request) => request(this.axios, this.basePath));
2567
+ public createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig) {
2568
+ return OpenAIApiFp(this.configuration).createCompletion(createCompletionRequest, options).then((request) => request(this.axios, this.basePath));
2273
2569
  }
2274
2570
 
2275
2571
  /**
2276
2572
  *
2277
- * @summary Creates a completion using a fine-tuned model
2278
- * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
2573
+ * @summary Creates a new edit for the provided input, instruction, and parameters
2574
+ * @param {CreateEditRequest} createEditRequest
2279
2575
  * @param {*} [options] Override http request option.
2280
2576
  * @throws {RequiredError}
2281
2577
  * @memberof OpenAIApi
2282
2578
  */
2283
- public createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig) {
2284
- return OpenAIApiFp(this.configuration).createCompletionFromModel(createCompletionFromModelRequest, options).then((request) => request(this.axios, this.basePath));
2579
+ public createEdit(createEditRequest: CreateEditRequest, options?: AxiosRequestConfig) {
2580
+ return OpenAIApiFp(this.configuration).createEdit(createEditRequest, options).then((request) => request(this.axios, this.basePath));
2285
2581
  }
2286
2582
 
2287
2583
  /**
2288
2584
  *
2289
- * @summary Creates an embedding vector representing the input text.
2290
- * @param {string} engineId The ID of the engine to use for this request
2585
+ * @summary Creates an embedding vector representing the input text.
2291
2586
  * @param {CreateEmbeddingRequest} createEmbeddingRequest
2292
2587
  * @param {*} [options] Override http request option.
2293
2588
  * @throws {RequiredError}
2294
2589
  * @memberof OpenAIApi
2295
2590
  */
2296
- public createEmbedding(engineId: string, createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig) {
2297
- return OpenAIApiFp(this.configuration).createEmbedding(engineId, createEmbeddingRequest, options).then((request) => request(this.axios, this.basePath));
2591
+ public createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig) {
2592
+ return OpenAIApiFp(this.configuration).createEmbedding(createEmbeddingRequest, options).then((request) => request(this.axios, this.basePath));
2298
2593
  }
2299
2594
 
2300
2595
  /**
2301
2596
  *
2302
2597
  * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
2303
- * @param {any} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;search\\\&quot; or \\\&quot;answers\\\&quot;, each line is a JSON record with a \\\&quot;text\\\&quot; field and an optional \\\&quot;metadata\\\&quot; field. Only \\\&quot;text\\\&quot; field will be used for search. Specially, when the &#x60;purpose&#x60; is \\\&quot;answers\\\&quot;, \\\&quot;\\\\n\\\&quot; is used as a delimiter to chunk contents in the \\\&quot;text\\\&quot; field into multiple documents for finer-grained matching. If the &#x60;purpose&#x60; is set to \\\&quot;classifications\\\&quot;, each line is a JSON record representing a single training example with \\\&quot;text\\\&quot; and \\\&quot;label\\\&quot; fields along with an optional \\\&quot;metadata\\\&quot; field. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
2304
- * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;search\\\&quot; for [Search](/docs/api-reference/searches), \\\&quot;answers\\\&quot; for [Answers](/docs/api-reference/answers), \\\&quot;classifications\\\&quot; for [Classifications](/docs/api-reference/classifications) and \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
2598
+ * @param {any} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
2599
+ * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
2305
2600
  * @param {*} [options] Override http request option.
2306
2601
  * @throws {RequiredError}
2307
2602
  * @memberof OpenAIApi
@@ -2328,6 +2623,7 @@ export class OpenAIApi extends BaseAPI {
2328
2623
  * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
2329
2624
  * @param {CreateSearchRequest} createSearchRequest
2330
2625
  * @param {*} [options] Override http request option.
2626
+ * @deprecated
2331
2627
  * @throws {RequiredError}
2332
2628
  * @memberof OpenAIApi
2333
2629
  */
@@ -2347,6 +2643,18 @@ export class OpenAIApi extends BaseAPI {
2347
2643
  return OpenAIApiFp(this.configuration).deleteFile(fileId, options).then((request) => request(this.axios, this.basePath));
2348
2644
  }
2349
2645
 
2646
+ /**
2647
+ *
2648
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
2649
+ * @param {string} model The model to delete
2650
+ * @param {*} [options] Override http request option.
2651
+ * @throws {RequiredError}
2652
+ * @memberof OpenAIApi
2653
+ */
2654
+ public deleteModel(model: string, options?: AxiosRequestConfig) {
2655
+ return OpenAIApiFp(this.configuration).deleteModel(model, options).then((request) => request(this.axios, this.basePath));
2656
+ }
2657
+
2350
2658
  /**
2351
2659
  *
2352
2660
  * @summary Returns the contents of the specified file
@@ -2361,8 +2669,9 @@ export class OpenAIApi extends BaseAPI {
2361
2669
 
2362
2670
  /**
2363
2671
  *
2364
- * @summary Lists the currently available engines, and provides basic information about each one such as the owner and availability.
2672
+ * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
2365
2673
  * @param {*} [options] Override http request option.
2674
+ * @deprecated
2366
2675
  * @throws {RequiredError}
2367
2676
  * @memberof OpenAIApi
2368
2677
  */
@@ -2407,9 +2716,21 @@ export class OpenAIApi extends BaseAPI {
2407
2716
 
2408
2717
  /**
2409
2718
  *
2410
- * @summary Retrieves an engine instance, providing basic information about the engine such as the owner and availability.
2719
+ * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
2720
+ * @param {*} [options] Override http request option.
2721
+ * @throws {RequiredError}
2722
+ * @memberof OpenAIApi
2723
+ */
2724
+ public listModels(options?: AxiosRequestConfig) {
2725
+ return OpenAIApiFp(this.configuration).listModels(options).then((request) => request(this.axios, this.basePath));
2726
+ }
2727
+
2728
+ /**
2729
+ *
2730
+ * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
2411
2731
  * @param {string} engineId The ID of the engine to use for this request
2412
2732
  * @param {*} [options] Override http request option.
2733
+ * @deprecated
2413
2734
  * @throws {RequiredError}
2414
2735
  * @memberof OpenAIApi
2415
2736
  */
@@ -2440,6 +2761,18 @@ export class OpenAIApi extends BaseAPI {
2440
2761
  public retrieveFineTune(fineTuneId: string, options?: AxiosRequestConfig) {
2441
2762
  return OpenAIApiFp(this.configuration).retrieveFineTune(fineTuneId, options).then((request) => request(this.axios, this.basePath));
2442
2763
  }
2764
+
2765
+ /**
2766
+ *
2767
+ * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
2768
+ * @param {string} model The ID of the model to use for this request
2769
+ * @param {*} [options] Override http request option.
2770
+ * @throws {RequiredError}
2771
+ * @memberof OpenAIApi
2772
+ */
2773
+ public retrieveModel(model: string, options?: AxiosRequestConfig) {
2774
+ return OpenAIApiFp(this.configuration).retrieveModel(model, options).then((request) => request(this.axios, this.basePath));
2775
+ }
2443
2776
  }
2444
2777
 
2445
2778