openai 2.0.1 → 2.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/base.ts CHANGED
@@ -4,7 +4,7 @@
4
4
  * OpenAI API
5
5
  * APIs for sampling from and fine-tuning language models
6
6
  *
7
- * The version of the OpenAPI document: 1.0.0
7
+ * The version of the OpenAPI document: 1.0.4
8
8
  *
9
9
  *
10
10
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
package/common.ts CHANGED
@@ -4,7 +4,7 @@
4
4
  * OpenAI API
5
5
  * APIs for sampling from and fine-tuning language models
6
6
  *
7
- * The version of the OpenAPI document: 1.0.0
7
+ * The version of the OpenAPI document: 1.0.4
8
8
  *
9
9
  *
10
10
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
package/configuration.ts CHANGED
@@ -4,7 +4,7 @@
4
4
  * OpenAI API
5
5
  * APIs for sampling from and fine-tuning language models
6
6
  *
7
- * The version of the OpenAPI document: 1.0.0
7
+ * The version of the OpenAPI document: 1.0.4
8
8
  *
9
9
  *
10
10
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
package/dist/api.d.ts CHANGED
@@ -2,7 +2,7 @@
2
2
  * OpenAI API
3
3
  * APIs for sampling from and fine-tuning language models
4
4
  *
5
- * The version of the OpenAPI document: 1.0.0
5
+ * The version of the OpenAPI document: 1.0.4
6
6
  *
7
7
  *
8
8
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@@ -19,29 +19,29 @@ import { RequestArgs, BaseAPI } from './base';
19
19
  */
20
20
  export interface CreateAnswerRequest {
21
21
  /**
22
- * ID of the engine to use for completion.
22
+ * ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
23
23
  * @type {string}
24
24
  * @memberof CreateAnswerRequest
25
25
  */
26
- 'model'?: string;
26
+ 'model': string;
27
27
  /**
28
28
  * Question to get answered.
29
29
  * @type {string}
30
30
  * @memberof CreateAnswerRequest
31
31
  */
32
- 'question'?: string;
32
+ 'question': string;
33
33
  /**
34
34
  * List of (question, answer) pairs that will help steer the model towards the tone and answer format you\'d like. We recommend adding 2 to 3 examples.
35
35
  * @type {Array<any>}
36
36
  * @memberof CreateAnswerRequest
37
37
  */
38
- 'examples'?: Array<any>;
38
+ 'examples': Array<any>;
39
39
  /**
40
40
  * A text snippet containing the contextual information used to generate the answers for the `examples` you provide.
41
41
  * @type {string}
42
42
  * @memberof CreateAnswerRequest
43
43
  */
44
- 'examples_context'?: string;
44
+ 'examples_context': string;
45
45
  /**
46
46
  * List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. You should specify either `documents` or a `file`, but not both.
47
47
  * @type {Array<string>}
@@ -55,7 +55,7 @@ export interface CreateAnswerRequest {
55
55
  */
56
56
  'file'?: string | null;
57
57
  /**
58
- * ID of the engine to use for [Search](/docs/api-reference/searches/create).
58
+ * ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
59
59
  * @type {string}
60
60
  * @memberof CreateAnswerRequest
61
61
  */
@@ -120,6 +120,12 @@ export interface CreateAnswerRequest {
120
120
  * @memberof CreateAnswerRequest
121
121
  */
122
122
  'expand'?: Array<any> | null;
123
+ /**
124
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
125
+ * @type {string}
126
+ * @memberof CreateAnswerRequest
127
+ */
128
+ 'user'?: string;
123
129
  }
124
130
  /**
125
131
  *
@@ -190,17 +196,17 @@ export interface CreateAnswerResponseSelectedDocuments {
190
196
  */
191
197
  export interface CreateClassificationRequest {
192
198
  /**
193
- * ID of the engine to use for completion.
199
+ * ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
194
200
  * @type {string}
195
201
  * @memberof CreateClassificationRequest
196
202
  */
197
- 'model'?: string;
203
+ 'model': string;
198
204
  /**
199
205
  * Query to be classified.
200
206
  * @type {string}
201
207
  * @memberof CreateClassificationRequest
202
208
  */
203
- 'query'?: string;
209
+ 'query': string;
204
210
  /**
205
211
  * A list of examples with labels, in the following format: `[[\"The movie is so interesting.\", \"Positive\"], [\"It is quite boring.\", \"Negative\"], ...]` All the label strings will be normalized to be capitalized. You should specify either `examples` or `file`, but not both.
206
212
  * @type {Array<any>}
@@ -220,7 +226,7 @@ export interface CreateClassificationRequest {
220
226
  */
221
227
  'labels'?: Array<string> | null;
222
228
  /**
223
- * ID of the engine to use for [Search](/docs/api-reference/searches/create).
229
+ * ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
224
230
  * @type {string}
225
231
  * @memberof CreateClassificationRequest
226
232
  */
@@ -267,6 +273,12 @@ export interface CreateClassificationRequest {
267
273
  * @memberof CreateClassificationRequest
268
274
  */
269
275
  'expand'?: Array<any> | null;
276
+ /**
277
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
278
+ * @type {string}
279
+ * @memberof CreateClassificationRequest
280
+ */
281
+ 'user'?: string;
270
282
  }
271
283
  /**
272
284
  *
@@ -336,6 +348,122 @@ export interface CreateClassificationResponseSelectedExamples {
336
348
  */
337
349
  'label'?: string;
338
350
  }
351
+ /**
352
+ *
353
+ * @export
354
+ * @interface CreateCompletionFromModelRequest
355
+ */
356
+ export interface CreateCompletionFromModelRequest {
357
+ /**
358
+ * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
359
+ * @type {string | Array<string> | Array<number> | Array<any>}
360
+ * @memberof CreateCompletionFromModelRequest
361
+ */
362
+ 'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
363
+ /**
364
+ * The suffix that comes after a completion of inserted text, encoded as a string or array of strings.
365
+ * @type {string | Array<string>}
366
+ * @memberof CreateCompletionFromModelRequest
367
+ */
368
+ 'suffix'?: string | Array<string> | null;
369
+ /**
370
+ * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
371
+ * @type {number}
372
+ * @memberof CreateCompletionFromModelRequest
373
+ */
374
+ 'max_tokens'?: number | null;
375
+ /**
376
+ * What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.
377
+ * @type {number}
378
+ * @memberof CreateCompletionFromModelRequest
379
+ */
380
+ 'temperature'?: number | null;
381
+ /**
382
+ * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
383
+ * @type {number}
384
+ * @memberof CreateCompletionFromModelRequest
385
+ */
386
+ 'top_p'?: number | null;
387
+ /**
388
+ * How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
389
+ * @type {number}
390
+ * @memberof CreateCompletionFromModelRequest
391
+ */
392
+ 'n'?: number | null;
393
+ /**
394
+ * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
395
+ * @type {boolean}
396
+ * @memberof CreateCompletionFromModelRequest
397
+ */
398
+ 'stream'?: boolean | null;
399
+ /**
400
+ * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact support@openai.com and describe your use case.
401
+ * @type {number}
402
+ * @memberof CreateCompletionFromModelRequest
403
+ */
404
+ 'logprobs'?: number | null;
405
+ /**
406
+ * Echo back the prompt in addition to the completion
407
+ * @type {boolean}
408
+ * @memberof CreateCompletionFromModelRequest
409
+ */
410
+ 'echo'?: boolean | null;
411
+ /**
412
+ * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
413
+ * @type {string | Array<string>}
414
+ * @memberof CreateCompletionFromModelRequest
415
+ */
416
+ 'stop'?: string | Array<string> | null;
417
+ /**
418
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
419
+ * @type {number}
420
+ * @memberof CreateCompletionFromModelRequest
421
+ */
422
+ 'presence_penalty'?: number | null;
423
+ /**
424
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
425
+ * @type {number}
426
+ * @memberof CreateCompletionFromModelRequest
427
+ */
428
+ 'frequency_penalty'?: number | null;
429
+ /**
430
+ * Generates `best_of` completions server-side and returns the \"best\" (the one with the lowest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
431
+ * @type {number}
432
+ * @memberof CreateCompletionFromModelRequest
433
+ */
434
+ 'best_of'?: number | null;
435
+ /**
436
+ * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
437
+ * @type {object}
438
+ * @memberof CreateCompletionFromModelRequest
439
+ */
440
+ 'logit_bias'?: object | null;
441
+ /**
442
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
443
+ * @type {string}
444
+ * @memberof CreateCompletionFromModelRequest
445
+ */
446
+ 'user'?: string;
447
+ /**
448
+ * ID of the model to use for completion.
449
+ * @type {string}
450
+ * @memberof CreateCompletionFromModelRequest
451
+ */
452
+ 'model'?: string;
453
+ }
454
+ /**
455
+ *
456
+ * @export
457
+ * @interface CreateCompletionFromModelRequestAllOf
458
+ */
459
+ export interface CreateCompletionFromModelRequestAllOf {
460
+ /**
461
+ * ID of the model to use for completion.
462
+ * @type {string}
463
+ * @memberof CreateCompletionFromModelRequestAllOf
464
+ */
465
+ 'model'?: string;
466
+ }
339
467
  /**
340
468
  *
341
469
  * @export
@@ -349,7 +477,13 @@ export interface CreateCompletionRequest {
349
477
  */
350
478
  'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
351
479
  /**
352
- * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `davinci-codex`, which supports 4096).
480
+ * The suffix that comes after a completion of inserted text, encoded as a string or array of strings.
481
+ * @type {string | Array<string>}
482
+ * @memberof CreateCompletionRequest
483
+ */
484
+ 'suffix'?: string | Array<string> | null;
485
+ /**
486
+ * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
353
487
  * @type {number}
354
488
  * @memberof CreateCompletionRequest
355
489
  */
@@ -420,6 +554,12 @@ export interface CreateCompletionRequest {
420
554
  * @memberof CreateCompletionRequest
421
555
  */
422
556
  'logit_bias'?: object | null;
557
+ /**
558
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
559
+ * @type {string}
560
+ * @memberof CreateCompletionRequest
561
+ */
562
+ 'user'?: string;
423
563
  }
424
564
  /**
425
565
  *
@@ -520,6 +660,74 @@ export interface CreateCompletionResponseLogprobs {
520
660
  */
521
661
  'text_offset'?: Array<number>;
522
662
  }
663
+ /**
664
+ *
665
+ * @export
666
+ * @interface CreateEditRequest
667
+ */
668
+ export interface CreateEditRequest {
669
+ /**
670
+ * The input text to use as a starting point for the edit.
671
+ * @type {string}
672
+ * @memberof CreateEditRequest
673
+ */
674
+ 'input'?: string | null;
675
+ /**
676
+ * The instruction that tells the model how to edit the prompt.
677
+ * @type {string}
678
+ * @memberof CreateEditRequest
679
+ */
680
+ 'instruction': string;
681
+ /**
682
+ * What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.
683
+ * @type {number}
684
+ * @memberof CreateEditRequest
685
+ */
686
+ 'temperature'?: number | null;
687
+ /**
688
+ * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
689
+ * @type {number}
690
+ * @memberof CreateEditRequest
691
+ */
692
+ 'top_p'?: number | null;
693
+ }
694
+ /**
695
+ *
696
+ * @export
697
+ * @interface CreateEditResponse
698
+ */
699
+ export interface CreateEditResponse {
700
+ /**
701
+ *
702
+ * @type {string}
703
+ * @memberof CreateEditResponse
704
+ */
705
+ 'id'?: string;
706
+ /**
707
+ *
708
+ * @type {string}
709
+ * @memberof CreateEditResponse
710
+ */
711
+ 'object'?: string;
712
+ /**
713
+ *
714
+ * @type {number}
715
+ * @memberof CreateEditResponse
716
+ */
717
+ 'created'?: number;
718
+ /**
719
+ *
720
+ * @type {string}
721
+ * @memberof CreateEditResponse
722
+ */
723
+ 'model'?: string;
724
+ /**
725
+ *
726
+ * @type {Array<CreateCompletionResponseChoices>}
727
+ * @memberof CreateEditResponse
728
+ */
729
+ 'choices'?: Array<CreateCompletionResponseChoices>;
730
+ }
523
731
  /**
524
732
  *
525
733
  * @export
@@ -527,11 +735,17 @@ export interface CreateCompletionResponseLogprobs {
527
735
  */
528
736
  export interface CreateEmbeddingRequest {
529
737
  /**
530
- * Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 2048 tokens in length. We suggest replacing newlines (`\\n`) in your input with a single space, as we have observed inferior results when newlines are present.
738
+ * Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 2048 tokens in length. Unless you are embedding code, we suggest replacing newlines (`\\n`) in your input with a single space, as we have observed inferior results when newlines are present.
531
739
  * @type {string | Array<string> | Array<number> | Array<any>}
532
740
  * @memberof CreateEmbeddingRequest
533
741
  */
534
- 'input'?: string | Array<string> | Array<number> | Array<any>;
742
+ 'input': string | Array<string> | Array<number> | Array<any>;
743
+ /**
744
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
745
+ * @type {string}
746
+ * @memberof CreateEmbeddingRequest
747
+ */
748
+ 'user'?: string;
535
749
  }
536
750
  /**
537
751
  *
@@ -594,7 +808,7 @@ export interface CreateFineTuneRequest {
594
808
  * @type {string}
595
809
  * @memberof CreateFineTuneRequest
596
810
  */
597
- 'training_file'?: string;
811
+ 'training_file': string;
598
812
  /**
599
813
  * The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). Your train and validation data should be mutually exclusive. Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
600
814
  * @type {string}
@@ -655,6 +869,12 @@ export interface CreateFineTuneRequest {
655
869
  * @memberof CreateFineTuneRequest
656
870
  */
657
871
  'classification_betas'?: Array<number> | null;
872
+ /**
873
+ * A string of up to 40 characters that will be added to your fine-tuned model name. For example, a `suffix` of \"custom-model-name\" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
874
+ * @type {string}
875
+ * @memberof CreateFineTuneRequest
876
+ */
877
+ 'suffix'?: string | null;
658
878
  }
659
879
  /**
660
880
  *
@@ -662,6 +882,12 @@ export interface CreateFineTuneRequest {
662
882
  * @interface CreateSearchRequest
663
883
  */
664
884
  export interface CreateSearchRequest {
885
+ /**
886
+ * Query to search against the documents.
887
+ * @type {string}
888
+ * @memberof CreateSearchRequest
889
+ */
890
+ 'query': string;
665
891
  /**
666
892
  * Up to 200 documents to search over, provided as a list of strings. The maximum document length (in tokens) is 2034 minus the number of tokens in the query. You should specify either `documents` or a `file`, but not both.
667
893
  * @type {Array<string>}
@@ -674,12 +900,6 @@ export interface CreateSearchRequest {
674
900
  * @memberof CreateSearchRequest
675
901
  */
676
902
  'file'?: string | null;
677
- /**
678
- * Query to search against the documents.
679
- * @type {string}
680
- * @memberof CreateSearchRequest
681
- */
682
- 'query'?: string;
683
903
  /**
684
904
  * The maximum number of documents to be re-ranked and returned by search. This flag only takes effect when `file` is set.
685
905
  * @type {number}
@@ -692,6 +912,12 @@ export interface CreateSearchRequest {
692
912
  * @memberof CreateSearchRequest
693
913
  */
694
914
  'return_metadata'?: boolean | null;
915
+ /**
916
+ * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
917
+ * @type {string}
918
+ * @memberof CreateSearchRequest
919
+ */
920
+ 'user'?: string;
695
921
  }
696
922
  /**
697
923
  *
@@ -768,6 +994,31 @@ export interface DeleteFileResponse {
768
994
  */
769
995
  'deleted'?: boolean;
770
996
  }
997
+ /**
998
+ *
999
+ * @export
1000
+ * @interface DeleteModelResponse
1001
+ */
1002
+ export interface DeleteModelResponse {
1003
+ /**
1004
+ *
1005
+ * @type {string}
1006
+ * @memberof DeleteModelResponse
1007
+ */
1008
+ 'id'?: string;
1009
+ /**
1010
+ *
1011
+ * @type {string}
1012
+ * @memberof DeleteModelResponse
1013
+ */
1014
+ 'object'?: string;
1015
+ /**
1016
+ *
1017
+ * @type {boolean}
1018
+ * @memberof DeleteModelResponse
1019
+ */
1020
+ 'deleted'?: boolean;
1021
+ }
771
1022
  /**
772
1023
  *
773
1024
  * @export
@@ -1084,6 +1335,23 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
1084
1335
  * @throws {RequiredError}
1085
1336
  */
1086
1337
  createCompletion: (engineId: string, createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
1338
+ /**
1339
+ *
1340
+ * @summary Creates a completion using a fine-tuned model
1341
+ * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
1342
+ * @param {*} [options] Override http request option.
1343
+ * @throws {RequiredError}
1344
+ */
1345
+ createCompletionFromModel: (createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
1346
+ /**
1347
+ *
1348
+ * @summary Creates a new edit for the provided input, instruction, and parameters
1349
+ * @param {string} engineId The ID of the engine to use for this request
1350
+ * @param {CreateEditRequest} createEditRequest
1351
+ * @param {*} [options] Override http request option.
1352
+ * @throws {RequiredError}
1353
+ */
1354
+ createEdit: (engineId: string, createEditRequest: CreateEditRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
1087
1355
  /**
1088
1356
  *
1089
1357
  * @summary Creates an embedding vector representing the input text.
@@ -1113,7 +1381,7 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
1113
1381
  /**
1114
1382
  *
1115
1383
  * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
1116
- * @param {string} engineId The ID of the engine to use for this request
1384
+ * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
1117
1385
  * @param {CreateSearchRequest} createSearchRequest
1118
1386
  * @param {*} [options] Override http request option.
1119
1387
  * @throws {RequiredError}
@@ -1127,6 +1395,14 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
1127
1395
  * @throws {RequiredError}
1128
1396
  */
1129
1397
  deleteFile: (fileId: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
1398
+ /**
1399
+ *
1400
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
1401
+ * @param {string} model The model to delete
1402
+ * @param {*} [options] Override http request option.
1403
+ * @throws {RequiredError}
1404
+ */
1405
+ deleteModel: (model: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
1130
1406
  /**
1131
1407
  *
1132
1408
  * @summary Returns the contents of the specified file
@@ -1228,6 +1504,23 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
1228
1504
  * @throws {RequiredError}
1229
1505
  */
1230
1506
  createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>>;
1507
+ /**
1508
+ *
1509
+ * @summary Creates a completion using a fine-tuned model
1510
+ * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
1511
+ * @param {*} [options] Override http request option.
1512
+ * @throws {RequiredError}
1513
+ */
1514
+ createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>>;
1515
+ /**
1516
+ *
1517
+ * @summary Creates a new edit for the provided input, instruction, and parameters
1518
+ * @param {string} engineId The ID of the engine to use for this request
1519
+ * @param {CreateEditRequest} createEditRequest
1520
+ * @param {*} [options] Override http request option.
1521
+ * @throws {RequiredError}
1522
+ */
1523
+ createEdit(engineId: string, createEditRequest: CreateEditRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateEditResponse>>;
1231
1524
  /**
1232
1525
  *
1233
1526
  * @summary Creates an embedding vector representing the input text.
@@ -1257,7 +1550,7 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
1257
1550
  /**
1258
1551
  *
1259
1552
  * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
1260
- * @param {string} engineId The ID of the engine to use for this request
1553
+ * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
1261
1554
  * @param {CreateSearchRequest} createSearchRequest
1262
1555
  * @param {*} [options] Override http request option.
1263
1556
  * @throws {RequiredError}
@@ -1271,6 +1564,14 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
1271
1564
  * @throws {RequiredError}
1272
1565
  */
1273
1566
  deleteFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<DeleteFileResponse>>;
1567
+ /**
1568
+ *
1569
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
1570
+ * @param {string} model The model to delete
1571
+ * @param {*} [options] Override http request option.
1572
+ * @throws {RequiredError}
1573
+ */
1574
+ deleteModel(model: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<DeleteModelResponse>>;
1274
1575
  /**
1275
1576
  *
1276
1577
  * @summary Returns the contents of the specified file
@@ -1372,6 +1673,23 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
1372
1673
  * @throws {RequiredError}
1373
1674
  */
1374
1675
  createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse>;
1676
+ /**
1677
+ *
1678
+ * @summary Creates a completion using a fine-tuned model
1679
+ * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
1680
+ * @param {*} [options] Override http request option.
1681
+ * @throws {RequiredError}
1682
+ */
1683
+ createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: any): AxiosPromise<CreateCompletionResponse>;
1684
+ /**
1685
+ *
1686
+ * @summary Creates a new edit for the provided input, instruction, and parameters
1687
+ * @param {string} engineId The ID of the engine to use for this request
1688
+ * @param {CreateEditRequest} createEditRequest
1689
+ * @param {*} [options] Override http request option.
1690
+ * @throws {RequiredError}
1691
+ */
1692
+ createEdit(engineId: string, createEditRequest: CreateEditRequest, options?: any): AxiosPromise<CreateEditResponse>;
1375
1693
  /**
1376
1694
  *
1377
1695
  * @summary Creates an embedding vector representing the input text.
@@ -1401,7 +1719,7 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
1401
1719
  /**
1402
1720
  *
1403
1721
  * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
1404
- * @param {string} engineId The ID of the engine to use for this request
1722
+ * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
1405
1723
  * @param {CreateSearchRequest} createSearchRequest
1406
1724
  * @param {*} [options] Override http request option.
1407
1725
  * @throws {RequiredError}
@@ -1415,6 +1733,14 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
1415
1733
  * @throws {RequiredError}
1416
1734
  */
1417
1735
  deleteFile(fileId: string, options?: any): AxiosPromise<DeleteFileResponse>;
1736
+ /**
1737
+ *
1738
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
1739
+ * @param {string} model The model to delete
1740
+ * @param {*} [options] Override http request option.
1741
+ * @throws {RequiredError}
1742
+ */
1743
+ deleteModel(model: string, options?: any): AxiosPromise<DeleteModelResponse>;
1418
1744
  /**
1419
1745
  *
1420
1746
  * @summary Returns the contents of the specified file
@@ -1522,6 +1848,25 @@ export declare class OpenAIApi extends BaseAPI {
1522
1848
  * @memberof OpenAIApi
1523
1849
  */
1524
1850
  createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateCompletionResponse, any>>;
1851
+ /**
1852
+ *
1853
+ * @summary Creates a completion using a fine-tuned model
1854
+ * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
1855
+ * @param {*} [options] Override http request option.
1856
+ * @throws {RequiredError}
1857
+ * @memberof OpenAIApi
1858
+ */
1859
+ createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateCompletionResponse, any>>;
1860
+ /**
1861
+ *
1862
+ * @summary Creates a new edit for the provided input, instruction, and parameters
1863
+ * @param {string} engineId The ID of the engine to use for this request
1864
+ * @param {CreateEditRequest} createEditRequest
1865
+ * @param {*} [options] Override http request option.
1866
+ * @throws {RequiredError}
1867
+ * @memberof OpenAIApi
1868
+ */
1869
+ createEdit(engineId: string, createEditRequest: CreateEditRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateEditResponse, any>>;
1525
1870
  /**
1526
1871
  *
1527
1872
  * @summary Creates an embedding vector representing the input text.
@@ -1554,7 +1899,7 @@ export declare class OpenAIApi extends BaseAPI {
1554
1899
  /**
1555
1900
  *
1556
1901
  * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
1557
- * @param {string} engineId The ID of the engine to use for this request
1902
+ * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
1558
1903
  * @param {CreateSearchRequest} createSearchRequest
1559
1904
  * @param {*} [options] Override http request option.
1560
1905
  * @throws {RequiredError}
@@ -1570,6 +1915,15 @@ export declare class OpenAIApi extends BaseAPI {
1570
1915
  * @memberof OpenAIApi
1571
1916
  */
1572
1917
  deleteFile(fileId: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<DeleteFileResponse, any>>;
1918
+ /**
1919
+ *
1920
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
1921
+ * @param {string} model The model to delete
1922
+ * @param {*} [options] Override http request option.
1923
+ * @throws {RequiredError}
1924
+ * @memberof OpenAIApi
1925
+ */
1926
+ deleteModel(model: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<DeleteModelResponse, any>>;
1573
1927
  /**
1574
1928
  *
1575
1929
  * @summary Returns the contents of the specified file