openai 2.0.1 → 2.0.2

This diff shows the published content of two package versions as they appear in their public registry. It is provided for informational purposes only.
package/README.md CHANGED
@@ -22,7 +22,7 @@ const configuration = new Configuration({
 });
 const openai = new OpenAIApi(configuration);
 
-const completion = await openai.createCompletion("davinci", {
+const completion = await openai.createCompletion("text-davinci-001", {
   prompt: "Hello world",
 });
 console.log(completion.data.choices[0].text);
@@ -37,10 +37,9 @@ All of the available API request functions additionally contain an optional fina
 
 ```javascript
 const completion = await openai.createCompletion(
-  "davinci",
+  "text-davinci-001",
   {
-    prompt: "Once upon a time",
-    max_tokens: 5,
+    prompt: "Hello world",
   },
   {
     timeout: 1000,
@@ -57,7 +56,7 @@ API requests can potentially return errors due to invalid inputs or other issues
 
 ```javascript
 try {
-  const completion = await openai.createCompletion("davinci", {
+  const completion = await openai.createCompletion("text-davinci-001", {
     prompt: "Hello world",
   });
   console.log(completion.data.choices[0].text);
package/api.ts CHANGED
@@ -28,7 +28,7 @@ import { BASE_PATH, COLLECTION_FORMATS, RequestArgs, BaseAPI, RequiredError } fr
  */
 export interface CreateAnswerRequest {
     /**
-     * ID of the engine to use for completion.
+     * ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateAnswerRequest
      */
@@ -64,7 +64,7 @@ export interface CreateAnswerRequest {
      */
     'file'?: string | null;
     /**
-     * ID of the engine to use for [Search](/docs/api-reference/searches/create).
+     * ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateAnswerRequest
      */
@@ -199,7 +199,7 @@ export interface CreateAnswerResponseSelectedDocuments {
  */
 export interface CreateClassificationRequest {
     /**
-     * ID of the engine to use for completion.
+     * ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateClassificationRequest
      */
@@ -229,7 +229,7 @@ export interface CreateClassificationRequest {
      */
     'labels'?: Array<string> | null;
     /**
-     * ID of the engine to use for [Search](/docs/api-reference/searches/create).
+     * ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateClassificationRequest
      */
@@ -345,6 +345,110 @@ export interface CreateClassificationResponseSelectedExamples {
      */
     'label'?: string;
 }
+/**
+ *
+ * @export
+ * @interface CreateCompletionFromModelRequest
+ */
+export interface CreateCompletionFromModelRequest {
+    /**
+     * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
+     * @type {string | Array<string> | Array<number> | Array<any>}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
+    /**
+     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'max_tokens'?: number | null;
+    /**
+     * What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'temperature'?: number | null;
+    /**
+     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'top_p'?: number | null;
+    /**
+     * How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'n'?: number | null;
+    /**
+     * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
+     * @type {boolean}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'stream'?: boolean | null;
+    /**
+     * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact support@openai.com and describe your use case.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'logprobs'?: number | null;
+    /**
+     * Echo back the prompt in addition to the completion
+     * @type {boolean}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'echo'?: boolean | null;
+    /**
+     * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+     * @type {string | Array<string>}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'stop'?: string | Array<string> | null;
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'presence_penalty'?: number | null;
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'frequency_penalty'?: number | null;
+    /**
+     * Generates `best_of` completions server-side and returns the \"best\" (the one with the lowest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'best_of'?: number | null;
+    /**
+     * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
+     * @type {object}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'logit_bias'?: object | null;
+    /**
+     * ID of the model to use for completion.
+     * @type {string}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'model'?: string;
+}
+/**
+ *
+ * @export
+ * @interface CreateCompletionFromModelRequestAllOf
+ */
+export interface CreateCompletionFromModelRequestAllOf {
+    /**
+     * ID of the model to use for completion.
+     * @type {string}
+     * @memberof CreateCompletionFromModelRequestAllOf
+     */
+    'model'?: string;
+}
 /**
  *
  * @export
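The new `CreateCompletionFromModelRequest` above mirrors `CreateCompletionRequest` field for field, except that the model is named in the request body (`model`) rather than in the URL path. A minimal sketch of constructing one, assuming the type is re-exported from the package root like the other generated types; the fine-tuned model ID is a hypothetical placeholder:

```typescript
import { CreateCompletionFromModelRequest } from "openai";

// "curie:ft-acmeco-2022-01-01" is a hypothetical fine-tune ID; substitute
// the model name returned by your own fine-tune job. All fields besides
// `model` mirror CreateCompletionRequest and are optional.
const request: CreateCompletionFromModelRequest = {
  model: "curie:ft-acmeco-2022-01-01",
  prompt: "Hello world",
  max_tokens: 5,
  temperature: 0,
};
```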
@@ -358,7 +462,7 @@ export interface CreateCompletionRequest {
      */
     'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
     /**
-     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `davinci-codex`, which supports 4096).
+     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
      * @type {number}
      * @memberof CreateCompletionRequest
      */
@@ -1208,6 +1312,42 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
                 options: localVarRequestOptions,
             };
         },
+        /**
+         *
+         * @summary Creates a completion using a fine-tuned model
+         * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        createCompletionFromModel: async (createCompletionFromModelRequest: CreateCompletionFromModelRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
+            // verify required parameter 'createCompletionFromModelRequest' is not null or undefined
+            assertParamExists('createCompletionFromModel', 'createCompletionFromModelRequest', createCompletionFromModelRequest)
+            const localVarPath = `/completions`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+
+
+
+            localVarHeaderParameter['Content-Type'] = 'application/json';
+
+            setSearchParams(localVarUrlObj, localVarQueryParameter);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+            localVarRequestOptions.data = serializeDataIfNeeded(createCompletionFromModelRequest, localVarRequestOptions, configuration)
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
         /**
          *
          * @summary Creates an embedding vector representing the input text.
@@ -1335,7 +1475,7 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
         /**
          *
          * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-         * @param {string} engineId The ID of the engine to use for this request
+         * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
          * @param {CreateSearchRequest} createSearchRequest
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
@@ -1726,6 +1866,17 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
             const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletion(engineId, createCompletionRequest, options);
             return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
         },
+        /**
+         *
+         * @summary Creates a completion using a fine-tuned model
+         * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletionFromModel(createCompletionFromModelRequest, options);
+            return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
+        },
         /**
          *
          * @summary Creates an embedding vector representing the input text.
@@ -1764,7 +1915,7 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
         /**
          *
         * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-         * @param {string} engineId The ID of the engine to use for this request
+         * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
          * @param {CreateSearchRequest} createSearchRequest
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
@@ -1921,6 +2072,16 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
         createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse> {
             return localVarFp.createCompletion(engineId, createCompletionRequest, options).then((request) => request(axios, basePath));
         },
+        /**
+         *
+         * @summary Creates a completion using a fine-tuned model
+         * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: any): AxiosPromise<CreateCompletionResponse> {
+            return localVarFp.createCompletionFromModel(createCompletionFromModelRequest, options).then((request) => request(axios, basePath));
+        },
         /**
          *
          * @summary Creates an embedding vector representing the input text.
@@ -1956,7 +2117,7 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
         /**
          *
         * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-         * @param {string} engineId The ID of the engine to use for this request
+         * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
          * @param {CreateSearchRequest} createSearchRequest
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
@@ -2111,6 +2272,18 @@ export class OpenAIApi extends BaseAPI {
         return OpenAIApiFp(this.configuration).createCompletion(engineId, createCompletionRequest, options).then((request) => request(this.axios, this.basePath));
     }
 
+    /**
+     *
+     * @summary Creates a completion using a fine-tuned model
+     * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof OpenAIApi
+     */
+    public createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig) {
+        return OpenAIApiFp(this.configuration).createCompletionFromModel(createCompletionFromModelRequest, options).then((request) => request(this.axios, this.basePath));
+    }
+
     /**
      *
      * @summary Creates an embedding vector representing the input text.
@@ -2152,7 +2325,7 @@ export class OpenAIApi extends BaseAPI {
     /**
      *
     * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-     * @param {string} engineId The ID of the engine to use for this request
+     * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
     * @param {CreateSearchRequest} createSearchRequest
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
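Taken together, the api.ts changes add one public method, `OpenAIApi.createCompletionFromModel`, which POSTs to `/completions` with the model named in the request body instead of the engine-scoped path used by `createCompletion(engineId, ...)`. A minimal usage sketch, assuming an async context and the same hypothetical fine-tuned model ID as above:

```typescript
import { Configuration, OpenAIApi } from "openai";

const openai = new OpenAIApi(
  new Configuration({ apiKey: process.env.OPENAI_API_KEY })
);

// Unlike createCompletion(engineId, ...), the model travels in the body.
const completion = await openai.createCompletionFromModel({
  model: "curie:ft-acmeco-2022-01-01", // hypothetical fine-tune ID
  prompt: "Hello world",
  max_tokens: 5,
});
console.log(completion.data.choices[0].text);
```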
package/dist/api.d.ts CHANGED
@@ -19,7 +19,7 @@ import { RequestArgs, BaseAPI } from './base';
  */
 export interface CreateAnswerRequest {
     /**
-     * ID of the engine to use for completion.
+     * ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateAnswerRequest
      */
@@ -55,7 +55,7 @@ export interface CreateAnswerRequest {
      */
     'file'?: string | null;
     /**
-     * ID of the engine to use for [Search](/docs/api-reference/searches/create).
+     * ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateAnswerRequest
      */
@@ -190,7 +190,7 @@ export interface CreateAnswerResponseSelectedDocuments {
  */
 export interface CreateClassificationRequest {
     /**
-     * ID of the engine to use for completion.
+     * ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateClassificationRequest
      */
@@ -220,7 +220,7 @@ export interface CreateClassificationRequest {
      */
     'labels'?: Array<string> | null;
     /**
-     * ID of the engine to use for [Search](/docs/api-reference/searches/create).
+     * ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateClassificationRequest
      */
@@ -336,6 +336,110 @@ export interface CreateClassificationResponseSelectedExamples {
      */
     'label'?: string;
 }
+/**
+ *
+ * @export
+ * @interface CreateCompletionFromModelRequest
+ */
+export interface CreateCompletionFromModelRequest {
+    /**
+     * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
+     * @type {string | Array<string> | Array<number> | Array<any>}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
+    /**
+     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'max_tokens'?: number | null;
+    /**
+     * What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'temperature'?: number | null;
+    /**
+     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'top_p'?: number | null;
+    /**
+     * How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'n'?: number | null;
+    /**
+     * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
+     * @type {boolean}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'stream'?: boolean | null;
+    /**
+     * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact support@openai.com and describe your use case.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'logprobs'?: number | null;
+    /**
+     * Echo back the prompt in addition to the completion
+     * @type {boolean}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'echo'?: boolean | null;
+    /**
+     * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+     * @type {string | Array<string>}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'stop'?: string | Array<string> | null;
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'presence_penalty'?: number | null;
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'frequency_penalty'?: number | null;
+    /**
+     * Generates `best_of` completions server-side and returns the \"best\" (the one with the lowest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'best_of'?: number | null;
+    /**
+     * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
+     * @type {object}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'logit_bias'?: object | null;
+    /**
+     * ID of the model to use for completion.
+     * @type {string}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'model'?: string;
+}
+/**
+ *
+ * @export
+ * @interface CreateCompletionFromModelRequestAllOf
+ */
+export interface CreateCompletionFromModelRequestAllOf {
+    /**
+     * ID of the model to use for completion.
+     * @type {string}
+     * @memberof CreateCompletionFromModelRequestAllOf
+     */
+    'model'?: string;
+}
 /**
  *
  * @export
@@ -349,7 +453,7 @@ export interface CreateCompletionRequest {
      */
     'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
     /**
-     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `davinci-codex`, which supports 4096).
+     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
      * @type {number}
      * @memberof CreateCompletionRequest
      */
@@ -1084,6 +1188,14 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
      * @throws {RequiredError}
      */
     createCompletion: (engineId: string, createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
+    /**
+     *
+     * @summary Creates a completion using a fine-tuned model
+     * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     */
+    createCompletionFromModel: (createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
     /**
      *
      * @summary Creates an embedding vector representing the input text.
@@ -1113,7 +1225,7 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
     /**
      *
     * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-     * @param {string} engineId The ID of the engine to use for this request
+     * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
     * @param {CreateSearchRequest} createSearchRequest
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
@@ -1228,6 +1340,14 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
      * @throws {RequiredError}
      */
     createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>>;
+    /**
+     *
+     * @summary Creates a completion using a fine-tuned model
+     * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     */
+    createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>>;
     /**
      *
      * @summary Creates an embedding vector representing the input text.
@@ -1257,7 +1377,7 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
     /**
      *
     * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-     * @param {string} engineId The ID of the engine to use for this request
+     * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
     * @param {CreateSearchRequest} createSearchRequest
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
@@ -1372,6 +1492,14 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
      * @throws {RequiredError}
      */
     createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse>;
+    /**
+     *
+     * @summary Creates a completion using a fine-tuned model
+     * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     */
+    createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: any): AxiosPromise<CreateCompletionResponse>;
     /**
      *
      * @summary Creates an embedding vector representing the input text.
@@ -1401,7 +1529,7 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
     /**
      *
     * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-     * @param {string} engineId The ID of the engine to use for this request
+     * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
     * @param {CreateSearchRequest} createSearchRequest
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
@@ -1522,6 +1650,15 @@ export declare class OpenAIApi extends BaseAPI {
      * @memberof OpenAIApi
      */
     createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateCompletionResponse, any>>;
+    /**
+     *
+     * @summary Creates a completion using a fine-tuned model
+     * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof OpenAIApi
+     */
+    createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateCompletionResponse, any>>;
     /**
      *
      * @summary Creates an embedding vector representing the input text.
@@ -1554,7 +1691,7 @@ export declare class OpenAIApi extends BaseAPI {
     /**
      *
     * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-     * @param {string} engineId The ID of the engine to use for this request
+     * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
     * @param {CreateSearchRequest} createSearchRequest
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
package/dist/api.js CHANGED
@@ -158,6 +158,36 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
                 options: localVarRequestOptions,
             };
         }),
+        /**
+         *
+         * @summary Creates a completion using a fine-tuned model
+         * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        createCompletionFromModel: (createCompletionFromModelRequest, options = {}) => __awaiter(this, void 0, void 0, function* () {
+            // verify required parameter 'createCompletionFromModelRequest' is not null or undefined
+            common_1.assertParamExists('createCompletionFromModel', 'createCompletionFromModelRequest', createCompletionFromModelRequest);
+            const localVarPath = `/completions`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, common_1.DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+            const localVarRequestOptions = Object.assign(Object.assign({ method: 'POST' }, baseOptions), options);
+            const localVarHeaderParameter = {};
+            const localVarQueryParameter = {};
+            localVarHeaderParameter['Content-Type'] = 'application/json';
+            common_1.setSearchParams(localVarUrlObj, localVarQueryParameter);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = Object.assign(Object.assign(Object.assign({}, localVarHeaderParameter), headersFromBaseOptions), options.headers);
+            localVarRequestOptions.data = common_1.serializeDataIfNeeded(createCompletionFromModelRequest, localVarRequestOptions, configuration);
+            return {
+                url: common_1.toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        }),
         /**
          *
          * @summary Creates an embedding vector representing the input text.
@@ -265,7 +295,7 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
         /**
          *
         * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-         * @param {string} engineId The ID of the engine to use for this request
+         * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
         * @param {CreateSearchRequest} createSearchRequest
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
@@ -611,6 +641,19 @@ exports.OpenAIApiFp = function (configuration) {
                 return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
             });
         },
+        /**
+         *
+         * @summary Creates a completion using a fine-tuned model
+         * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        createCompletionFromModel(createCompletionFromModelRequest, options) {
+            return __awaiter(this, void 0, void 0, function* () {
+                const localVarAxiosArgs = yield localVarAxiosParamCreator.createCompletionFromModel(createCompletionFromModelRequest, options);
+                return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
+            });
+        },
         /**
          *
          * @summary Creates an embedding vector representing the input text.
@@ -655,7 +698,7 @@ exports.OpenAIApiFp = function (configuration) {
         /**
          *
         * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-         * @param {string} engineId The ID of the engine to use for this request
+         * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
         * @param {CreateSearchRequest} createSearchRequest
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
@@ -831,6 +874,16 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
         createCompletion(engineId, createCompletionRequest, options) {
             return localVarFp.createCompletion(engineId, createCompletionRequest, options).then((request) => request(axios, basePath));
         },
+        /**
+         *
+         * @summary Creates a completion using a fine-tuned model
+         * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        createCompletionFromModel(createCompletionFromModelRequest, options) {
+            return localVarFp.createCompletionFromModel(createCompletionFromModelRequest, options).then((request) => request(axios, basePath));
+        },
         /**
          *
          * @summary Creates an embedding vector representing the input text.
@@ -866,7 +919,7 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
         /**
          *
         * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-         * @param {string} engineId The ID of the engine to use for this request
+         * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
         * @param {CreateSearchRequest} createSearchRequest
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
@@ -1016,6 +1069,17 @@ class OpenAIApi extends base_1.BaseAPI {
     createCompletion(engineId, createCompletionRequest, options) {
         return exports.OpenAIApiFp(this.configuration).createCompletion(engineId, createCompletionRequest, options).then((request) => request(this.axios, this.basePath));
     }
+    /**
+     *
+     * @summary Creates a completion using a fine-tuned model
+     * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof OpenAIApi
+     */
+    createCompletionFromModel(createCompletionFromModelRequest, options) {
+        return exports.OpenAIApiFp(this.configuration).createCompletionFromModel(createCompletionFromModelRequest, options).then((request) => request(this.axios, this.basePath));
+    }
     /**
      *
      * @summary Creates an embedding vector representing the input text.
@@ -1054,7 +1118,7 @@ class OpenAIApi extends base_1.BaseAPI {
     /**
      *
     * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-     * @param {string} engineId The ID of the engine to use for this request
+     * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
     * @param {CreateSearchRequest} createSearchRequest
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "openai",
-  "version": "2.0.1",
+  "version": "2.0.2",
  "description": "Node.js library for the OpenAI API",
   "keywords": [
     "openai",