openai 3.2.0 → 3.2.1
This diff compares the contents of publicly available package versions released to one of the supported registries. The information is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
- package/api.ts +21 -7
- package/dist/api.d.ts +15 -5
- package/dist/api.js +15 -8
- package/dist/base.d.ts +1 -1
- package/dist/base.js +1 -1
- package/dist/common.d.ts +1 -1
- package/dist/common.js +1 -1
- package/dist/configuration.d.ts +1 -1
- package/dist/configuration.js +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/package.json +1 -1
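
Functionally, this patch adds an optional `max_tokens` field to `CreateChatCompletionRequest` and an optional `language` argument to `createTranscription`; the rest of the diff is the regenerated `dist/` output plus an OpenAPI document version bump. Below is a minimal sketch of the new chat-completion field, assuming an `OpenAIApi` client configured with an API key; the model name and prompt are placeholders, not part of this diff.

```ts
import { Configuration, OpenAIApi } from "openai";

// Placeholder configuration; supply a real API key via the environment.
const openai = new OpenAIApi(
  new Configuration({ apiKey: process.env.OPENAI_API_KEY })
);

async function shortAnswer(): Promise<string | undefined> {
  const completion = await openai.createChatCompletion({
    model: "gpt-3.5-turbo", // assumed model name
    messages: [{ role: "user", content: "Summarize this diff in one sentence." }],
    max_tokens: 64, // new in 3.2.1: cap the length of the generated answer
  });
  return completion.data.choices[0]?.message?.content;
}
```
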
package/api.ts
CHANGED
@@ -317,6 +317,12 @@ export interface CreateChatCompletionRequest {
      * @memberof CreateChatCompletionRequest
      */
     'stop'?: CreateChatCompletionRequestStop;
+    /**
+     * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    'max_tokens'?: number;
     /**
      * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
      * @type {number}
@@ -2442,10 +2448,11 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
          * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
          * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
          * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        createTranscription: async (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
+        createTranscription: async (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
             // verify required parameter 'file' is not null or undefined
             assertParamExists('createTranscription', 'file', file)
             // verify required parameter 'model' is not null or undefined
@@ -2484,6 +2491,10 @@ export const OpenAIApiAxiosParamCreator = function (configuratio
                 localVarFormParams.append('temperature', temperature as any);
             }
 
+            if (language !== undefined) {
+                localVarFormParams.append('language', language as any);
+            }
+
 
             localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
 
@@ -3146,11 +3157,12 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
          * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
          * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
          * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        async createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranscriptionResponse>> {
-            const localVarAxiosArgs = await localVarAxiosParamCreator.createTranscription(file, model, prompt, responseFormat, temperature, options);
+        async createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranscriptionResponse>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.createTranscription(file, model, prompt, responseFormat, temperature, language, options);
             return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
         },
         /**
@@ -3472,11 +3484,12 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
          * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
          * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
          * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: any): AxiosPromise<CreateTranscriptionResponse> {
-            return localVarFp.createTranscription(file, model, prompt, responseFormat, temperature, options).then((request) => request(axios, basePath));
+        createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: any): AxiosPromise<CreateTranscriptionResponse> {
+            return localVarFp.createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(axios, basePath));
         },
         /**
          *
@@ -3812,12 +3825,13 @@ export class OpenAIApi extends BaseAPI {
      * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
      * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
      * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+     * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      * @memberof OpenAIApi
      */
-    public createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig) {
-        return OpenAIApiFp(this.configuration).createTranscription(file, model, prompt, responseFormat, temperature, options).then((request) => request(this.axios, this.basePath));
+    public createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig) {
+        return OpenAIApiFp(this.configuration).createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(this.axios, this.basePath));
     }
 
     /**
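
The `language` parameter threads through every generated layer above (the axios param creator, `OpenAIApiFp`, `OpenAIApiFactory`, and the `OpenAIApi` class) and is appended as a `language` multipart form field when defined. A rough usage sketch from Node, assuming a placeholder audio file and model name, with the read stream cast to satisfy the generated `File` type:

```ts
import fs from "fs";
import { Configuration, OpenAIApi } from "openai";

const openai = new OpenAIApi(
  new Configuration({ apiKey: process.env.OPENAI_API_KEY })
);

async function transcribe(): Promise<string> {
  const result = await openai.createTranscription(
    fs.createReadStream("meeting.mp3") as any, // hypothetical file; cast for the File type
    "whisper-1", // assumed transcription model name
    undefined,   // prompt
    undefined,   // responseFormat
    undefined,   // temperature
    "en"         // new in 3.2.1: ISO-639-1 language hint
  );
  return result.data.text;
}
```
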
package/dist/api.d.ts
CHANGED
@@ -2,7 +2,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.
+ * The version of the OpenAPI document: 1.2.0
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@@ -300,6 +300,12 @@ export interface CreateChatCompletionRequest {
      * @memberof CreateChatCompletionRequest
      */
     'stop'?: CreateChatCompletionRequestStop;
+    /**
+     * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    'max_tokens'?: number;
     /**
      * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
      * @type {number}
@@ -1958,10 +1964,11 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
      * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
      * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
      * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+     * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    createTranscription: (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig) => Promise<RequestArgs>;
+    createTranscription: (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
     /**
      *
      * @summary Translates audio into into English.
@@ -2210,10 +2217,11 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
      * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
      * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
      * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+     * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranscriptionResponse>>;
+    createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranscriptionResponse>>;
     /**
      *
      * @summary Translates audio into into English.
@@ -2462,10 +2470,11 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
      * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
      * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
      * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+     * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: any): AxiosPromise<CreateTranscriptionResponse>;
+    createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: any): AxiosPromise<CreateTranscriptionResponse>;
     /**
      *
      * @summary Translates audio into into English.
@@ -2730,11 +2739,12 @@ export declare class OpenAIApi extends BaseAPI {
      * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
      * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
      * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+     * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      * @memberof OpenAIApi
      */
-    createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateTranscriptionResponse, any>>;
+    createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateTranscriptionResponse, any>>;
     /**
      *
      * @summary Translates audio into into English.
package/dist/api.js
CHANGED
@@ -5,7 +5,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.
+ * The version of the OpenAPI document: 1.2.0
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@@ -548,10 +548,11 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
          * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
          * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
          * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        createTranscription: (file, model, prompt, responseFormat, temperature, options = {}) => __awaiter(this, void 0, void 0, function* () {
+        createTranscription: (file, model, prompt, responseFormat, temperature, language, options = {}) => __awaiter(this, void 0, void 0, function* () {
             // verify required parameter 'file' is not null or undefined
             common_1.assertParamExists('createTranscription', 'file', file);
             // verify required parameter 'model' is not null or undefined
@@ -582,6 +583,9 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
             if (temperature !== undefined) {
                 localVarFormParams.append('temperature', temperature);
             }
+            if (language !== undefined) {
+                localVarFormParams.append('language', language);
+            }
             localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
             common_1.setSearchParams(localVarUrlObj, localVarQueryParameter);
             let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
@@ -1196,12 +1200,13 @@ exports.OpenAIApiFp = function (configuration) {
          * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
          * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
          * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        createTranscription(file, model, prompt, responseFormat, temperature, options) {
+        createTranscription(file, model, prompt, responseFormat, temperature, language, options) {
             return __awaiter(this, void 0, void 0, function* () {
-                const localVarAxiosArgs = yield localVarAxiosParamCreator.createTranscription(file, model, prompt, responseFormat, temperature, options);
+                const localVarAxiosArgs = yield localVarAxiosParamCreator.createTranscription(file, model, prompt, responseFormat, temperature, language, options);
                 return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
             });
         },
@@ -1549,11 +1554,12 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
          * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
          * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
          * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        createTranscription(file, model, prompt, responseFormat, temperature, options) {
-            return localVarFp.createTranscription(file, model, prompt, responseFormat, temperature, options).then((request) => request(axios, basePath));
+        createTranscription(file, model, prompt, responseFormat, temperature, language, options) {
+            return localVarFp.createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(axios, basePath));
         },
         /**
          *
@@ -1874,12 +1880,13 @@ class OpenAIApi extends base_1.BaseAPI {
      * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
      * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
      * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+     * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      * @memberof OpenAIApi
      */
-    createTranscription(file, model, prompt, responseFormat, temperature, options) {
-        return exports.OpenAIApiFp(this.configuration).createTranscription(file, model, prompt, responseFormat, temperature, options).then((request) => request(this.axios, this.basePath));
+    createTranscription(file, model, prompt, responseFormat, temperature, language, options) {
+        return exports.OpenAIApiFp(this.configuration).createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(this.axios, this.basePath));
     }
     /**
      *
package/dist/base.d.ts
CHANGED
@@ -2,7 +2,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.
+ * The version of the OpenAPI document: 1.2.0
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
package/dist/base.js
CHANGED
@@ -5,7 +5,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.
+ * The version of the OpenAPI document: 1.2.0
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
package/dist/common.d.ts
CHANGED
@@ -2,7 +2,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.
+ * The version of the OpenAPI document: 1.2.0
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
package/dist/common.js
CHANGED
@@ -5,7 +5,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.
+ * The version of the OpenAPI document: 1.2.0
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
package/dist/configuration.d.ts
CHANGED
@@ -2,7 +2,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.
+ * The version of the OpenAPI document: 1.2.0
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
package/dist/configuration.js
CHANGED
@@ -5,7 +5,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.
+ * The version of the OpenAPI document: 1.2.0
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
package/dist/index.d.ts
CHANGED
@@ -2,7 +2,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.
+ * The version of the OpenAPI document: 1.2.0
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
package/dist/index.js
CHANGED
@@ -5,7 +5,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.
+ * The version of the OpenAPI document: 1.2.0
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).