@scout9/admin 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124) hide show
  1. package/README.md +1 -0
  2. package/build/api-openai.d.ts +2992 -0
  3. package/build/api-openai.js +1968 -0
  4. package/build/api.d.ts +78 -0
  5. package/build/api.js +90 -0
  6. package/build/base.d.ts +43 -0
  7. package/build/base.js +52 -0
  8. package/build/common.d.ts +54 -0
  9. package/build/common.js +132 -0
  10. package/build/configuration.d.ts +84 -0
  11. package/build/configuration.js +106 -0
  12. package/build/index.d.ts +2 -0
  13. package/build/index.js +18 -0
  14. package/build/schemas/common/algolia.d.ts +20 -0
  15. package/build/schemas/common/algolia.js +2 -0
  16. package/build/schemas/common/contact-map.d.ts +33 -0
  17. package/build/schemas/common/contact-map.js +2 -0
  18. package/build/schemas/common/currency.d.ts +1 -0
  19. package/build/schemas/common/currency.js +2 -0
  20. package/build/schemas/common/index.d.ts +6 -0
  21. package/build/schemas/common/index.js +22 -0
  22. package/build/schemas/common/location.d.ts +5 -0
  23. package/build/schemas/common/location.js +2 -0
  24. package/build/schemas/common/task.d.ts +20 -0
  25. package/build/schemas/common/task.js +2 -0
  26. package/build/schemas/common/time.d.ts +14 -0
  27. package/build/schemas/common/time.js +2 -0
  28. package/build/schemas/common.d.ts +77 -0
  29. package/build/schemas/common.js +8 -0
  30. package/build/schemas/conversations/context.d.ts +55 -0
  31. package/build/schemas/conversations/context.js +2 -0
  32. package/build/schemas/conversations/conversation.d.ts +53 -0
  33. package/build/schemas/conversations/conversation.js +2 -0
  34. package/build/schemas/conversations/index.d.ts +6 -0
  35. package/build/schemas/conversations/index.js +22 -0
  36. package/build/schemas/conversations/message.d.ts +62 -0
  37. package/build/schemas/conversations/message.js +2 -0
  38. package/build/schemas/conversations/parsed.d.ts +7 -0
  39. package/build/schemas/conversations/parsed.js +2 -0
  40. package/build/schemas/conversations/scheduled-conversation.d.ts +31 -0
  41. package/build/schemas/conversations/scheduled-conversation.js +2 -0
  42. package/build/schemas/conversations/webhook.d.ts +8 -0
  43. package/build/schemas/conversations/webhook.js +2 -0
  44. package/build/schemas/index.d.ts +3 -0
  45. package/build/schemas/index.js +19 -0
  46. package/build/schemas/users/businesses/agents/agent.d.ts +76 -0
  47. package/build/schemas/users/businesses/agents/agent.js +2 -0
  48. package/build/schemas/users/businesses/agents/auth.d.ts +7 -0
  49. package/build/schemas/users/businesses/agents/auth.js +2 -0
  50. package/build/schemas/users/businesses/agents/index.d.ts +2 -0
  51. package/build/schemas/users/businesses/agents/index.js +18 -0
  52. package/build/schemas/users/businesses/business-location.d.ts +10 -0
  53. package/build/schemas/users/businesses/business-location.js +2 -0
  54. package/build/schemas/users/businesses/business.d.ts +32 -0
  55. package/build/schemas/users/businesses/business.js +2 -0
  56. package/build/schemas/users/businesses/context/context-indexed.d.ts +9 -0
  57. package/build/schemas/users/businesses/context/context-indexed.js +2 -0
  58. package/build/schemas/users/businesses/context/context-saves.d.ts +12 -0
  59. package/build/schemas/users/businesses/context/context-saves.js +2 -0
  60. package/build/schemas/users/businesses/context/context.d.ts +64 -0
  61. package/build/schemas/users/businesses/context/context.js +2 -0
  62. package/build/schemas/users/businesses/context/index.d.ts +2 -0
  63. package/build/schemas/users/businesses/context/index.js +18 -0
  64. package/build/schemas/users/businesses/index.d.ts +5 -0
  65. package/build/schemas/users/businesses/index.js +21 -0
  66. package/build/schemas/users/businesses/notifications.d.ts +7 -0
  67. package/build/schemas/users/businesses/notifications.js +2 -0
  68. package/build/schemas/users/businesses/offerings/index.d.ts +2 -0
  69. package/build/schemas/users/businesses/offerings/index.js +18 -0
  70. package/build/schemas/users/businesses/offerings/offer-indexed.d.ts +34 -0
  71. package/build/schemas/users/businesses/offerings/offer-indexed.js +2 -0
  72. package/build/schemas/users/businesses/offerings/offer.d.ts +29 -0
  73. package/build/schemas/users/businesses/offerings/offer.js +2 -0
  74. package/build/schemas/users/businesses/thread.d.ts +44 -0
  75. package/build/schemas/users/businesses/thread.js +2 -0
  76. package/build/schemas/users/customers/customer.d.ts +35 -0
  77. package/build/schemas/users/customers/customer.js +2 -0
  78. package/build/schemas/users/customers/index.d.ts +1 -0
  79. package/build/schemas/users/customers/index.js +17 -0
  80. package/build/schemas/users/index.d.ts +2 -0
  81. package/build/schemas/users/index.js +18 -0
  82. package/package.json +33 -0
  83. package/src/api-openai.txt +4117 -0
  84. package/src/api.ts +141 -0
  85. package/src/base.ts +55 -0
  86. package/src/common.ts +136 -0
  87. package/src/configuration.ts +124 -0
  88. package/src/index.ts +2 -0
  89. package/src/schemas/common/algolia.ts +24 -0
  90. package/src/schemas/common/contact-map.ts +35 -0
  91. package/src/schemas/common/currency.ts +1 -0
  92. package/src/schemas/common/index.ts +6 -0
  93. package/src/schemas/common/location.ts +6 -0
  94. package/src/schemas/common/task.ts +26 -0
  95. package/src/schemas/common/time.ts +15 -0
  96. package/src/schemas/common.ts +94 -0
  97. package/src/schemas/conversations/context.ts +64 -0
  98. package/src/schemas/conversations/conversation.ts +68 -0
  99. package/src/schemas/conversations/index.ts +6 -0
  100. package/src/schemas/conversations/message.ts +78 -0
  101. package/src/schemas/conversations/parsed.ts +5 -0
  102. package/src/schemas/conversations/scheduled-conversation.ts +35 -0
  103. package/src/schemas/conversations/webhook.ts +10 -0
  104. package/src/schemas/index.ts +3 -0
  105. package/src/schemas/users/businesses/agents/agent.ts +107 -0
  106. package/src/schemas/users/businesses/agents/auth.ts +8 -0
  107. package/src/schemas/users/businesses/agents/index.ts +2 -0
  108. package/src/schemas/users/businesses/business-location.ts +15 -0
  109. package/src/schemas/users/businesses/business.ts +43 -0
  110. package/src/schemas/users/businesses/context/context-indexed.ts +11 -0
  111. package/src/schemas/users/businesses/context/context-saves.ts +14 -0
  112. package/src/schemas/users/businesses/context/context.ts +76 -0
  113. package/src/schemas/users/businesses/context/index.ts +2 -0
  114. package/src/schemas/users/businesses/index.ts +6 -0
  115. package/src/schemas/users/businesses/notifications.ts +12 -0
  116. package/src/schemas/users/businesses/offerings/index.ts +2 -0
  117. package/src/schemas/users/businesses/offerings/offer-indexed.ts +42 -0
  118. package/src/schemas/users/businesses/offerings/offer.ts +39 -0
  119. package/src/schemas/users/businesses/thread.ts +55 -0
  120. package/src/schemas/users/customers/customer.ts +46 -0
  121. package/src/schemas/users/customers/index.ts +1 -0
  122. package/src/schemas/users/index.ts +2 -0
  123. package/tsconfig.json +16 -0
  124. package/tsconfig.tsbuildinfo +1 -0
@@ -0,0 +1,2992 @@
1
+ import type { Configuration } from './configuration';
2
+ import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
3
+ import type { RequestArgs } from './base';
4
+ import { BaseAPI } from './base';
5
+ /**
6
+ *
7
+ * @export
8
+ * @interface ChatCompletionFunctions
9
+ */
10
+ export interface ChatCompletionFunctions {
11
+ /**
12
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
13
+ * @type {string}
14
+ * @memberof ChatCompletionFunctions
15
+ */
16
+ 'name': string;
17
+ /**
18
+ * The description of what the function does.
19
+ * @type {string}
20
+ * @memberof ChatCompletionFunctions
21
+ */
22
+ 'description'?: string;
23
+ /**
24
+ * The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
25
+ * @type {{ [key: string]: any; }}
26
+ * @memberof ChatCompletionFunctions
27
+ */
28
+ 'parameters'?: {
29
+ [key: string]: any;
30
+ };
31
+ }
32
+ /**
33
+ *
34
+ * @export
35
+ * @interface ChatCompletionRequestMessage
36
+ */
37
+ export interface ChatCompletionRequestMessage {
38
+ /**
39
+ * The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
40
+ * @type {string}
41
+ * @memberof ChatCompletionRequestMessage
42
+ */
43
+ 'role': ChatCompletionRequestMessageRoleEnum;
44
+ /**
45
+ * The contents of the message. `content` is required for all messages except assistant messages with function calls.
46
+ * @type {string}
47
+ * @memberof ChatCompletionRequestMessage
48
+ */
49
+ 'content'?: string;
50
+ /**
51
+ * The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
52
+ * @type {string}
53
+ * @memberof ChatCompletionRequestMessage
54
+ */
55
+ 'name'?: string;
56
+ /**
57
+ *
58
+ * @type {ChatCompletionRequestMessageFunctionCall}
59
+ * @memberof ChatCompletionRequestMessage
60
+ */
61
+ 'function_call'?: ChatCompletionRequestMessageFunctionCall;
62
+ }
63
+ export declare const ChatCompletionRequestMessageRoleEnum: {
64
+ readonly System: "system";
65
+ readonly User: "user";
66
+ readonly Assistant: "assistant";
67
+ readonly Function: "function";
68
+ };
69
+ export type ChatCompletionRequestMessageRoleEnum = typeof ChatCompletionRequestMessageRoleEnum[keyof typeof ChatCompletionRequestMessageRoleEnum];
70
+ /**
71
+ * The name and arguments of a function that should be called, as generated by the model.
72
+ * @export
73
+ * @interface ChatCompletionRequestMessageFunctionCall
74
+ */
75
+ export interface ChatCompletionRequestMessageFunctionCall {
76
+ /**
77
+ * The name of the function to call.
78
+ * @type {string}
79
+ * @memberof ChatCompletionRequestMessageFunctionCall
80
+ */
81
+ 'name'?: string;
82
+ /**
83
+ * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
84
+ * @type {string}
85
+ * @memberof ChatCompletionRequestMessageFunctionCall
86
+ */
87
+ 'arguments'?: string;
88
+ }
89
+ /**
90
+ *
91
+ * @export
92
+ * @interface ChatCompletionResponseMessage
93
+ */
94
+ export interface ChatCompletionResponseMessage {
95
+ /**
96
+ * The role of the author of this message.
97
+ * @type {string}
98
+ * @memberof ChatCompletionResponseMessage
99
+ */
100
+ 'role': ChatCompletionResponseMessageRoleEnum;
101
+ /**
102
+ * The contents of the message.
103
+ * @type {string}
104
+ * @memberof ChatCompletionResponseMessage
105
+ */
106
+ 'content'?: string;
107
+ /**
108
+ *
109
+ * @type {ChatCompletionRequestMessageFunctionCall}
110
+ * @memberof ChatCompletionResponseMessage
111
+ */
112
+ 'function_call'?: ChatCompletionRequestMessageFunctionCall;
113
+ }
114
+ export declare const ChatCompletionResponseMessageRoleEnum: {
115
+ readonly System: "system";
116
+ readonly User: "user";
117
+ readonly Assistant: "assistant";
118
+ readonly Function: "function";
119
+ };
120
+ export type ChatCompletionResponseMessageRoleEnum = typeof ChatCompletionResponseMessageRoleEnum[keyof typeof ChatCompletionResponseMessageRoleEnum];
121
+ /**
122
+ *
123
+ * @export
124
+ * @interface CreateAnswerRequest
125
+ */
126
+ export interface CreateAnswerRequest {
127
+ /**
128
+ * ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
129
+ * @type {string}
130
+ * @memberof CreateAnswerRequest
131
+ */
132
+ 'model': string;
133
+ /**
134
+ * Question to get answered.
135
+ * @type {string}
136
+ * @memberof CreateAnswerRequest
137
+ */
138
+ 'question': string;
139
+ /**
140
+ * List of (question, answer) pairs that will help steer the model towards the tone and answer format you\'d like. We recommend adding 2 to 3 examples.
141
+ * @type {Array<any>}
142
+ * @memberof CreateAnswerRequest
143
+ */
144
+ 'examples': Array<any>;
145
+ /**
146
+ * A text snippet containing the contextual information used to generate the answers for the `examples` you provide.
147
+ * @type {string}
148
+ * @memberof CreateAnswerRequest
149
+ */
150
+ 'examples_context': string;
151
+ /**
152
+ * List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. You should specify either `documents` or a `file`, but not both.
153
+ * @type {Array<string>}
154
+ * @memberof CreateAnswerRequest
155
+ */
156
+ 'documents'?: Array<string> | null;
157
+ /**
158
+ * The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `documents` or a `file`, but not both.
159
+ * @type {string}
160
+ * @memberof CreateAnswerRequest
161
+ */
162
+ 'file'?: string | null;
163
+ /**
164
+ * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
165
+ * @type {string}
166
+ * @memberof CreateAnswerRequest
167
+ */
168
+ 'search_model'?: string | null;
169
+ /**
170
+ * The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.
171
+ * @type {number}
172
+ * @memberof CreateAnswerRequest
173
+ */
174
+ 'max_rerank'?: number | null;
175
+ /**
176
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
177
+ * @type {number}
178
+ * @memberof CreateAnswerRequest
179
+ */
180
+ 'temperature'?: number | null;
181
+ /**
182
+ * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
183
+ * @type {number}
184
+ * @memberof CreateAnswerRequest
185
+ */
186
+ 'logprobs'?: number | null;
187
+ /**
188
+ * The maximum number of tokens allowed for the generated answer
189
+ * @type {number}
190
+ * @memberof CreateAnswerRequest
191
+ */
192
+ 'max_tokens'?: number | null;
193
+ /**
194
+ *
195
+ * @type {CreateAnswerRequestStop}
196
+ * @memberof CreateAnswerRequest
197
+ */
198
+ 'stop'?: CreateAnswerRequestStop | null;
199
+ /**
200
+ * How many answers to generate for each question.
201
+ * @type {number}
202
+ * @memberof CreateAnswerRequest
203
+ */
204
+ 'n'?: number | null;
205
+ /**
206
+ * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
207
+ * @type {object}
208
+ * @memberof CreateAnswerRequest
209
+ */
210
+ 'logit_bias'?: object | null;
211
+ /**
212
+ * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
213
+ * @type {boolean}
214
+ * @memberof CreateAnswerRequest
215
+ */
216
+ 'return_metadata'?: boolean | null;
217
+ /**
218
+ * If set to `true`, the returned JSON will include a \"prompt\" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes.
219
+ * @type {boolean}
220
+ * @memberof CreateAnswerRequest
221
+ */
222
+ 'return_prompt'?: boolean | null;
223
+ /**
224
+ * If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion.
225
+ * @type {Array<any>}
226
+ * @memberof CreateAnswerRequest
227
+ */
228
+ 'expand'?: Array<any> | null;
229
+ /**
230
+ * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
231
+ * @type {string}
232
+ * @memberof CreateAnswerRequest
233
+ */
234
+ 'user'?: string;
235
+ }
236
+ /**
237
+ * @type CreateAnswerRequestStop
238
+ * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
239
+ * @export
240
+ */
241
+ export type CreateAnswerRequestStop = Array<string> | string;
242
+ /**
243
+ *
244
+ * @export
245
+ * @interface CreateAnswerResponse
246
+ */
247
+ export interface CreateAnswerResponse {
248
+ /**
249
+ *
250
+ * @type {string}
251
+ * @memberof CreateAnswerResponse
252
+ */
253
+ 'object'?: string;
254
+ /**
255
+ *
256
+ * @type {string}
257
+ * @memberof CreateAnswerResponse
258
+ */
259
+ 'model'?: string;
260
+ /**
261
+ *
262
+ * @type {string}
263
+ * @memberof CreateAnswerResponse
264
+ */
265
+ 'search_model'?: string;
266
+ /**
267
+ *
268
+ * @type {string}
269
+ * @memberof CreateAnswerResponse
270
+ */
271
+ 'completion'?: string;
272
+ /**
273
+ *
274
+ * @type {Array<string>}
275
+ * @memberof CreateAnswerResponse
276
+ */
277
+ 'answers'?: Array<string>;
278
+ /**
279
+ *
280
+ * @type {Array<CreateAnswerResponseSelectedDocumentsInner>}
281
+ * @memberof CreateAnswerResponse
282
+ */
283
+ 'selected_documents'?: Array<CreateAnswerResponseSelectedDocumentsInner>;
284
+ }
285
+ /**
286
+ *
287
+ * @export
288
+ * @interface CreateAnswerResponseSelectedDocumentsInner
289
+ */
290
+ export interface CreateAnswerResponseSelectedDocumentsInner {
291
+ /**
292
+ *
293
+ * @type {number}
294
+ * @memberof CreateAnswerResponseSelectedDocumentsInner
295
+ */
296
+ 'document'?: number;
297
+ /**
298
+ *
299
+ * @type {string}
300
+ * @memberof CreateAnswerResponseSelectedDocumentsInner
301
+ */
302
+ 'text'?: string;
303
+ }
304
+ /**
305
+ *
306
+ * @export
307
+ * @interface CreateChatCompletionRequest
308
+ */
309
+ export interface CreateChatCompletionRequest {
310
+ /**
311
+ * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
312
+ * @type {string}
313
+ * @memberof CreateChatCompletionRequest
314
+ */
315
+ 'model': string;
316
+ /**
317
+ * A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
318
+ * @type {Array<ChatCompletionRequestMessage>}
319
+ * @memberof CreateChatCompletionRequest
320
+ */
321
+ 'messages': Array<ChatCompletionRequestMessage>;
322
+ /**
323
+ * A list of functions the model may generate JSON inputs for.
324
+ * @type {Array<ChatCompletionFunctions>}
325
+ * @memberof CreateChatCompletionRequest
326
+ */
327
+ 'functions'?: Array<ChatCompletionFunctions>;
328
+ /**
329
+ *
330
+ * @type {CreateChatCompletionRequestFunctionCall}
331
+ * @memberof CreateChatCompletionRequest
332
+ */
333
+ 'function_call'?: CreateChatCompletionRequestFunctionCall;
334
+ /**
335
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
336
+ * @type {number}
337
+ * @memberof CreateChatCompletionRequest
338
+ */
339
+ 'temperature'?: number | null;
340
+ /**
341
+ * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
342
+ * @type {number}
343
+ * @memberof CreateChatCompletionRequest
344
+ */
345
+ 'top_p'?: number | null;
346
+ /**
347
+ * How many chat completion choices to generate for each input message.
348
+ * @type {number}
349
+ * @memberof CreateChatCompletionRequest
350
+ */
351
+ 'n'?: number | null;
352
+ /**
353
+ * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
354
+ * @type {boolean}
355
+ * @memberof CreateChatCompletionRequest
356
+ */
357
+ 'stream'?: boolean | null;
358
+ /**
359
+ *
360
+ * @type {CreateChatCompletionRequestStop}
361
+ * @memberof CreateChatCompletionRequest
362
+ */
363
+ 'stop'?: CreateChatCompletionRequestStop;
364
+ /**
365
+ * The maximum number of [tokens](/tokenizer) to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model\'s context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
366
+ * @type {number}
367
+ * @memberof CreateChatCompletionRequest
368
+ */
369
+ 'max_tokens'?: number;
370
+ /**
371
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
372
+ * @type {number}
373
+ * @memberof CreateChatCompletionRequest
374
+ */
375
+ 'presence_penalty'?: number | null;
376
+ /**
377
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
378
+ * @type {number}
379
+ * @memberof CreateChatCompletionRequest
380
+ */
381
+ 'frequency_penalty'?: number | null;
382
+ /**
383
+ * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
384
+ * @type {object}
385
+ * @memberof CreateChatCompletionRequest
386
+ */
387
+ 'logit_bias'?: object | null;
388
+ /**
389
+ * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
390
+ * @type {string}
391
+ * @memberof CreateChatCompletionRequest
392
+ */
393
+ 'user'?: string;
394
+ }
395
+ /**
396
+ * @type CreateChatCompletionRequestFunctionCall
397
+ * Controls how the model responds to function calls. \"none\" means the model does not call a function, and responds to the end-user. \"auto\" means the model can pick between an end-user or calling a function. Specifying a particular function via `{\"name\":\\ \"my_function\"}` forces the model to call that function. \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.
398
+ * @export
399
+ */
400
+ export type CreateChatCompletionRequestFunctionCall = CreateChatCompletionRequestFunctionCallOneOf | string;
401
+ /**
402
+ *
403
+ * @export
404
+ * @interface CreateChatCompletionRequestFunctionCallOneOf
405
+ */
406
+ export interface CreateChatCompletionRequestFunctionCallOneOf {
407
+ /**
408
+ * The name of the function to call.
409
+ * @type {string}
410
+ * @memberof CreateChatCompletionRequestFunctionCallOneOf
411
+ */
412
+ 'name': string;
413
+ }
414
+ /**
415
+ * @type CreateChatCompletionRequestStop
416
+ * Up to 4 sequences where the API will stop generating further tokens.
417
+ * @export
418
+ */
419
+ export type CreateChatCompletionRequestStop = Array<string> | string;
420
+ /**
421
+ *
422
+ * @export
423
+ * @interface CreateChatCompletionResponse
424
+ */
425
+ export interface CreateChatCompletionResponse {
426
+ /**
427
+ *
428
+ * @type {string}
429
+ * @memberof CreateChatCompletionResponse
430
+ */
431
+ 'id': string;
432
+ /**
433
+ *
434
+ * @type {string}
435
+ * @memberof CreateChatCompletionResponse
436
+ */
437
+ 'object': string;
438
+ /**
439
+ *
440
+ * @type {number}
441
+ * @memberof CreateChatCompletionResponse
442
+ */
443
+ 'created': number;
444
+ /**
445
+ *
446
+ * @type {string}
447
+ * @memberof CreateChatCompletionResponse
448
+ */
449
+ 'model': string;
450
+ /**
451
+ *
452
+ * @type {Array<CreateChatCompletionResponseChoicesInner>}
453
+ * @memberof CreateChatCompletionResponse
454
+ */
455
+ 'choices': Array<CreateChatCompletionResponseChoicesInner>;
456
+ /**
457
+ *
458
+ * @type {CreateCompletionResponseUsage}
459
+ * @memberof CreateChatCompletionResponse
460
+ */
461
+ 'usage'?: CreateCompletionResponseUsage;
462
+ }
463
+ /**
464
+ *
465
+ * @export
466
+ * @interface CreateChatCompletionResponseChoicesInner
467
+ */
468
+ export interface CreateChatCompletionResponseChoicesInner {
469
+ /**
470
+ *
471
+ * @type {number}
472
+ * @memberof CreateChatCompletionResponseChoicesInner
473
+ */
474
+ 'index'?: number;
475
+ /**
476
+ *
477
+ * @type {ChatCompletionResponseMessage}
478
+ * @memberof CreateChatCompletionResponseChoicesInner
479
+ */
480
+ 'message'?: ChatCompletionResponseMessage;
481
+ /**
482
+ *
483
+ * @type {string}
484
+ * @memberof CreateChatCompletionResponseChoicesInner
485
+ */
486
+ 'finish_reason'?: string;
487
+ }
488
+ /**
489
+ *
490
+ * @export
491
+ * @interface CreateClassificationRequest
492
+ */
493
+ export interface CreateClassificationRequest {
494
+ /**
495
+ * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
496
+ * @type {string}
497
+ * @memberof CreateClassificationRequest
498
+ */
499
+ 'model': string;
500
+ /**
501
+ * Query to be classified.
502
+ * @type {string}
503
+ * @memberof CreateClassificationRequest
504
+ */
505
+ 'query': string;
506
+ /**
507
+ * A list of examples with labels, in the following format: `[[\"The movie is so interesting.\", \"Positive\"], [\"It is quite boring.\", \"Negative\"], ...]` All the label strings will be normalized to be capitalized. You should specify either `examples` or `file`, but not both.
508
+ * @type {Array<any>}
509
+ * @memberof CreateClassificationRequest
510
+ */
511
+ 'examples'?: Array<any> | null;
512
+ /**
513
+ * The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `examples` or `file`, but not both.
514
+ * @type {string}
515
+ * @memberof CreateClassificationRequest
516
+ */
517
+ 'file'?: string | null;
518
+ /**
519
+ * The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized.
520
+ * @type {Array<string>}
521
+ * @memberof CreateClassificationRequest
522
+ */
523
+ 'labels'?: Array<string> | null;
524
+ /**
525
+ * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
526
+ * @type {string}
527
+ * @memberof CreateClassificationRequest
528
+ */
529
+ 'search_model'?: string | null;
530
+ /**
531
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
532
+ * @type {number}
533
+ * @memberof CreateClassificationRequest
534
+ */
535
+ 'temperature'?: number | null;
536
+ /**
537
+ * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
538
+ * @type {number}
539
+ * @memberof CreateClassificationRequest
540
+ */
541
+ 'logprobs'?: number | null;
542
+ /**
543
+ * The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.
544
+ * @type {number}
545
+ * @memberof CreateClassificationRequest
546
+ */
547
+ 'max_examples'?: number | null;
548
+ /**
549
+ * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
550
+ * @type {object}
551
+ * @memberof CreateClassificationRequest
552
+ */
553
+ 'logit_bias'?: object | null;
554
+ /**
555
+ * If set to `true`, the returned JSON will include a \"prompt\" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes.
556
+ * @type {boolean}
557
+ * @memberof CreateClassificationRequest
558
+ */
559
+ 'return_prompt'?: boolean | null;
560
+ /**
561
+ * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
562
+ * @type {boolean}
563
+ * @memberof CreateClassificationRequest
564
+ */
565
+ 'return_metadata'?: boolean | null;
566
+ /**
567
+ * If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion.
568
+ * @type {Array<any>}
569
+ * @memberof CreateClassificationRequest
570
+ */
571
+ 'expand'?: Array<any> | null;
572
+ /**
573
+ * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
574
+ * @type {string}
575
+ * @memberof CreateClassificationRequest
576
+ */
577
+ 'user'?: string;
578
+ }
579
+ /**
580
+ *
581
+ * @export
582
+ * @interface CreateClassificationResponse
583
+ */
584
+ export interface CreateClassificationResponse {
585
+ /**
586
+ *
587
+ * @type {string}
588
+ * @memberof CreateClassificationResponse
589
+ */
590
+ 'object'?: string;
591
+ /**
592
+ *
593
+ * @type {string}
594
+ * @memberof CreateClassificationResponse
595
+ */
596
+ 'model'?: string;
597
+ /**
598
+ *
599
+ * @type {string}
600
+ * @memberof CreateClassificationResponse
601
+ */
602
+ 'search_model'?: string;
603
+ /**
604
+ *
605
+ * @type {string}
606
+ * @memberof CreateClassificationResponse
607
+ */
608
+ 'completion'?: string;
609
+ /**
610
+ *
611
+ * @type {string}
612
+ * @memberof CreateClassificationResponse
613
+ */
614
+ 'label'?: string;
615
+ /**
616
+ *
617
+ * @type {Array<CreateClassificationResponseSelectedExamplesInner>}
618
+ * @memberof CreateClassificationResponse
619
+ */
620
+ 'selected_examples'?: Array<CreateClassificationResponseSelectedExamplesInner>;
621
+ }
622
+ /**
623
+ *
624
+ * @export
625
+ * @interface CreateClassificationResponseSelectedExamplesInner
626
+ */
627
+ export interface CreateClassificationResponseSelectedExamplesInner {
628
+ /**
629
+ *
630
+ * @type {number}
631
+ * @memberof CreateClassificationResponseSelectedExamplesInner
632
+ */
633
+ 'document'?: number;
634
+ /**
635
+ *
636
+ * @type {string}
637
+ * @memberof CreateClassificationResponseSelectedExamplesInner
638
+ */
639
+ 'text'?: string;
640
+ /**
641
+ *
642
+ * @type {string}
643
+ * @memberof CreateClassificationResponseSelectedExamplesInner
644
+ */
645
+ 'label'?: string;
646
+ }
647
+ /**
648
+ *
649
+ * @export
650
+ * @interface CreateCompletionRequest
651
+ */
652
+ export interface CreateCompletionRequest {
653
+ /**
654
+ * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
655
+ * @type {string}
656
+ * @memberof CreateCompletionRequest
657
+ */
658
+ 'model': string;
659
+ /**
660
+ *
661
+ * @type {CreateCompletionRequestPrompt}
662
+ * @memberof CreateCompletionRequest
663
+ */
664
+ 'prompt'?: CreateCompletionRequestPrompt | null;
665
+ /**
666
+ * The suffix that comes after a completion of inserted text.
667
+ * @type {string}
668
+ * @memberof CreateCompletionRequest
669
+ */
670
+ 'suffix'?: string | null;
671
+ /**
672
+ * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
673
+ * @type {number}
674
+ * @memberof CreateCompletionRequest
675
+ */
676
+ 'max_tokens'?: number | null;
677
+ /**
678
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
679
+ * @type {number}
680
+ * @memberof CreateCompletionRequest
681
+ */
682
+ 'temperature'?: number | null;
683
+ /**
684
+ * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
685
+ * @type {number}
686
+ * @memberof CreateCompletionRequest
687
+ */
688
+ 'top_p'?: number | null;
689
+ /**
690
+ * How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
691
+ * @type {number}
692
+ * @memberof CreateCompletionRequest
693
+ */
694
+ 'n'?: number | null;
695
+ /**
696
+ * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
697
+ * @type {boolean}
698
+ * @memberof CreateCompletionRequest
699
+ */
700
+ 'stream'?: boolean | null;
701
+ /**
702
+ * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5.
703
+ * @type {number}
704
+ * @memberof CreateCompletionRequest
705
+ */
706
+ 'logprobs'?: number | null;
707
+ /**
708
+ * Echo back the prompt in addition to the completion
709
+ * @type {boolean}
710
+ * @memberof CreateCompletionRequest
711
+ */
712
+ 'echo'?: boolean | null;
713
+ /**
714
+ *
715
+ * @type {CreateCompletionRequestStop}
716
+ * @memberof CreateCompletionRequest
717
+ */
718
+ 'stop'?: CreateCompletionRequestStop | null;
719
+ /**
720
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
721
+ * @type {number}
722
+ * @memberof CreateCompletionRequest
723
+ */
724
+ 'presence_penalty'?: number | null;
725
+ /**
726
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
727
+ * @type {number}
728
+ * @memberof CreateCompletionRequest
729
+ */
730
+ 'frequency_penalty'?: number | null;
731
+ /**
732
+ * Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
733
+ * @type {number}
734
+ * @memberof CreateCompletionRequest
735
+ */
736
+ 'best_of'?: number | null;
737
+ /**
738
+ * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
739
+ * @type {object}
740
+ * @memberof CreateCompletionRequest
741
+ */
742
+ 'logit_bias'?: object | null;
743
+ /**
744
+ * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
745
+ * @type {string}
746
+ * @memberof CreateCompletionRequest
747
+ */
748
+ 'user'?: string;
749
+ }
750
+ /**
751
+ * @type CreateCompletionRequestPrompt
752
+ * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
753
+ * @export
754
+ */
755
+ export type CreateCompletionRequestPrompt = Array<any> | Array<number> | Array<string> | string;
756
+ /**
757
+ * @type CreateCompletionRequestStop
758
+ * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
759
+ * @export
760
+ */
761
+ export type CreateCompletionRequestStop = Array<string> | string;
762
+ /**
763
+ *
764
+ * @export
765
+ * @interface CreateCompletionResponse
766
+ */
767
+ export interface CreateCompletionResponse {
768
+ /**
769
+ *
770
+ * @type {string}
771
+ * @memberof CreateCompletionResponse
772
+ */
773
+ 'id': string;
774
+ /**
775
+ *
776
+ * @type {string}
777
+ * @memberof CreateCompletionResponse
778
+ */
779
+ 'object': string;
780
+ /**
781
+ *
782
+ * @type {number}
783
+ * @memberof CreateCompletionResponse
784
+ */
785
+ 'created': number;
786
+ /**
787
+ *
788
+ * @type {string}
789
+ * @memberof CreateCompletionResponse
790
+ */
791
+ 'model': string;
792
+ /**
793
+ *
794
+ * @type {Array<CreateCompletionResponseChoicesInner>}
795
+ * @memberof CreateCompletionResponse
796
+ */
797
+ 'choices': Array<CreateCompletionResponseChoicesInner>;
798
+ /**
799
+ *
800
+ * @type {CreateCompletionResponseUsage}
801
+ * @memberof CreateCompletionResponse
802
+ */
803
+ 'usage'?: CreateCompletionResponseUsage;
804
+ }
805
+ /**
806
+ *
807
+ * @export
808
+ * @interface CreateCompletionResponseChoicesInner
809
+ */
810
+ export interface CreateCompletionResponseChoicesInner {
811
+ /**
812
+ *
813
+ * @type {string}
814
+ * @memberof CreateCompletionResponseChoicesInner
815
+ */
816
+ 'text'?: string;
817
+ /**
818
+ *
819
+ * @type {number}
820
+ * @memberof CreateCompletionResponseChoicesInner
821
+ */
822
+ 'index'?: number;
823
+ /**
824
+ *
825
+ * @type {CreateCompletionResponseChoicesInnerLogprobs}
826
+ * @memberof CreateCompletionResponseChoicesInner
827
+ */
828
+ 'logprobs'?: CreateCompletionResponseChoicesInnerLogprobs | null;
829
+ /**
830
+ *
831
+ * @type {string}
832
+ * @memberof CreateCompletionResponseChoicesInner
833
+ */
834
+ 'finish_reason'?: string;
835
+ }
836
+ /**
837
+ *
838
+ * @export
839
+ * @interface CreateCompletionResponseChoicesInnerLogprobs
840
+ */
841
+ export interface CreateCompletionResponseChoicesInnerLogprobs {
842
+ /**
843
+ *
844
+ * @type {Array<string>}
845
+ * @memberof CreateCompletionResponseChoicesInnerLogprobs
846
+ */
847
+ 'tokens'?: Array<string>;
848
+ /**
849
+ *
850
+ * @type {Array<number>}
851
+ * @memberof CreateCompletionResponseChoicesInnerLogprobs
852
+ */
853
+ 'token_logprobs'?: Array<number>;
854
+ /**
855
+ *
856
+ * @type {Array<object>}
857
+ * @memberof CreateCompletionResponseChoicesInnerLogprobs
858
+ */
859
+ 'top_logprobs'?: Array<object>;
860
+ /**
861
+ *
862
+ * @type {Array<number>}
863
+ * @memberof CreateCompletionResponseChoicesInnerLogprobs
864
+ */
865
+ 'text_offset'?: Array<number>;
866
+ }
867
+ /**
868
+ *
869
+ * @export
870
+ * @interface CreateCompletionResponseUsage
871
+ */
872
+ export interface CreateCompletionResponseUsage {
873
+ /**
874
+ *
875
+ * @type {number}
876
+ * @memberof CreateCompletionResponseUsage
877
+ */
878
+ 'prompt_tokens': number;
879
+ /**
880
+ *
881
+ * @type {number}
882
+ * @memberof CreateCompletionResponseUsage
883
+ */
884
+ 'completion_tokens': number;
885
+ /**
886
+ *
887
+ * @type {number}
888
+ * @memberof CreateCompletionResponseUsage
889
+ */
890
+ 'total_tokens': number;
891
+ }
892
+ /**
893
+ *
894
+ * @export
895
+ * @interface CreateEditRequest
896
+ */
897
+ export interface CreateEditRequest {
898
+ /**
899
+ * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.
900
+ * @type {string}
901
+ * @memberof CreateEditRequest
902
+ */
903
+ 'model': string;
904
+ /**
905
+ * The input text to use as a starting point for the edit.
906
+ * @type {string}
907
+ * @memberof CreateEditRequest
908
+ */
909
+ 'input'?: string | null;
910
+ /**
911
+ * The instruction that tells the model how to edit the prompt.
912
+ * @type {string}
913
+ * @memberof CreateEditRequest
914
+ */
915
+ 'instruction': string;
916
+ /**
917
+ * How many edits to generate for the input and instruction.
918
+ * @type {number}
919
+ * @memberof CreateEditRequest
920
+ */
921
+ 'n'?: number | null;
922
+ /**
923
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
924
+ * @type {number}
925
+ * @memberof CreateEditRequest
926
+ */
927
+ 'temperature'?: number | null;
928
+ /**
929
+ * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
930
+ * @type {number}
931
+ * @memberof CreateEditRequest
932
+ */
933
+ 'top_p'?: number | null;
934
+ }
935
+ /**
936
+ *
937
+ * @export
938
+ * @interface CreateEditResponse
939
+ */
940
+ export interface CreateEditResponse {
941
+ /**
942
+ *
943
+ * @type {string}
944
+ * @memberof CreateEditResponse
945
+ */
946
+ 'object': string;
947
+ /**
948
+ *
949
+ * @type {number}
950
+ * @memberof CreateEditResponse
951
+ */
952
+ 'created': number;
953
+ /**
954
+ *
955
+ * @type {Array<CreateCompletionResponseChoicesInner>}
956
+ * @memberof CreateEditResponse
957
+ */
958
+ 'choices': Array<CreateCompletionResponseChoicesInner>;
959
+ /**
960
+ *
961
+ * @type {CreateCompletionResponseUsage}
962
+ * @memberof CreateEditResponse
963
+ */
964
+ 'usage': CreateCompletionResponseUsage;
965
+ }
966
+ /**
967
+ *
968
+ * @export
969
+ * @interface CreateEmbeddingRequest
970
+ */
971
+ export interface CreateEmbeddingRequest {
972
+ /**
973
+ * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
974
+ * @type {string}
975
+ * @memberof CreateEmbeddingRequest
976
+ */
977
+ 'model': string;
978
+ /**
979
+ *
980
+ * @type {CreateEmbeddingRequestInput}
981
+ * @memberof CreateEmbeddingRequest
982
+ */
983
+ 'input': CreateEmbeddingRequestInput;
984
+ /**
985
+ * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
986
+ * @type {string}
987
+ * @memberof CreateEmbeddingRequest
988
+ */
989
+ 'user'?: string;
990
+ }
991
+ /**
992
+ * @type CreateEmbeddingRequestInput
993
+ * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
994
+ * @export
995
+ */
996
+ export type CreateEmbeddingRequestInput = Array<any> | Array<number> | Array<string> | string;
997
+ /**
998
+ *
999
+ * @export
1000
+ * @interface CreateEmbeddingResponse
1001
+ */
1002
+ export interface CreateEmbeddingResponse {
1003
+ /**
1004
+ *
1005
+ * @type {string}
1006
+ * @memberof CreateEmbeddingResponse
1007
+ */
1008
+ 'object': string;
1009
+ /**
1010
+ *
1011
+ * @type {string}
1012
+ * @memberof CreateEmbeddingResponse
1013
+ */
1014
+ 'model': string;
1015
+ /**
1016
+ *
1017
+ * @type {Array<CreateEmbeddingResponseDataInner>}
1018
+ * @memberof CreateEmbeddingResponse
1019
+ */
1020
+ 'data': Array<CreateEmbeddingResponseDataInner>;
1021
+ /**
1022
+ *
1023
+ * @type {CreateEmbeddingResponseUsage}
1024
+ * @memberof CreateEmbeddingResponse
1025
+ */
1026
+ 'usage': CreateEmbeddingResponseUsage;
1027
+ }
1028
+ /**
1029
+ *
1030
+ * @export
1031
+ * @interface CreateEmbeddingResponseDataInner
1032
+ */
1033
+ export interface CreateEmbeddingResponseDataInner {
1034
+ /**
1035
+ *
1036
+ * @type {number}
1037
+ * @memberof CreateEmbeddingResponseDataInner
1038
+ */
1039
+ 'index': number;
1040
+ /**
1041
+ *
1042
+ * @type {string}
1043
+ * @memberof CreateEmbeddingResponseDataInner
1044
+ */
1045
+ 'object': string;
1046
+ /**
1047
+ *
1048
+ * @type {Array<number>}
1049
+ * @memberof CreateEmbeddingResponseDataInner
1050
+ */
1051
+ 'embedding': Array<number>;
1052
+ }
1053
+ /**
1054
+ *
1055
+ * @export
1056
+ * @interface CreateEmbeddingResponseUsage
1057
+ */
1058
+ export interface CreateEmbeddingResponseUsage {
1059
+ /**
1060
+ *
1061
+ * @type {number}
1062
+ * @memberof CreateEmbeddingResponseUsage
1063
+ */
1064
+ 'prompt_tokens': number;
1065
+ /**
1066
+ *
1067
+ * @type {number}
1068
+ * @memberof CreateEmbeddingResponseUsage
1069
+ */
1070
+ 'total_tokens': number;
1071
+ }
1072
+ /**
1073
+ *
1074
+ * @export
1075
+ * @interface CreateFineTuneRequest
1076
+ */
1077
+ export interface CreateFineTuneRequest {
1078
+ /**
1079
+ * The ID of an uploaded file that contains training data. See [upload file](/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
1080
+ * @type {string}
1081
+ * @memberof CreateFineTuneRequest
1082
+ */
1083
+ 'training_file': string;
1084
+ /**
1085
+ * The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). Your train and validation data should be mutually exclusive. Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
1086
+ * @type {string}
1087
+ * @memberof CreateFineTuneRequest
1088
+ */
1089
+ 'validation_file'?: string | null;
1090
+ /**
1091
+ * The name of the base model to fine-tune. You can select one of \"ada\", \"babbage\", \"curie\", \"davinci\", or a fine-tuned model created after 2022-04-21. To learn more about these models, see the [Models](https://platform.openai.com/docs/models) documentation.
1092
+ * @type {string}
1093
+ * @memberof CreateFineTuneRequest
1094
+ */
1095
+ 'model'?: string | null;
1096
+ /**
1097
+ * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
1098
+ * @type {number}
1099
+ * @memberof CreateFineTuneRequest
1100
+ */
1101
+ 'n_epochs'?: number | null;
1102
+ /**
1103
+ * The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass. By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set, capped at 256 - in general, we\'ve found that larger batch sizes tend to work better for larger datasets.
1104
+ * @type {number}
1105
+ * @memberof CreateFineTuneRequest
1106
+ */
1107
+ 'batch_size'?: number | null;
1108
+ /**
1109
+ * The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final `batch_size` (larger learning rates tend to perform better with larger batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results.
1110
+ * @type {number}
1111
+ * @memberof CreateFineTuneRequest
1112
+ */
1113
+ 'learning_rate_multiplier'?: number | null;
1114
+ /**
1115
+ * The weight to use for loss on the prompt tokens. This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short. If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt.
1116
+ * @type {number}
1117
+ * @memberof CreateFineTuneRequest
1118
+ */
1119
+ 'prompt_loss_weight'?: number | null;
1120
+ /**
1121
+ * If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). In order to compute classification metrics, you must provide a `validation_file`. Additionally, you must specify `classification_n_classes` for multiclass classification or `classification_positive_class` for binary classification.
1122
+ * @type {boolean}
1123
+ * @memberof CreateFineTuneRequest
1124
+ */
1125
+ 'compute_classification_metrics'?: boolean | null;
1126
+ /**
1127
+ * The number of classes in a classification task. This parameter is required for multiclass classification.
1128
+ * @type {number}
1129
+ * @memberof CreateFineTuneRequest
1130
+ */
1131
+ 'classification_n_classes'?: number | null;
1132
+ /**
1133
+ * The positive class in binary classification. This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification.
1134
+ * @type {string}
1135
+ * @memberof CreateFineTuneRequest
1136
+ */
1137
+ 'classification_positive_class'?: string | null;
1138
+ /**
1139
+ * If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification. With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall.
1140
+ * @type {Array<number>}
1141
+ * @memberof CreateFineTuneRequest
1142
+ */
1143
+ 'classification_betas'?: Array<number> | null;
1144
+ /**
1145
+ * A string of up to 40 characters that will be added to your fine-tuned model name. For example, a `suffix` of \"custom-model-name\" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
1146
+ * @type {string}
1147
+ * @memberof CreateFineTuneRequest
1148
+ */
1149
+ 'suffix'?: string | null;
1150
+ }
1151
+ /**
1152
+ *
1153
+ * @export
1154
+ * @interface CreateImageRequest
1155
+ */
1156
+ export interface CreateImageRequest {
1157
+ /**
1158
+ * A text description of the desired image(s). The maximum length is 1000 characters.
1159
+ * @type {string}
1160
+ * @memberof CreateImageRequest
1161
+ */
1162
+ 'prompt': string;
1163
+ /**
1164
+ * The number of images to generate. Must be between 1 and 10.
1165
+ * @type {number}
1166
+ * @memberof CreateImageRequest
1167
+ */
1168
+ 'n'?: number | null;
1169
+ /**
1170
+ * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
1171
+ * @type {string}
1172
+ * @memberof CreateImageRequest
1173
+ */
1174
+ 'size'?: CreateImageRequestSizeEnum;
1175
+ /**
1176
+ * The format in which the generated images are returned. Must be one of `url` or `b64_json`.
1177
+ * @type {string}
1178
+ * @memberof CreateImageRequest
1179
+ */
1180
+ 'response_format'?: CreateImageRequestResponseFormatEnum;
1181
+ /**
1182
+ * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
1183
+ * @type {string}
1184
+ * @memberof CreateImageRequest
1185
+ */
1186
+ 'user'?: string;
1187
+ }
1188
+ export declare const CreateImageRequestSizeEnum: {
1189
+ readonly _256x256: "256x256";
1190
+ readonly _512x512: "512x512";
1191
+ readonly _1024x1024: "1024x1024";
1192
+ };
1193
+ export type CreateImageRequestSizeEnum = typeof CreateImageRequestSizeEnum[keyof typeof CreateImageRequestSizeEnum];
1194
+ export declare const CreateImageRequestResponseFormatEnum: {
1195
+ readonly Url: "url";
1196
+ readonly B64Json: "b64_json";
1197
+ };
1198
+ export type CreateImageRequestResponseFormatEnum = typeof CreateImageRequestResponseFormatEnum[keyof typeof CreateImageRequestResponseFormatEnum];
1199
+ /**
1200
+ *
1201
+ * @export
1202
+ * @interface CreateModerationRequest
1203
+ */
1204
+ export interface CreateModerationRequest {
1205
+ /**
1206
+ *
1207
+ * @type {CreateModerationRequestInput}
1208
+ * @memberof CreateModerationRequest
1209
+ */
1210
+ 'input': CreateModerationRequestInput;
1211
+ /**
1212
+ * Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
1213
+ * @type {string}
1214
+ * @memberof CreateModerationRequest
1215
+ */
1216
+ 'model'?: string;
1217
+ }
1218
+ /**
1219
+ * @type CreateModerationRequestInput
1220
+ * The input text to classify
1221
+ * @export
1222
+ */
1223
+ export type CreateModerationRequestInput = Array<string> | string;
1224
+ /**
1225
+ *
1226
+ * @export
1227
+ * @interface CreateModerationResponse
1228
+ */
1229
+ export interface CreateModerationResponse {
1230
+ /**
1231
+ *
1232
+ * @type {string}
1233
+ * @memberof CreateModerationResponse
1234
+ */
1235
+ 'id': string;
1236
+ /**
1237
+ *
1238
+ * @type {string}
1239
+ * @memberof CreateModerationResponse
1240
+ */
1241
+ 'model': string;
1242
+ /**
1243
+ *
1244
+ * @type {Array<CreateModerationResponseResultsInner>}
1245
+ * @memberof CreateModerationResponse
1246
+ */
1247
+ 'results': Array<CreateModerationResponseResultsInner>;
1248
+ }
1249
+ /**
1250
+ *
1251
+ * @export
1252
+ * @interface CreateModerationResponseResultsInner
1253
+ */
1254
+ export interface CreateModerationResponseResultsInner {
1255
+ /**
1256
+ *
1257
+ * @type {boolean}
1258
+ * @memberof CreateModerationResponseResultsInner
1259
+ */
1260
+ 'flagged': boolean;
1261
+ /**
1262
+ *
1263
+ * @type {CreateModerationResponseResultsInnerCategories}
1264
+ * @memberof CreateModerationResponseResultsInner
1265
+ */
1266
+ 'categories': CreateModerationResponseResultsInnerCategories;
1267
+ /**
1268
+ *
1269
+ * @type {CreateModerationResponseResultsInnerCategoryScores}
1270
+ * @memberof CreateModerationResponseResultsInner
1271
+ */
1272
+ 'category_scores': CreateModerationResponseResultsInnerCategoryScores;
1273
+ }
1274
+ /**
1275
+ *
1276
+ * @export
1277
+ * @interface CreateModerationResponseResultsInnerCategories
1278
+ */
1279
+ export interface CreateModerationResponseResultsInnerCategories {
1280
+ /**
1281
+ *
1282
+ * @type {boolean}
1283
+ * @memberof CreateModerationResponseResultsInnerCategories
1284
+ */
1285
+ 'hate': boolean;
1286
+ /**
1287
+ *
1288
+ * @type {boolean}
1289
+ * @memberof CreateModerationResponseResultsInnerCategories
1290
+ */
1291
+ 'hate/threatening': boolean;
1292
+ /**
1293
+ *
1294
+ * @type {boolean}
1295
+ * @memberof CreateModerationResponseResultsInnerCategories
1296
+ */
1297
+ 'self-harm': boolean;
1298
+ /**
1299
+ *
1300
+ * @type {boolean}
1301
+ * @memberof CreateModerationResponseResultsInnerCategories
1302
+ */
1303
+ 'sexual': boolean;
1304
+ /**
1305
+ *
1306
+ * @type {boolean}
1307
+ * @memberof CreateModerationResponseResultsInnerCategories
1308
+ */
1309
+ 'sexual/minors': boolean;
1310
+ /**
1311
+ *
1312
+ * @type {boolean}
1313
+ * @memberof CreateModerationResponseResultsInnerCategories
1314
+ */
1315
+ 'violence': boolean;
1316
+ /**
1317
+ *
1318
+ * @type {boolean}
1319
+ * @memberof CreateModerationResponseResultsInnerCategories
1320
+ */
1321
+ 'violence/graphic': boolean;
1322
+ }
1323
+ /**
1324
+ *
1325
+ * @export
1326
+ * @interface CreateModerationResponseResultsInnerCategoryScores
1327
+ */
1328
+ export interface CreateModerationResponseResultsInnerCategoryScores {
1329
+ /**
1330
+ *
1331
+ * @type {number}
1332
+ * @memberof CreateModerationResponseResultsInnerCategoryScores
1333
+ */
1334
+ 'hate': number;
1335
+ /**
1336
+ *
1337
+ * @type {number}
1338
+ * @memberof CreateModerationResponseResultsInnerCategoryScores
1339
+ */
1340
+ 'hate/threatening': number;
1341
+ /**
1342
+ *
1343
+ * @type {number}
1344
+ * @memberof CreateModerationResponseResultsInnerCategoryScores
1345
+ */
1346
+ 'self-harm': number;
1347
+ /**
1348
+ *
1349
+ * @type {number}
1350
+ * @memberof CreateModerationResponseResultsInnerCategoryScores
1351
+ */
1352
+ 'sexual': number;
1353
+ /**
1354
+ *
1355
+ * @type {number}
1356
+ * @memberof CreateModerationResponseResultsInnerCategoryScores
1357
+ */
1358
+ 'sexual/minors': number;
1359
+ /**
1360
+ *
1361
+ * @type {number}
1362
+ * @memberof CreateModerationResponseResultsInnerCategoryScores
1363
+ */
1364
+ 'violence': number;
1365
+ /**
1366
+ *
1367
+ * @type {number}
1368
+ * @memberof CreateModerationResponseResultsInnerCategoryScores
1369
+ */
1370
+ 'violence/graphic': number;
1371
+ }
1372
+ /**
1373
+ *
1374
+ * @export
1375
+ * @interface CreateSearchRequest
1376
+ */
1377
+ export interface CreateSearchRequest {
1378
+ /**
1379
+ * Query to search against the documents.
1380
+ * @type {string}
1381
+ * @memberof CreateSearchRequest
1382
+ */
1383
+ 'query': string;
1384
+ /**
1385
+ * Up to 200 documents to search over, provided as a list of strings. The maximum document length (in tokens) is 2034 minus the number of tokens in the query. You should specify either `documents` or a `file`, but not both.
1386
+ * @type {Array<string>}
1387
+ * @memberof CreateSearchRequest
1388
+ */
1389
+ 'documents'?: Array<string> | null;
1390
+ /**
1391
+ * The ID of an uploaded file that contains documents to search over. You should specify either `documents` or a `file`, but not both.
1392
+ * @type {string}
1393
+ * @memberof CreateSearchRequest
1394
+ */
1395
+ 'file'?: string | null;
1396
+ /**
1397
+ * The maximum number of documents to be re-ranked and returned by search. This flag only takes effect when `file` is set.
1398
+ * @type {number}
1399
+ * @memberof CreateSearchRequest
1400
+ */
1401
+ 'max_rerank'?: number | null;
1402
+ /**
1403
+ * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
1404
+ * @type {boolean}
1405
+ * @memberof CreateSearchRequest
1406
+ */
1407
+ 'return_metadata'?: boolean | null;
1408
+ /**
1409
+ * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
1410
+ * @type {string}
1411
+ * @memberof CreateSearchRequest
1412
+ */
1413
+ 'user'?: string;
1414
+ }
1415
+ /**
1416
+ *
1417
+ * @export
1418
+ * @interface CreateSearchResponse
1419
+ */
1420
+ export interface CreateSearchResponse {
1421
+ /**
1422
+ *
1423
+ * @type {string}
1424
+ * @memberof CreateSearchResponse
1425
+ */
1426
+ 'object'?: string;
1427
+ /**
1428
+ *
1429
+ * @type {string}
1430
+ * @memberof CreateSearchResponse
1431
+ */
1432
+ 'model'?: string;
1433
+ /**
1434
+ *
1435
+ * @type {Array<CreateSearchResponseDataInner>}
1436
+ * @memberof CreateSearchResponse
1437
+ */
1438
+ 'data'?: Array<CreateSearchResponseDataInner>;
1439
+ }
1440
+ /**
1441
+ *
1442
+ * @export
1443
+ * @interface CreateSearchResponseDataInner
1444
+ */
1445
+ export interface CreateSearchResponseDataInner {
1446
+ /**
1447
+ *
1448
+ * @type {string}
1449
+ * @memberof CreateSearchResponseDataInner
1450
+ */
1451
+ 'object'?: string;
1452
+ /**
1453
+ *
1454
+ * @type {number}
1455
+ * @memberof CreateSearchResponseDataInner
1456
+ */
1457
+ 'document'?: number;
1458
+ /**
1459
+ *
1460
+ * @type {number}
1461
+ * @memberof CreateSearchResponseDataInner
1462
+ */
1463
+ 'score'?: number;
1464
+ }
1465
+ /**
1466
+ *
1467
+ * @export
1468
+ * @interface CreateTranscriptionResponse
1469
+ */
1470
+ export interface CreateTranscriptionResponse {
1471
+ /**
1472
+ *
1473
+ * @type {string}
1474
+ * @memberof CreateTranscriptionResponse
1475
+ */
1476
+ 'text': string;
1477
+ }
1478
+ /**
1479
+ *
1480
+ * @export
1481
+ * @interface CreateTranslationResponse
1482
+ */
1483
+ export interface CreateTranslationResponse {
1484
+ /**
1485
+ *
1486
+ * @type {string}
1487
+ * @memberof CreateTranslationResponse
1488
+ */
1489
+ 'text': string;
1490
+ }
1491
+ /**
1492
+ *
1493
+ * @export
1494
+ * @interface DeleteFileResponse
1495
+ */
1496
+ export interface DeleteFileResponse {
1497
+ /**
1498
+ *
1499
+ * @type {string}
1500
+ * @memberof DeleteFileResponse
1501
+ */
1502
+ 'id': string;
1503
+ /**
1504
+ *
1505
+ * @type {string}
1506
+ * @memberof DeleteFileResponse
1507
+ */
1508
+ 'object': string;
1509
+ /**
1510
+ *
1511
+ * @type {boolean}
1512
+ * @memberof DeleteFileResponse
1513
+ */
1514
+ 'deleted': boolean;
1515
+ }
1516
+ /**
1517
+ *
1518
+ * @export
1519
+ * @interface DeleteModelResponse
1520
+ */
1521
+ export interface DeleteModelResponse {
1522
+ /**
1523
+ *
1524
+ * @type {string}
1525
+ * @memberof DeleteModelResponse
1526
+ */
1527
+ 'id': string;
1528
+ /**
1529
+ *
1530
+ * @type {string}
1531
+ * @memberof DeleteModelResponse
1532
+ */
1533
+ 'object': string;
1534
+ /**
1535
+ *
1536
+ * @type {boolean}
1537
+ * @memberof DeleteModelResponse
1538
+ */
1539
+ 'deleted': boolean;
1540
+ }
1541
+ /**
1542
+ *
1543
+ * @export
1544
+ * @interface Engine
1545
+ */
1546
+ export interface Engine {
1547
+ /**
1548
+ *
1549
+ * @type {string}
1550
+ * @memberof Engine
1551
+ */
1552
+ 'id': string;
1553
+ /**
1554
+ *
1555
+ * @type {string}
1556
+ * @memberof Engine
1557
+ */
1558
+ 'object': string;
1559
+ /**
1560
+ *
1561
+ * @type {number}
1562
+ * @memberof Engine
1563
+ */
1564
+ 'created': number | null;
1565
+ /**
1566
+ *
1567
+ * @type {boolean}
1568
+ * @memberof Engine
1569
+ */
1570
+ 'ready': boolean;
1571
+ }
1572
+ /**
1573
+ *
1574
+ * @export
1575
+ * @interface ErrorResponse
1576
+ */
1577
+ export interface ErrorResponse {
1578
+ /**
1579
+ *
1580
+ * @type {Error}
1581
+ * @memberof ErrorResponse
1582
+ */
1583
+ 'error': Error;
1584
+ }
1585
+ /**
1586
+ *
1587
+ * @export
1588
+ * @interface FineTune
1589
+ */
1590
+ export interface FineTune {
1591
+ /**
1592
+ *
1593
+ * @type {string}
1594
+ * @memberof FineTune
1595
+ */
1596
+ 'id': string;
1597
+ /**
1598
+ *
1599
+ * @type {string}
1600
+ * @memberof FineTune
1601
+ */
1602
+ 'object': string;
1603
+ /**
1604
+ *
1605
+ * @type {number}
1606
+ * @memberof FineTune
1607
+ */
1608
+ 'created_at': number;
1609
+ /**
1610
+ *
1611
+ * @type {number}
1612
+ * @memberof FineTune
1613
+ */
1614
+ 'updated_at': number;
1615
+ /**
1616
+ *
1617
+ * @type {string}
1618
+ * @memberof FineTune
1619
+ */
1620
+ 'model': string;
1621
+ /**
1622
+ *
1623
+ * @type {string}
1624
+ * @memberof FineTune
1625
+ */
1626
+ 'fine_tuned_model': string | null;
1627
+ /**
1628
+ *
1629
+ * @type {string}
1630
+ * @memberof FineTune
1631
+ */
1632
+ 'organization_id': string;
1633
+ /**
1634
+ *
1635
+ * @type {string}
1636
+ * @memberof FineTune
1637
+ */
1638
+ 'status': string;
1639
+ /**
1640
+ *
1641
+ * @type {object}
1642
+ * @memberof FineTune
1643
+ */
1644
+ 'hyperparams': object;
1645
+ /**
1646
+ *
1647
+ * @type {Array<OpenAIFile>}
1648
+ * @memberof FineTune
1649
+ */
1650
+ 'training_files': Array<OpenAIFile>;
1651
+ /**
1652
+ *
1653
+ * @type {Array<OpenAIFile>}
1654
+ * @memberof FineTune
1655
+ */
1656
+ 'validation_files': Array<OpenAIFile>;
1657
+ /**
1658
+ *
1659
+ * @type {Array<OpenAIFile>}
1660
+ * @memberof FineTune
1661
+ */
1662
+ 'result_files': Array<OpenAIFile>;
1663
+ /**
1664
+ *
1665
+ * @type {Array<FineTuneEvent>}
1666
+ * @memberof FineTune
1667
+ */
1668
+ 'events'?: Array<FineTuneEvent>;
1669
+ }
1670
+ /**
1671
+ *
1672
+ * @export
1673
+ * @interface FineTuneEvent
1674
+ */
1675
+ export interface FineTuneEvent {
1676
+ /**
1677
+ *
1678
+ * @type {string}
1679
+ * @memberof FineTuneEvent
1680
+ */
1681
+ 'object': string;
1682
+ /**
1683
+ *
1684
+ * @type {number}
1685
+ * @memberof FineTuneEvent
1686
+ */
1687
+ 'created_at': number;
1688
+ /**
1689
+ *
1690
+ * @type {string}
1691
+ * @memberof FineTuneEvent
1692
+ */
1693
+ 'level': string;
1694
+ /**
1695
+ *
1696
+ * @type {string}
1697
+ * @memberof FineTuneEvent
1698
+ */
1699
+ 'message': string;
1700
+ }
1701
+ /**
1702
+ *
1703
+ * @export
1704
+ * @interface ImagesResponse
1705
+ */
1706
+ export interface ImagesResponse {
1707
+ /**
1708
+ *
1709
+ * @type {number}
1710
+ * @memberof ImagesResponse
1711
+ */
1712
+ 'created': number;
1713
+ /**
1714
+ *
1715
+ * @type {Array<ImagesResponseDataInner>}
1716
+ * @memberof ImagesResponse
1717
+ */
1718
+ 'data': Array<ImagesResponseDataInner>;
1719
+ }
1720
+ /**
1721
+ *
1722
+ * @export
1723
+ * @interface ImagesResponseDataInner
1724
+ */
1725
+ export interface ImagesResponseDataInner {
1726
+ /**
1727
+ *
1728
+ * @type {string}
1729
+ * @memberof ImagesResponseDataInner
1730
+ */
1731
+ 'url'?: string;
1732
+ /**
1733
+ *
1734
+ * @type {string}
1735
+ * @memberof ImagesResponseDataInner
1736
+ */
1737
+ 'b64_json'?: string;
1738
+ }
1739
+ /**
1740
+ *
1741
+ * @export
1742
+ * @interface ListEnginesResponse
1743
+ */
1744
+ export interface ListEnginesResponse {
1745
+ /**
1746
+ *
1747
+ * @type {string}
1748
+ * @memberof ListEnginesResponse
1749
+ */
1750
+ 'object': string;
1751
+ /**
1752
+ *
1753
+ * @type {Array<Engine>}
1754
+ * @memberof ListEnginesResponse
1755
+ */
1756
+ 'data': Array<Engine>;
1757
+ }
1758
+ /**
1759
+ *
1760
+ * @export
1761
+ * @interface ListFilesResponse
1762
+ */
1763
+ export interface ListFilesResponse {
1764
+ /**
1765
+ *
1766
+ * @type {string}
1767
+ * @memberof ListFilesResponse
1768
+ */
1769
+ 'object': string;
1770
+ /**
1771
+ *
1772
+ * @type {Array<OpenAIFile>}
1773
+ * @memberof ListFilesResponse
1774
+ */
1775
+ 'data': Array<OpenAIFile>;
1776
+ }
1777
+ /**
1778
+ *
1779
+ * @export
1780
+ * @interface ListFineTuneEventsResponse
1781
+ */
1782
+ export interface ListFineTuneEventsResponse {
1783
+ /**
1784
+ *
1785
+ * @type {string}
1786
+ * @memberof ListFineTuneEventsResponse
1787
+ */
1788
+ 'object': string;
1789
+ /**
1790
+ *
1791
+ * @type {Array<FineTuneEvent>}
1792
+ * @memberof ListFineTuneEventsResponse
1793
+ */
1794
+ 'data': Array<FineTuneEvent>;
1795
+ }
1796
+ /**
1797
+ *
1798
+ * @export
1799
+ * @interface ListFineTunesResponse
1800
+ */
1801
+ export interface ListFineTunesResponse {
1802
+ /**
1803
+ *
1804
+ * @type {string}
1805
+ * @memberof ListFineTunesResponse
1806
+ */
1807
+ 'object': string;
1808
+ /**
1809
+ *
1810
+ * @type {Array<FineTune>}
1811
+ * @memberof ListFineTunesResponse
1812
+ */
1813
+ 'data': Array<FineTune>;
1814
+ }
1815
+ /**
1816
+ *
1817
+ * @export
1818
+ * @interface ListModelsResponse
1819
+ */
1820
+ export interface ListModelsResponse {
1821
+ /**
1822
+ *
1823
+ * @type {string}
1824
+ * @memberof ListModelsResponse
1825
+ */
1826
+ 'object': string;
1827
+ /**
1828
+ *
1829
+ * @type {Array<Model>}
1830
+ * @memberof ListModelsResponse
1831
+ */
1832
+ 'data': Array<Model>;
1833
+ }
1834
+ /**
1835
+ *
1836
+ * @export
1837
+ * @interface Model
1838
+ */
1839
+ export interface Model {
1840
+ /**
1841
+ *
1842
+ * @type {string}
1843
+ * @memberof Model
1844
+ */
1845
+ 'id': string;
1846
+ /**
1847
+ *
1848
+ * @type {string}
1849
+ * @memberof Model
1850
+ */
1851
+ 'object': string;
1852
+ /**
1853
+ *
1854
+ * @type {number}
1855
+ * @memberof Model
1856
+ */
1857
+ 'created': number;
1858
+ /**
1859
+ *
1860
+ * @type {string}
1861
+ * @memberof Model
1862
+ */
1863
+ 'owned_by': string;
1864
+ }
1865
+ /**
1866
+ *
1867
+ * @export
1868
+ * @interface ModelError
1869
+ */
1870
+ export interface ModelError {
1871
+ /**
1872
+ *
1873
+ * @type {string}
1874
+ * @memberof ModelError
1875
+ */
1876
+ 'type': string;
1877
+ /**
1878
+ *
1879
+ * @type {string}
1880
+ * @memberof ModelError
1881
+ */
1882
+ 'message': string;
1883
+ /**
1884
+ *
1885
+ * @type {string}
1886
+ * @memberof ModelError
1887
+ */
1888
+ 'param': string | null;
1889
+ /**
1890
+ *
1891
+ * @type {string}
1892
+ * @memberof ModelError
1893
+ */
1894
+ 'code': string | null;
1895
+ }
1896
+ /**
1897
+ *
1898
+ * @export
1899
+ * @interface OpenAIFile
1900
+ */
1901
+ export interface OpenAIFile {
1902
+ /**
1903
+ *
1904
+ * @type {string}
1905
+ * @memberof OpenAIFile
1906
+ */
1907
+ 'id': string;
1908
+ /**
1909
+ *
1910
+ * @type {string}
1911
+ * @memberof OpenAIFile
1912
+ */
1913
+ 'object': string;
1914
+ /**
1915
+ *
1916
+ * @type {number}
1917
+ * @memberof OpenAIFile
1918
+ */
1919
+ 'bytes': number;
1920
+ /**
1921
+ *
1922
+ * @type {number}
1923
+ * @memberof OpenAIFile
1924
+ */
1925
+ 'created_at': number;
1926
+ /**
1927
+ *
1928
+ * @type {string}
1929
+ * @memberof OpenAIFile
1930
+ */
1931
+ 'filename': string;
1932
+ /**
1933
+ *
1934
+ * @type {string}
1935
+ * @memberof OpenAIFile
1936
+ */
1937
+ 'purpose': string;
1938
+ /**
1939
+ *
1940
+ * @type {string}
1941
+ * @memberof OpenAIFile
1942
+ */
1943
+ 'status'?: string;
1944
+ /**
1945
+ *
1946
+ * @type {object}
1947
+ * @memberof OpenAIFile
1948
+ */
1949
+ 'status_details'?: object | null;
1950
+ }
1951
+ /**
1952
+ * OpenAIApi - axios parameter creator
1953
+ * @export
1954
+ */
1955
+ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration) => {
1956
+ /**
1957
+ *
1958
+ * @summary Immediately cancel a fine-tune job.
1959
+ * @param {string} fineTuneId The ID of the fine-tune job to cancel
1960
+ * @param {*} [options] Override http request option.
1961
+ * @throws {RequiredError}
1962
+ */
1963
+ cancelFineTune: (fineTuneId: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
1964
+ /**
1965
+ *
1966
+ * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
1967
+ * @param {CreateAnswerRequest} createAnswerRequest
1968
+ * @param {*} [options] Override http request option.
1969
+ * @deprecated
1970
+ * @throws {RequiredError}
1971
+ */
1972
+ createAnswer: (createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
1973
+ /**
1974
+ *
1975
+ * @summary Creates a model response for the given chat conversation.
1976
+ * @param {CreateChatCompletionRequest} createChatCompletionRequest
1977
+ * @param {*} [options] Override http request option.
1978
+ * @throws {RequiredError}
1979
+ */
1980
+ createChatCompletion: (createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
1981
+ /**
1982
+ *
1983
+ * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
1984
+ * @param {CreateClassificationRequest} createClassificationRequest
1985
+ * @param {*} [options] Override http request option.
1986
+ * @deprecated
1987
+ * @throws {RequiredError}
1988
+ */
1989
+ createClassification: (createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
1990
+ /**
1991
+ *
1992
+ * @summary Creates a completion for the provided prompt and parameters.
1993
+ * @param {CreateCompletionRequest} createCompletionRequest
1994
+ * @param {*} [options] Override http request option.
1995
+ * @throws {RequiredError}
1996
+ */
1997
+ createCompletion: (createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
1998
+ /**
1999
+ *
2000
+ * @summary Creates a new edit for the provided input, instruction, and parameters.
2001
+ * @param {CreateEditRequest} createEditRequest
2002
+ * @param {*} [options] Override http request option.
2003
+ * @throws {RequiredError}
2004
+ */
2005
+ createEdit: (createEditRequest: CreateEditRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2006
+ /**
2007
+ *
2008
+ * @summary Creates an embedding vector representing the input text.
2009
+ * @param {CreateEmbeddingRequest} createEmbeddingRequest
2010
+ * @param {*} [options] Override http request option.
2011
+ * @throws {RequiredError}
2012
+ */
2013
+ createEmbedding: (createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2014
+ /**
2015
+ *
2016
+ * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
2017
+ * @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
2018
+ * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
2019
+ * @param {*} [options] Override http request option.
2020
+ * @throws {RequiredError}
2021
+ */
2022
+ createFile: (file: File, purpose: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2023
+ /**
2024
+ *
2025
+ * @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
2026
+ * @param {CreateFineTuneRequest} createFineTuneRequest
2027
+ * @param {*} [options] Override http request option.
2028
+ * @throws {RequiredError}
2029
+ */
2030
+ createFineTune: (createFineTuneRequest: CreateFineTuneRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2031
+ /**
2032
+ *
2033
+ * @summary Creates an image given a prompt.
2034
+ * @param {CreateImageRequest} createImageRequest
2035
+ * @param {*} [options] Override http request option.
2036
+ * @throws {RequiredError}
2037
+ */
2038
+ createImage: (createImageRequest: CreateImageRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2039
+ /**
2040
+ *
2041
+ * @summary Creates an edited or extended image given an original image and a prompt.
2042
+ * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
2043
+ * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
2044
+ * @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
2045
+ * @param {number} [n] The number of images to generate. Must be between 1 and 10.
2046
+ * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
2047
+ * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
2048
+ * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
2049
+ * @param {*} [options] Override http request option.
2050
+ * @throws {RequiredError}
2051
+ */
2052
+ createImageEdit: (image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2053
+ /**
2054
+ *
2055
+ * @summary Creates a variation of a given image.
2056
+ * @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
2057
+ * @param {number} [n] The number of images to generate. Must be between 1 and 10.
2058
+ * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
2059
+ * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
2060
+ * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
2061
+ * @param {*} [options] Override http request option.
2062
+ * @throws {RequiredError}
2063
+ */
2064
+ createImageVariation: (image: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2065
+ /**
2066
+ *
2067
+ * @summary Classifies if text violates OpenAI\'s Content Policy
2068
+ * @param {CreateModerationRequest} createModerationRequest
2069
+ * @param {*} [options] Override http request option.
2070
+ * @throws {RequiredError}
2071
+ */
2072
+ createModeration: (createModerationRequest: CreateModerationRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2073
+ /**
2074
+ *
2075
+ * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
2076
+ * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
2077
+ * @param {CreateSearchRequest} createSearchRequest
2078
+ * @param {*} [options] Override http request option.
2079
+ * @deprecated
2080
+ * @throws {RequiredError}
2081
+ */
2082
+ createSearch: (engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2083
+ /**
2084
+ *
2085
+ * @summary Transcribes audio into the input language.
2086
+ * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
2087
+ * @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
2088
+ * @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
2089
+ * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
2090
+ * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
2091
+ * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
2092
+ * @param {*} [options] Override http request option.
2093
+ * @throws {RequiredError}
2094
+ */
2095
+ createTranscription: (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2096
+ /**
2097
+ *
2098
+ * @summary Translates audio into into English.
2099
+ * @param {File} file The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
2100
+ * @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
2101
+ * @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
2102
+ * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
2103
+ * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
2104
+ * @param {*} [options] Override http request option.
2105
+ * @throws {RequiredError}
2106
+ */
2107
+ createTranslation: (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2108
+ /**
2109
+ *
2110
+ * @summary Delete a file.
2111
+ * @param {string} fileId The ID of the file to use for this request
2112
+ * @param {*} [options] Override http request option.
2113
+ * @throws {RequiredError}
2114
+ */
2115
+ deleteFile: (fileId: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2116
+ /**
2117
+ *
2118
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
2119
+ * @param {string} model The model to delete
2120
+ * @param {*} [options] Override http request option.
2121
+ * @throws {RequiredError}
2122
+ */
2123
+ deleteModel: (model: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2124
+ /**
2125
+ *
2126
+ * @summary Returns the contents of the specified file
2127
+ * @param {string} fileId The ID of the file to use for this request
2128
+ * @param {*} [options] Override http request option.
2129
+ * @throws {RequiredError}
2130
+ */
2131
+ downloadFile: (fileId: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2132
+ /**
2133
+ *
2134
+ * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
2135
+ * @param {*} [options] Override http request option.
2136
+ * @deprecated
2137
+ * @throws {RequiredError}
2138
+ */
2139
+ listEngines: (options?: AxiosRequestConfig) => Promise<RequestArgs>;
2140
+ /**
2141
+ *
2142
+ * @summary Returns a list of files that belong to the user\'s organization.
2143
+ * @param {*} [options] Override http request option.
2144
+ * @throws {RequiredError}
2145
+ */
2146
+ listFiles: (options?: AxiosRequestConfig) => Promise<RequestArgs>;
2147
+ /**
2148
+ *
2149
+ * @summary Get fine-grained status updates for a fine-tune job.
2150
+ * @param {string} fineTuneId The ID of the fine-tune job to get events for.
2151
+ * @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a &#x60;data: [DONE]&#x60; message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
2152
+ * @param {*} [options] Override http request option.
2153
+ * @throws {RequiredError}
2154
+ */
2155
+ listFineTuneEvents: (fineTuneId: string, stream?: boolean, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2156
+ /**
2157
+ *
2158
+ * @summary List your organization\'s fine-tuning jobs
2159
+ * @param {*} [options] Override http request option.
2160
+ * @throws {RequiredError}
2161
+ */
2162
+ listFineTunes: (options?: AxiosRequestConfig) => Promise<RequestArgs>;
2163
+ /**
2164
+ *
2165
+ * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
2166
+ * @param {*} [options] Override http request option.
2167
+ * @throws {RequiredError}
2168
+ */
2169
+ listModels: (options?: AxiosRequestConfig) => Promise<RequestArgs>;
2170
+ /**
2171
+ *
2172
+ * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
2173
+ * @param {string} engineId The ID of the engine to use for this request
2174
+ * @param {*} [options] Override http request option.
2175
+ * @deprecated
2176
+ * @throws {RequiredError}
2177
+ */
2178
+ retrieveEngine: (engineId: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2179
+ /**
2180
+ *
2181
+ * @summary Returns information about a specific file.
2182
+ * @param {string} fileId The ID of the file to use for this request
2183
+ * @param {*} [options] Override http request option.
2184
+ * @throws {RequiredError}
2185
+ */
2186
+ retrieveFile: (fileId: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2187
+ /**
2188
+ *
2189
+ * @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
2190
+ * @param {string} fineTuneId The ID of the fine-tune job
2191
+ * @param {*} [options] Override http request option.
2192
+ * @throws {RequiredError}
2193
+ */
2194
+ retrieveFineTune: (fineTuneId: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2195
+ /**
2196
+ *
2197
+ * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
2198
+ * @param {string} model The ID of the model to use for this request
2199
+ * @param {*} [options] Override http request option.
2200
+ * @throws {RequiredError}
2201
+ */
2202
+ retrieveModel: (model: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
2203
+ };
2204
+ /**
2205
+ * OpenAIApi - functional programming interface
2206
+ * @export
2207
+ */
2208
+ export declare const OpenAIApiFp: (configuration?: Configuration) => {
2209
+ /**
2210
+ *
2211
+ * @summary Immediately cancel a fine-tune job.
2212
+ * @param {string} fineTuneId The ID of the fine-tune job to cancel
2213
+ * @param {*} [options] Override http request option.
2214
+ * @throws {RequiredError}
2215
+ */
2216
+ cancelFineTune(fineTuneId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<FineTune>>;
2217
+ /**
2218
+ *
2219
+ * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
2220
+ * @param {CreateAnswerRequest} createAnswerRequest
2221
+ * @param {*} [options] Override http request option.
2222
+ * @deprecated
2223
+ * @throws {RequiredError}
2224
+ */
2225
+ createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateAnswerResponse>>;
2226
+ /**
2227
+ *
2228
+ * @summary Creates a model response for the given chat conversation.
2229
+ * @param {CreateChatCompletionRequest} createChatCompletionRequest
2230
+ * @param {*} [options] Override http request option.
2231
+ * @throws {RequiredError}
2232
+ */
2233
+ createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateChatCompletionResponse>>;
2234
+ /**
2235
+ *
2236
+ * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
2237
+ * @param {CreateClassificationRequest} createClassificationRequest
2238
+ * @param {*} [options] Override http request option.
2239
+ * @deprecated
2240
+ * @throws {RequiredError}
2241
+ */
2242
+ createClassification(createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateClassificationResponse>>;
2243
+ /**
2244
+ *
2245
+ * @summary Creates a completion for the provided prompt and parameters.
2246
+ * @param {CreateCompletionRequest} createCompletionRequest
2247
+ * @param {*} [options] Override http request option.
2248
+ * @throws {RequiredError}
2249
+ */
2250
+ createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>>;
2251
+ /**
2252
+ *
2253
+ * @summary Creates a new edit for the provided input, instruction, and parameters.
2254
+ * @param {CreateEditRequest} createEditRequest
2255
+ * @param {*} [options] Override http request option.
2256
+ * @throws {RequiredError}
2257
+ */
2258
+ createEdit(createEditRequest: CreateEditRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateEditResponse>>;
2259
+ /**
2260
+ *
2261
+ * @summary Creates an embedding vector representing the input text.
2262
+ * @param {CreateEmbeddingRequest} createEmbeddingRequest
2263
+ * @param {*} [options] Override http request option.
2264
+ * @throws {RequiredError}
2265
+ */
2266
+ createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateEmbeddingResponse>>;
2267
+ /**
2268
+ *
2269
+ * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
2270
+ * @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
2271
+ * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
2272
+ * @param {*} [options] Override http request option.
2273
+ * @throws {RequiredError}
2274
+ */
2275
+ createFile(file: File, purpose: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<OpenAIFile>>;
2276
+ /**
2277
+ *
2278
+ * @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
2279
+ * @param {CreateFineTuneRequest} createFineTuneRequest
2280
+ * @param {*} [options] Override http request option.
2281
+ * @throws {RequiredError}
2282
+ */
2283
+ createFineTune(createFineTuneRequest: CreateFineTuneRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<FineTune>>;
2284
+ /**
2285
+ *
2286
+ * @summary Creates an image given a prompt.
2287
+ * @param {CreateImageRequest} createImageRequest
2288
+ * @param {*} [options] Override http request option.
2289
+ * @throws {RequiredError}
2290
+ */
2291
+ createImage(createImageRequest: CreateImageRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ImagesResponse>>;
2292
+ /**
2293
+ *
2294
+ * @summary Creates an edited or extended image given an original image and a prompt.
2295
+ * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
2296
+ * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
2297
+ * @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
2298
+ * @param {number} [n] The number of images to generate. Must be between 1 and 10.
2299
+ * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
2300
+ * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
2301
+ * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
2302
+ * @param {*} [options] Override http request option.
2303
+ * @throws {RequiredError}
2304
+ */
2305
+ createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ImagesResponse>>;
2306
+ /**
2307
+ *
2308
+ * @summary Creates a variation of a given image.
2309
+ * @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
2310
+ * @param {number} [n] The number of images to generate. Must be between 1 and 10.
2311
+ * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
2312
+ * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
2313
+ * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
2314
+ * @param {*} [options] Override http request option.
2315
+ * @throws {RequiredError}
2316
+ */
2317
+ createImageVariation(image: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ImagesResponse>>;
2318
+ /**
2319
+ *
2320
+ * @summary Classifies if text violates OpenAI\'s Content Policy
2321
+ * @param {CreateModerationRequest} createModerationRequest
2322
+ * @param {*} [options] Override http request option.
2323
+ * @throws {RequiredError}
2324
+ */
2325
+ createModeration(createModerationRequest: CreateModerationRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateModerationResponse>>;
2326
+ /**
2327
+ *
2328
+ * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
2329
+ * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
2330
+ * @param {CreateSearchRequest} createSearchRequest
2331
+ * @param {*} [options] Override http request option.
2332
+ * @deprecated
2333
+ * @throws {RequiredError}
2334
+ */
2335
+ createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateSearchResponse>>;
2336
+ /**
2337
+ *
2338
+ * @summary Transcribes audio into the input language.
2339
+ * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
2340
+ * @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
2341
+ * @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
2342
+ * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
2343
+ * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
2344
+ * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
2345
+ * @param {*} [options] Override http request option.
2346
+ * @throws {RequiredError}
2347
+ */
2348
+ createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranscriptionResponse>>;
2349
+ /**
2350
+ *
2351
+ * @summary Translates audio into English.
2352
+ * @param {File} file The audio file object (not file name) to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
2353
+ * @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
2354
+ * @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
2355
+ * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
2356
+ * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
2357
+ * @param {*} [options] Override http request option.
2358
+ * @throws {RequiredError}
2359
+ */
2360
+ createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranslationResponse>>;
2361
+ /**
2362
+ *
2363
+ * @summary Delete a file.
2364
+ * @param {string} fileId The ID of the file to use for this request
2365
+ * @param {*} [options] Override http request option.
2366
+ * @throws {RequiredError}
2367
+ */
2368
+ deleteFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<DeleteFileResponse>>;
2369
+ /**
2370
+ *
2371
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
2372
+ * @param {string} model The model to delete
2373
+ * @param {*} [options] Override http request option.
2374
+ * @throws {RequiredError}
2375
+ */
2376
+ deleteModel(model: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<DeleteModelResponse>>;
2377
+ /**
2378
+ *
2379
+ * @summary Returns the contents of the specified file
2380
+ * @param {string} fileId The ID of the file to use for this request
2381
+ * @param {*} [options] Override http request option.
2382
+ * @throws {RequiredError}
2383
+ */
2384
+ downloadFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<string>>;
2385
+ /**
2386
+ *
2387
+ * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
2388
+ * @param {*} [options] Override http request option.
2389
+ * @deprecated
2390
+ * @throws {RequiredError}
2391
+ */
2392
+ listEngines(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListEnginesResponse>>;
2393
+ /**
2394
+ *
2395
+ * @summary Returns a list of files that belong to the user\'s organization.
2396
+ * @param {*} [options] Override http request option.
2397
+ * @throws {RequiredError}
2398
+ */
2399
+ listFiles(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListFilesResponse>>;
2400
+ /**
2401
+ *
2402
+ * @summary Get fine-grained status updates for a fine-tune job.
2403
+ * @param {string} fineTuneId The ID of the fine-tune job to get events for.
2404
+ * @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a &#x60;data: [DONE]&#x60; message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
2405
+ * @param {*} [options] Override http request option.
2406
+ * @throws {RequiredError}
2407
+ */
2408
+ listFineTuneEvents(fineTuneId: string, stream?: boolean, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListFineTuneEventsResponse>>;
2409
+ /**
2410
+ *
2411
+ * @summary List your organization\'s fine-tuning jobs
2412
+ * @param {*} [options] Override http request option.
2413
+ * @throws {RequiredError}
2414
+ */
2415
+ listFineTunes(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListFineTunesResponse>>;
2416
+ /**
2417
+ *
2418
+ * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
2419
+ * @param {*} [options] Override http request option.
2420
+ * @throws {RequiredError}
2421
+ */
2422
+ listModels(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListModelsResponse>>;
2423
+ /**
2424
+ *
2425
+ * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
2426
+ * @param {string} engineId The ID of the engine to use for this request
2427
+ * @param {*} [options] Override http request option.
2428
+ * @deprecated
2429
+ * @throws {RequiredError}
2430
+ */
2431
+ retrieveEngine(engineId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<Engine>>;
2432
+ /**
2433
+ *
2434
+ * @summary Returns information about a specific file.
2435
+ * @param {string} fileId The ID of the file to use for this request
2436
+ * @param {*} [options] Override http request option.
2437
+ * @throws {RequiredError}
2438
+ */
2439
+ retrieveFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<OpenAIFile>>;
2440
+ /**
2441
+ *
2442
+ * @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
2443
+ * @param {string} fineTuneId The ID of the fine-tune job
2444
+ * @param {*} [options] Override http request option.
2445
+ * @throws {RequiredError}
2446
+ */
2447
+ retrieveFineTune(fineTuneId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<FineTune>>;
2448
+ /**
2449
+ *
2450
+ * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
2451
+ * @param {string} model The ID of the model to use for this request
2452
+ * @param {*} [options] Override http request option.
2453
+ * @throws {RequiredError}
2454
+ */
2455
+ retrieveModel(model: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<Model>>;
2456
+ };
2457
+ /**
2458
+ * OpenAIApi - factory interface
2459
+ * @export
2460
+ */
2461
+ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) => {
2462
+ /**
2463
+ *
2464
+ * @summary Immediately cancel a fine-tune job.
2465
+ * @param {string} fineTuneId The ID of the fine-tune job to cancel
2466
+ * @param {*} [options] Override http request option.
2467
+ * @throws {RequiredError}
2468
+ */
2469
+ cancelFineTune(fineTuneId: string, options?: any): AxiosPromise<FineTune>;
2470
+ /**
2471
+ *
2472
+ * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
2473
+ * @param {CreateAnswerRequest} createAnswerRequest
2474
+ * @param {*} [options] Override http request option.
2475
+ * @deprecated
2476
+ * @throws {RequiredError}
2477
+ */
2478
+ createAnswer(createAnswerRequest: CreateAnswerRequest, options?: any): AxiosPromise<CreateAnswerResponse>;
2479
+ /**
2480
+ *
2481
+ * @summary Creates a model response for the given chat conversation.
2482
+ * @param {CreateChatCompletionRequest} createChatCompletionRequest
2483
+ * @param {*} [options] Override http request option.
2484
+ * @throws {RequiredError}
2485
+ */
2486
+ createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: any): AxiosPromise<CreateChatCompletionResponse>;
2487
+ /**
2488
+ *
2489
+ * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
2490
+ * @param {CreateClassificationRequest} createClassificationRequest
2491
+ * @param {*} [options] Override http request option.
2492
+ * @deprecated
2493
+ * @throws {RequiredError}
2494
+ */
2495
+ createClassification(createClassificationRequest: CreateClassificationRequest, options?: any): AxiosPromise<CreateClassificationResponse>;
2496
+ /**
2497
+ *
2498
+ * @summary Creates a completion for the provided prompt and parameters.
2499
+ * @param {CreateCompletionRequest} createCompletionRequest
2500
+ * @param {*} [options] Override http request option.
2501
+ * @throws {RequiredError}
2502
+ */
2503
+ createCompletion(createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse>;
2504
+ /**
2505
+ *
2506
+ * @summary Creates a new edit for the provided input, instruction, and parameters.
2507
+ * @param {CreateEditRequest} createEditRequest
2508
+ * @param {*} [options] Override http request option.
2509
+ * @throws {RequiredError}
2510
+ */
2511
+ createEdit(createEditRequest: CreateEditRequest, options?: any): AxiosPromise<CreateEditResponse>;
2512
+ /**
2513
+ *
2514
+ * @summary Creates an embedding vector representing the input text.
2515
+ * @param {CreateEmbeddingRequest} createEmbeddingRequest
2516
+ * @param {*} [options] Override http request option.
2517
+ * @throws {RequiredError}
2518
+ */
2519
+ createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: any): AxiosPromise<CreateEmbeddingResponse>;
2520
+ /**
2521
+ *
2522
+ * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
2523
+ * @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
2524
+ * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
2525
+ * @param {*} [options] Override http request option.
2526
+ * @throws {RequiredError}
2527
+ */
2528
+ createFile(file: File, purpose: string, options?: any): AxiosPromise<OpenAIFile>;
2529
+ /**
2530
+ *
2531
+ * @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
2532
+ * @param {CreateFineTuneRequest} createFineTuneRequest
2533
+ * @param {*} [options] Override http request option.
2534
+ * @throws {RequiredError}
2535
+ */
2536
+ createFineTune(createFineTuneRequest: CreateFineTuneRequest, options?: any): AxiosPromise<FineTune>;
2537
+ /**
2538
+ *
2539
+ * @summary Creates an image given a prompt.
2540
+ * @param {CreateImageRequest} createImageRequest
2541
+ * @param {*} [options] Override http request option.
2542
+ * @throws {RequiredError}
2543
+ */
2544
+ createImage(createImageRequest: CreateImageRequest, options?: any): AxiosPromise<ImagesResponse>;
2545
+ /**
2546
+ *
2547
+ * @summary Creates an edited or extended image given an original image and a prompt.
2548
+ * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
2549
+ * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
2550
+ * @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
2551
+ * @param {number} [n] The number of images to generate. Must be between 1 and 10.
2552
+ * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
2553
+ * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
2554
+ * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
2555
+ * @param {*} [options] Override http request option.
2556
+ * @throws {RequiredError}
2557
+ */
2558
+ createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: any): AxiosPromise<ImagesResponse>;
2559
+ /**
2560
+ *
2561
+ * @summary Creates a variation of a given image.
2562
+ * @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
2563
+ * @param {number} [n] The number of images to generate. Must be between 1 and 10.
2564
+ * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
2565
+ * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
2566
+ * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
2567
+ * @param {*} [options] Override http request option.
2568
+ * @throws {RequiredError}
2569
+ */
2570
+ createImageVariation(image: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: any): AxiosPromise<ImagesResponse>;
2571
+ /**
2572
+ *
2573
+ * @summary Classifies if text violates OpenAI\'s Content Policy
2574
+ * @param {CreateModerationRequest} createModerationRequest
2575
+ * @param {*} [options] Override http request option.
2576
+ * @throws {RequiredError}
2577
+ */
2578
+ createModeration(createModerationRequest: CreateModerationRequest, options?: any): AxiosPromise<CreateModerationResponse>;
2579
+ /**
2580
+ *
2581
+ * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
2582
+ * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
2583
+ * @param {CreateSearchRequest} createSearchRequest
2584
+ * @param {*} [options] Override http request option.
2585
+ * @deprecated
2586
+ * @throws {RequiredError}
2587
+ */
2588
+ createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: any): AxiosPromise<CreateSearchResponse>;
2589
+ /**
2590
+ *
2591
+ * @summary Transcribes audio into the input language.
2592
+ * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
2593
+ * @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
2594
+ * @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
2595
+ * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
2596
+ * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
2597
+ * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
2598
+ * @param {*} [options] Override http request option.
2599
+ * @throws {RequiredError}
2600
+ */
2601
+ createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: any): AxiosPromise<CreateTranscriptionResponse>;
2602
+ /**
2603
+ *
2604
+ * @summary Translates audio into English.
2605
+ * @param {File} file The audio file object (not file name) to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
2606
+ * @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
2607
+ * @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
2608
+ * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
2609
+ * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
2610
+ * @param {*} [options] Override http request option.
2611
+ * @throws {RequiredError}
2612
+ */
2613
+ createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: any): AxiosPromise<CreateTranslationResponse>;
2614
+ /**
2615
+ *
2616
+ * @summary Delete a file.
2617
+ * @param {string} fileId The ID of the file to use for this request
2618
+ * @param {*} [options] Override http request option.
2619
+ * @throws {RequiredError}
2620
+ */
2621
+ deleteFile(fileId: string, options?: any): AxiosPromise<DeleteFileResponse>;
2622
+ /**
2623
+ *
2624
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
2625
+ * @param {string} model The model to delete
2626
+ * @param {*} [options] Override http request option.
2627
+ * @throws {RequiredError}
2628
+ */
2629
+ deleteModel(model: string, options?: any): AxiosPromise<DeleteModelResponse>;
2630
+ /**
2631
+ *
2632
+ * @summary Returns the contents of the specified file
2633
+ * @param {string} fileId The ID of the file to use for this request
2634
+ * @param {*} [options] Override http request option.
2635
+ * @throws {RequiredError}
2636
+ */
2637
+ downloadFile(fileId: string, options?: any): AxiosPromise<string>;
2638
+ /**
2639
+ *
2640
+ * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
2641
+ * @param {*} [options] Override http request option.
2642
+ * @deprecated
2643
+ * @throws {RequiredError}
2644
+ */
2645
+ listEngines(options?: any): AxiosPromise<ListEnginesResponse>;
2646
+ /**
2647
+ *
2648
+ * @summary Returns a list of files that belong to the user\'s organization.
2649
+ * @param {*} [options] Override http request option.
2650
+ * @throws {RequiredError}
2651
+ */
2652
+ listFiles(options?: any): AxiosPromise<ListFilesResponse>;
2653
+ /**
2654
+ *
2655
+ * @summary Get fine-grained status updates for a fine-tune job.
2656
+ * @param {string} fineTuneId The ID of the fine-tune job to get events for.
2657
+ * @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a &#x60;data: [DONE]&#x60; message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
2658
+ * @param {*} [options] Override http request option.
2659
+ * @throws {RequiredError}
2660
+ */
2661
+ listFineTuneEvents(fineTuneId: string, stream?: boolean, options?: any): AxiosPromise<ListFineTuneEventsResponse>;
2662
+ /**
2663
+ *
2664
+ * @summary List your organization\'s fine-tuning jobs
2665
+ * @param {*} [options] Override http request option.
2666
+ * @throws {RequiredError}
2667
+ */
2668
+ listFineTunes(options?: any): AxiosPromise<ListFineTunesResponse>;
2669
+ /**
2670
+ *
2671
+ * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
2672
+ * @param {*} [options] Override http request option.
2673
+ * @throws {RequiredError}
2674
+ */
2675
+ listModels(options?: any): AxiosPromise<ListModelsResponse>;
2676
+ /**
2677
+ *
2678
+ * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
2679
+ * @param {string} engineId The ID of the engine to use for this request
2680
+ * @param {*} [options] Override http request option.
2681
+ * @deprecated
2682
+ * @throws {RequiredError}
2683
+ */
2684
+ retrieveEngine(engineId: string, options?: any): AxiosPromise<Engine>;
2685
+ /**
2686
+ *
2687
+ * @summary Returns information about a specific file.
2688
+ * @param {string} fileId The ID of the file to use for this request
2689
+ * @param {*} [options] Override http request option.
2690
+ * @throws {RequiredError}
2691
+ */
2692
+ retrieveFile(fileId: string, options?: any): AxiosPromise<OpenAIFile>;
2693
+ /**
2694
+ *
2695
+ * @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
2696
+ * @param {string} fineTuneId The ID of the fine-tune job
2697
+ * @param {*} [options] Override http request option.
2698
+ * @throws {RequiredError}
2699
+ */
2700
+ retrieveFineTune(fineTuneId: string, options?: any): AxiosPromise<FineTune>;
2701
+ /**
2702
+ *
2703
+ * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
2704
+ * @param {string} model The ID of the model to use for this request
2705
+ * @param {*} [options] Override http request option.
2706
+ * @throws {RequiredError}
2707
+ */
2708
+ retrieveModel(model: string, options?: any): AxiosPromise<Model>;
2709
+ };
2710
+ /**
2711
+ * OpenAIApi - object-oriented interface
2712
+ * @export
2713
+ * @class OpenAIApi
2714
+ * @extends {BaseAPI}
2715
+ */
2716
+ export declare class OpenAIApi extends BaseAPI {
2717
+ /**
2718
+ *
2719
+ * @summary Immediately cancel a fine-tune job.
2720
+ * @param {string} fineTuneId The ID of the fine-tune job to cancel
2721
+ * @param {*} [options] Override http request option.
2722
+ * @throws {RequiredError}
2723
+ * @memberof OpenAIApi
2724
+ */
2725
+ cancelFineTune(fineTuneId: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<FineTune, any>>;
2726
+ /**
2727
+ *
2728
+ * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
2729
+ * @param {CreateAnswerRequest} createAnswerRequest
2730
+ * @param {*} [options] Override http request option.
2731
+ * @deprecated
2732
+ * @throws {RequiredError}
2733
+ * @memberof OpenAIApi
2734
+ */
2735
+ createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateAnswerResponse, any>>;
2736
+ /**
2737
+ *
2738
+ * @summary Creates a model response for the given chat conversation.
2739
+ * @param {CreateChatCompletionRequest} createChatCompletionRequest
2740
+ * @param {*} [options] Override http request option.
2741
+ * @throws {RequiredError}
2742
+ * @memberof OpenAIApi
2743
+ */
2744
+ createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateChatCompletionResponse, any>>;
2745
+ /**
2746
+ *
2747
+ * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
2748
+ * @param {CreateClassificationRequest} createClassificationRequest
2749
+ * @param {*} [options] Override http request option.
2750
+ * @deprecated
2751
+ * @throws {RequiredError}
2752
+ * @memberof OpenAIApi
2753
+ */
2754
+ createClassification(createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateClassificationResponse, any>>;
2755
+ /**
2756
+ *
2757
+ * @summary Creates a completion for the provided prompt and parameters.
2758
+ * @param {CreateCompletionRequest} createCompletionRequest
2759
+ * @param {*} [options] Override http request option.
2760
+ * @throws {RequiredError}
2761
+ * @memberof OpenAIApi
2762
+ */
2763
+ createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateCompletionResponse, any>>;
2764
+ /**
2765
+ *
2766
+ * @summary Creates a new edit for the provided input, instruction, and parameters.
2767
+ * @param {CreateEditRequest} createEditRequest
2768
+ * @param {*} [options] Override http request option.
2769
+ * @throws {RequiredError}
2770
+ * @memberof OpenAIApi
2771
+ */
2772
+ createEdit(createEditRequest: CreateEditRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateEditResponse, any>>;
2773
+ /**
2774
+ *
2775
+ * @summary Creates an embedding vector representing the input text.
2776
+ * @param {CreateEmbeddingRequest} createEmbeddingRequest
2777
+ * @param {*} [options] Override http request option.
2778
+ * @throws {RequiredError}
2779
+ * @memberof OpenAIApi
2780
+ */
2781
+ createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateEmbeddingResponse, any>>;
2782
+ /**
2783
+ *
2784
+ * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
2785
+ * @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the &#x60;purpose&#x60; is set to \\\&quot;fine-tune\\\&quot;, each line is a JSON record with \\\&quot;prompt\\\&quot; and \\\&quot;completion\\\&quot; fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
2786
+ * @param {string} purpose The intended purpose of the uploaded documents. Use \\\&quot;fine-tune\\\&quot; for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
2787
+ * @param {*} [options] Override http request option.
2788
+ * @throws {RequiredError}
2789
+ * @memberof OpenAIApi
2790
+ */
2791
+ createFile(file: File, purpose: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<OpenAIFile, any>>;
2792
+ /**
2793
+ *
2794
+ * @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
2795
+ * @param {CreateFineTuneRequest} createFineTuneRequest
2796
+ * @param {*} [options] Override http request option.
2797
+ * @throws {RequiredError}
2798
+ * @memberof OpenAIApi
2799
+ */
2800
+ createFineTune(createFineTuneRequest: CreateFineTuneRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<FineTune, any>>;
2801
+ /**
2802
+ *
2803
+ * @summary Creates an image given a prompt.
2804
+ * @param {CreateImageRequest} createImageRequest
2805
+ * @param {*} [options] Override http request option.
2806
+ * @throws {RequiredError}
2807
+ * @memberof OpenAIApi
2808
+ */
2809
+ createImage(createImageRequest: CreateImageRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<ImagesResponse, any>>;
2810
+ /**
2811
+ *
2812
+ * @summary Creates an edited or extended image given an original image and a prompt.
2813
+ * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
2814
+ * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
2815
+ * @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
2816
+ * @param {number} [n] The number of images to generate. Must be between 1 and 10.
2817
+ * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
2818
+ * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
2819
+ * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
2820
+ * @param {*} [options] Override http request option.
2821
+ * @throws {RequiredError}
2822
+ * @memberof OpenAIApi
2823
+ */
2824
+ createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<ImagesResponse, any>>;
2825
+ /**
2826
+ *
2827
+ * @summary Creates a variation of a given image.
2828
+ * @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
2829
+ * @param {number} [n] The number of images to generate. Must be between 1 and 10.
2830
+ * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
2831
+ * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
2832
+ * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
2833
+ * @param {*} [options] Override http request option.
2834
+ * @throws {RequiredError}
2835
+ * @memberof OpenAIApi
2836
+ */
2837
+ createImageVariation(image: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<ImagesResponse, any>>;
2838
+ /**
2839
+ *
2840
+ * @summary Classifies if text violates OpenAI\'s Content Policy
2841
+ * @param {CreateModerationRequest} createModerationRequest
2842
+ * @param {*} [options] Override http request option.
2843
+ * @throws {RequiredError}
2844
+ * @memberof OpenAIApi
2845
+ */
2846
+ createModeration(createModerationRequest: CreateModerationRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateModerationResponse, any>>;
2847
+ /**
2848
+ *
2849
+ * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
2850
+ * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
2851
+ * @param {CreateSearchRequest} createSearchRequest
2852
+ * @param {*} [options] Override http request option.
2853
+ * @deprecated
2854
+ * @throws {RequiredError}
2855
+ * @memberof OpenAIApi
2856
+ */
2857
+ createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateSearchResponse, any>>;
2858
+ /**
2859
+ *
2860
+ * @summary Transcribes audio into the input language.
2861
+ * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
2862
+ * @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
2863
+ * @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
2864
+ * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
2865
+ * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
2866
+ * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
2867
+ * @param {*} [options] Override http request option.
2868
+ * @throws {RequiredError}
2869
+ * @memberof OpenAIApi
2870
+ */
2871
+ createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateTranscriptionResponse, any>>;
2872
+ /**
2873
+ *
2874
+ * @summary Translates audio into into English.
2875
+ * @param {File} file The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
2876
+ * @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
2877
+ * @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
2878
+ * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
2879
+ * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
2880
+ * @param {*} [options] Override http request option.
2881
+ * @throws {RequiredError}
2882
+ * @memberof OpenAIApi
2883
+ */
2884
+ createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateTranslationResponse, any>>;
2885
+ /**
2886
+ *
2887
+ * @summary Delete a file.
2888
+ * @param {string} fileId The ID of the file to use for this request
2889
+ * @param {*} [options] Override http request option.
2890
+ * @throws {RequiredError}
2891
+ * @memberof OpenAIApi
2892
+ */
2893
+ deleteFile(fileId: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<DeleteFileResponse, any>>;
2894
+ /**
2895
+ *
2896
+ * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
2897
+ * @param {string} model The model to delete
2898
+ * @param {*} [options] Override http request option.
2899
+ * @throws {RequiredError}
2900
+ * @memberof OpenAIApi
2901
+ */
2902
+ deleteModel(model: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<DeleteModelResponse, any>>;
2903
+ /**
2904
+ *
2905
+ * @summary Returns the contents of the specified file
2906
+ * @param {string} fileId The ID of the file to use for this request
2907
+ * @param {*} [options] Override http request option.
2908
+ * @throws {RequiredError}
2909
+ * @memberof OpenAIApi
2910
+ */
2911
+ downloadFile(fileId: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<string, any>>;
2912
+ /**
2913
+ *
2914
+ * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
2915
+ * @param {*} [options] Override http request option.
2916
+ * @deprecated
2917
+ * @throws {RequiredError}
2918
+ * @memberof OpenAIApi
2919
+ */
2920
+ listEngines(options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<ListEnginesResponse, any>>;
2921
+ /**
2922
+ *
2923
+ * @summary Returns a list of files that belong to the user\'s organization.
2924
+ * @param {*} [options] Override http request option.
2925
+ * @throws {RequiredError}
2926
+ * @memberof OpenAIApi
2927
+ */
2928
+ listFiles(options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<ListFilesResponse, any>>;
2929
+ /**
2930
+ *
2931
+ * @summary Get fine-grained status updates for a fine-tune job.
2932
+ * @param {string} fineTuneId The ID of the fine-tune job to get events for.
2933
+ * @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a &#x60;data: [DONE]&#x60; message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
2934
+ * @param {*} [options] Override http request option.
2935
+ * @throws {RequiredError}
2936
+ * @memberof OpenAIApi
2937
+ */
2938
+ listFineTuneEvents(fineTuneId: string, stream?: boolean, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<ListFineTuneEventsResponse, any>>;
2939
+ /**
2940
+ *
2941
+ * @summary List your organization\'s fine-tuning jobs
2942
+ * @param {*} [options] Override http request option.
2943
+ * @throws {RequiredError}
2944
+ * @memberof OpenAIApi
2945
+ */
2946
+ listFineTunes(options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<ListFineTunesResponse, any>>;
2947
+ /**
2948
+ *
2949
+ * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
2950
+ * @param {*} [options] Override http request option.
2951
+ * @throws {RequiredError}
2952
+ * @memberof OpenAIApi
2953
+ */
2954
+ listModels(options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<ListModelsResponse, any>>;
2955
+ /**
2956
+ *
2957
+ * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
2958
+ * @param {string} engineId The ID of the engine to use for this request
2959
+ * @param {*} [options] Override http request option.
2960
+ * @deprecated
2961
+ * @throws {RequiredError}
2962
+ * @memberof OpenAIApi
2963
+ */
2964
+ retrieveEngine(engineId: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<Engine, any>>;
2965
+ /**
2966
+ *
2967
+ * @summary Returns information about a specific file.
2968
+ * @param {string} fileId The ID of the file to use for this request
2969
+ * @param {*} [options] Override http request option.
2970
+ * @throws {RequiredError}
2971
+ * @memberof OpenAIApi
2972
+ */
2973
+ retrieveFile(fileId: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<OpenAIFile, any>>;
2974
+ /**
2975
+ *
2976
+ * @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
2977
+ * @param {string} fineTuneId The ID of the fine-tune job
2978
+ * @param {*} [options] Override http request option.
2979
+ * @throws {RequiredError}
2980
+ * @memberof OpenAIApi
2981
+ */
2982
+ retrieveFineTune(fineTuneId: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<FineTune, any>>;
2983
+ /**
2984
+ *
2985
+ * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
2986
+ * @param {string} model The ID of the model to use for this request
2987
+ * @param {*} [options] Override http request option.
2988
+ * @throws {RequiredError}
2989
+ * @memberof OpenAIApi
2990
+ */
2991
+ retrieveModel(model: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<Model, any>>;
2992
+ }