voice-router-dev 0.8.4 → 0.8.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +120 -0
- package/dist/constants.d.mts +12 -7
- package/dist/constants.d.ts +12 -7
- package/dist/constants.js +35 -21
- package/dist/constants.mjs +35 -21
- package/dist/{field-configs-DLbrsYTk.d.mts → field-configs-D1RCJSmr.d.mts} +4394 -6121
- package/dist/{field-configs-DLbrsYTk.d.ts → field-configs-D1RCJSmr.d.ts} +4394 -6121
- package/dist/field-configs.d.mts +1 -1
- package/dist/field-configs.d.ts +1 -1
- package/dist/field-configs.js +1794 -3231
- package/dist/field-configs.mjs +1794 -3231
- package/dist/index.d.mts +934 -3168
- package/dist/index.d.ts +934 -3168
- package/dist/index.js +2180 -3500
- package/dist/index.mjs +2182 -3502
- package/dist/{provider-metadata-MDUUEuqF.d.mts → provider-metadata-BnkedpXm.d.mts} +4 -4
- package/dist/{provider-metadata-_gUWlRXS.d.ts → provider-metadata-DbsSGAO7.d.ts} +4 -4
- package/dist/provider-metadata.d.mts +1 -1
- package/dist/provider-metadata.d.ts +1 -1
- package/dist/{speechToTextChunkResponseModel-eq8eLKEA.d.ts → speechToTextChunkResponseModel-BZSxrijj.d.ts} +804 -1347
- package/dist/{speechToTextChunkResponseModel-BcT1LJSZ.d.mts → speechToTextChunkResponseModel-DK61nDc5.d.mts} +804 -1347
- package/dist/webhooks.d.mts +2 -2
- package/dist/webhooks.d.ts +2 -2
- package/package.json +2 -2
@@ -1,5 +1,5 @@
 import { DeepgramModelCode, DeepgramTopicModeType, DeepgramRedactType, AssemblyAISampleRateType, AssemblyAIEncodingType, AssemblyAISpeechModelType, SonioxRealtimeModelCode, SonioxLanguageCode, ElevenLabsRealtimeModelCode, ElevenLabsAudioFormatType, SonioxModelCode, ElevenLabsModelCode, DeepgramLanguageCode, ElevenLabsLanguageCode, SpeechmaticsLanguageCode, AzureLocaleCode } from './constants.js';
-import { e as StreamingProviderType, B as BatchOnlyProviderType, T as TranscriptionProvider } from './provider-metadata-_gUWlRXS.js';
+import { e as StreamingProviderType, B as BatchOnlyProviderType, T as TranscriptionProvider } from './provider-metadata-DbsSGAO7.js';

 /**
 * Unified audio encoding types for Voice Router SDK
@@ -37,82 +37,75 @@ type AudioChannels = 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8;
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseMetadataModelInfo {
 [key: string]: unknown;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseMetadataSummaryInfo {
 model_uuid?: string;
 input_tokens?: number;
 output_tokens?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseMetadataSentimentInfo {
 model_uuid?: string;
 input_tokens?: number;
 output_tokens?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseMetadataTopicsInfo {
 model_uuid?: string;
 input_tokens?: number;
 output_tokens?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseMetadataIntentsInfo {
 model_uuid?: string;
 input_tokens?: number;
 output_tokens?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

 interface ListenV1ResponseMetadata {
-/** @deprecated */
 transaction_key?: string;
 request_id: string;
 sha256: string;
@@ -131,192 +124,179 @@ interface ListenV1ResponseMetadata {
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseResultsChannelsItemsSearchItemsHitsItems {
 confidence?: number;
 start?: number;
 end?: number;
 snippet?: string;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
+interface ListenV1ResponseResultsChannelsItemsSearchItems {
 query?: string;
-hits?:
-}
+hits?: ListenV1ResponseResultsChannelsItemsSearchItemsHitsItems[];
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseResultsChannelsItemsAlternativesItemsWordsItems {
 word?: string;
 start?: number;
 end?: number;
 confidence?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseResultsChannelsItemsAlternativesItemsParagraphsParagraphsItemsSentencesItems {
 text?: string;
 start?: number;
 end?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
-sentences?:
+interface ListenV1ResponseResultsChannelsItemsAlternativesItemsParagraphsParagraphsItems {
+sentences?: ListenV1ResponseResultsChannelsItemsAlternativesItemsParagraphsParagraphsItemsSentencesItems[];
 speaker?: number;
 num_words?: number;
 start?: number;
 end?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
+interface ListenV1ResponseResultsChannelsItemsAlternativesItemsParagraphs {
 transcript?: string;
-paragraphs?:
-}
+paragraphs?: ListenV1ResponseResultsChannelsItemsAlternativesItemsParagraphsParagraphsItems[];
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseResultsChannelsItemsAlternativesItemsEntitiesItems {
 label?: string;
 value?: string;
 raw_value?: string;
 confidence?: number;
 start_word?: number;
 end_word?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseResultsChannelsItemsAlternativesItemsSummariesItems {
 summary?: string;
 start_word?: number;
 end_word?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseResultsChannelsItemsAlternativesItemsTopicsItems {
 text?: string;
 start_word?: number;
 end_word?: number;
 topics?: string[];
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
+interface ListenV1ResponseResultsChannelsItemsAlternativesItems {
 transcript?: string;
 confidence?: number;
-words?:
-paragraphs?:
-entities?:
-summaries?:
-topics?:
-}
+words?: ListenV1ResponseResultsChannelsItemsAlternativesItemsWordsItems[];
+paragraphs?: ListenV1ResponseResultsChannelsItemsAlternativesItemsParagraphs;
+entities?: ListenV1ResponseResultsChannelsItemsAlternativesItemsEntitiesItems[];
+summaries?: ListenV1ResponseResultsChannelsItemsAlternativesItemsSummariesItems[];
+topics?: ListenV1ResponseResultsChannelsItemsAlternativesItemsTopicsItems[];
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
-search?:
-alternatives?:
+interface ListenV1ResponseResultsChannelsItems {
+search?: ListenV1ResponseResultsChannelsItemsSearchItems[];
+alternatives?: ListenV1ResponseResultsChannelsItemsAlternativesItems[];
 detected_language?: string;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-type ListenV1ResponseResultsChannels =
+type ListenV1ResponseResultsChannels = ListenV1ResponseResultsChannelsItems[];

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface ListenV1ResponseResultsUtterancesItemsWordsItems {
 word?: string;
 start?: number;
 end?: number;
@@ -324,45 +304,42 @@ type ListenV1ResponseResultsUtterancesItemWordsItem = {
 speaker?: number;
 speaker_confidence?: number;
 punctuated_word?: string;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
+interface ListenV1ResponseResultsUtterancesItems {
 start?: number;
 end?: number;
 confidence?: number;
 channel?: number;
 transcript?: string;
-words?:
+words?: ListenV1ResponseResultsUtterancesItemsWordsItems[];
 speaker?: number;
 id?: string;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-type ListenV1ResponseResultsUtterances =
+type ListenV1ResponseResultsUtterances = ListenV1ResponseResultsUtterancesItems[];

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
 interface ListenV1ResponseResultsSummary {
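
The hunks above replace member types that were previously emitted truncated or opaque (`hits?:`, `words?:`, `paragraphs?:`, `alternatives?:`) with concrete `Items`-suffixed interfaces and array aliases. A minimal sketch of how the renamed shapes now compose; the declarations below are local re-declarations trimmed to a few fields, not imports from the package:

```ts
// Trimmed local mirror of the reshaped Deepgram response types above
// (illustrative only; the package ships the full generated versions).
interface ListenV1ResponseResultsChannelsItemsAlternativesItems {
  transcript?: string;
  confidence?: number;
}
interface ListenV1ResponseResultsChannelsItems {
  alternatives?: ListenV1ResponseResultsChannelsItemsAlternativesItems[];
  detected_language?: string;
}
// In 0.8.6 the channel list is a concrete array of the Items interface:
type ListenV1ResponseResultsChannels = ListenV1ResponseResultsChannelsItems[];

// Pull the top-alternative transcript out of every channel.
function topTranscripts(channels: ListenV1ResponseResultsChannels): string[] {
  return channels.map((c) => c.alternatives?.[0]?.transcript ?? "");
}

console.log(topTranscripts([
  { detected_language: "en", alternatives: [{ transcript: "hello world", confidence: 0.98 }] },
]));
```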
@@ -373,64 +350,59 @@ interface ListenV1ResponseResultsSummary {
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface SharedTopicsResultsTopicsSegmentsItemsTopicsItems {
 topic?: string;
 confidence_score?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
+interface SharedTopicsResultsTopicsSegmentsItems {
 text?: string;
 start_word?: number;
 end_word?: number;
-topics?:
-}
+topics?: SharedTopicsResultsTopicsSegmentsItemsTopicsItems[];
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
-segments?:
-}
+interface SharedTopicsResultsTopics {
+segments?: SharedTopicsResultsTopicsSegmentsItems[];
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
+interface SharedTopicsResults {
 topics?: SharedTopicsResultsTopics;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

@@ -444,64 +416,59 @@ interface SharedTopics {
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface SharedIntentsResultsIntentsSegmentsItemsIntentsItems {
 intent?: string;
 confidence_score?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
+interface SharedIntentsResultsIntentsSegmentsItems {
 text?: string;
 start_word?: number;
 end_word?: number;
-intents?:
-}
+intents?: SharedIntentsResultsIntentsSegmentsItemsIntentsItems[];
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
-segments?:
-}
+interface SharedIntentsResultsIntents {
+segments?: SharedIntentsResultsIntentsSegmentsItems[];
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

-
+interface SharedIntentsResults {
 intents?: SharedIntentsResultsIntents;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

@@ -515,38 +482,35 @@ interface SharedIntents {
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface SharedSentimentsSegmentsItems {
 text?: string;
 start_word?: number;
 end_word?: number;
 sentiment?: string;
 sentiment_score?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-
+interface SharedSentimentsAverage {
 sentiment?: string;
 sentiment_score?: number;
-}
+}

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

@@ -554,16 +518,15 @@ type SharedSentimentsAverage = {
 * Output whenever `sentiment=true` is used
 */
 interface SharedSentiments {
-segments?:
+segments?: SharedSentimentsSegmentsItems[];
 average?: SharedSentimentsAverage;
 }

 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

@@ -579,9 +542,8 @@ interface ListenV1ResponseResults {
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
-* Deepgram API
-*
-
+* Deepgram API
+* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */

@@ -853,18 +815,6 @@ interface CustomVocabularyConfigDTO {
 default_intensity?: number;
 }

-/**
-* Generated by orval v7.9.0 🍺
-* Do not edit manually.
-* Gladia Control API
-* OpenAPI spec version: 1.0
-*/
-
-interface CodeSwitchingConfigDTO {
-/** Specify the languages you want to use when detecting multiple languages */
-languages?: TranscriptionLanguageCodeEnum[];
-}
-
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
@@ -959,7 +909,7 @@ interface SubtitlesConfigDTO {
 * @maximum 5
 */
 maximum_rows_per_caption?: number;
-/** Style of the subtitles. Compliance mode refers to : https://loc.gov/preservation/digital/formats//fdd/fdd000569.shtml#:~:text=SRT%20files%20are%20basic%20text,alongside%2C%20example%3A%20%22MyVideo123
+/** Style of the subtitles. Compliance mode refers to : https://loc.gov/preservation/digital/formats//fdd/fdd000569.shtml#:~:text=SRT%20files%20are%20basic%20text,alongside%2C%20example%3A%20%22MyVideo123 */
 style?: SubtitlesStyleEnum;
 }

@@ -1201,12 +1151,14 @@ interface CustomSpellingConfigDTO {
 * Gladia Control API
 * OpenAPI spec version: 1.0
 */
-interface
+interface AudioToLlmListConfigDTO {
 /**
-* The list of
+* The list of prompts applied on the audio transcription
 * @minItems 1
 */
-
+prompts: unknown[][];
+/** The model to use for the prompt execution. You can find the list of supported models [here](https://openrouter.ai/models). */
+model?: string;
 }

 /**
@@ -1215,12 +1167,122 @@ interface StructuredDataExtractionConfigDTO {
 * Gladia Control API
 * OpenAPI spec version: 1.0
 */
-
-
-
-
-
-
+/**
+* The entity types to redact
+*/
+type PiiRedactionEntityTypeEnum = (typeof PiiRedactionEntityTypeEnum)[keyof typeof PiiRedactionEntityTypeEnum];
+declare const PiiRedactionEntityTypeEnum: {
+readonly APPI: "APPI";
+readonly APPI_SENSITIVE: "APPI_SENSITIVE";
+readonly CCI: "CCI";
+readonly CORE_ENTITIES: "CORE_ENTITIES";
+readonly CPRA: "CPRA";
+readonly GDPR: "GDPR";
+readonly GDPR_SENSITIVE: "GDPR_SENSITIVE";
+readonly HEALTH_INFORMATION: "HEALTH_INFORMATION";
+readonly HIPAA_SAFE_HARBOR: "HIPAA_SAFE_HARBOR";
+readonly LIDI: "LIDI";
+readonly NUMERICAL_EXCL_PCI: "NUMERICAL_EXCL_PCI";
+readonly PCI: "PCI";
+readonly QUEBEC_PRIVACY_ACT: "QUEBEC_PRIVACY_ACT";
+readonly ACCOUNT_NUMBER: "ACCOUNT_NUMBER";
+readonly AGE: "AGE";
+readonly DATE: "DATE";
+readonly DATE_INTERVAL: "DATE_INTERVAL";
+readonly DOB: "DOB";
+readonly DRIVER_LICENSE: "DRIVER_LICENSE";
+readonly DURATION: "DURATION";
+readonly EMAIL_ADDRESS: "EMAIL_ADDRESS";
+readonly EVENT: "EVENT";
+readonly FILENAME: "FILENAME";
+readonly GENDER: "GENDER";
+readonly HEALTHCARE_NUMBER: "HEALTHCARE_NUMBER";
+readonly IP_ADDRESS: "IP_ADDRESS";
+readonly LANGUAGE: "LANGUAGE";
+readonly LOCATION: "LOCATION";
+readonly LOCATION_ADDRESS: "LOCATION_ADDRESS";
+readonly LOCATION_ADDRESS_STREET: "LOCATION_ADDRESS_STREET";
+readonly LOCATION_CITY: "LOCATION_CITY";
+readonly LOCATION_COORDINATE: "LOCATION_COORDINATE";
+readonly LOCATION_COUNTRY: "LOCATION_COUNTRY";
+readonly LOCATION_STATE: "LOCATION_STATE";
+readonly LOCATION_ZIP: "LOCATION_ZIP";
+readonly MARITAL_STATUS: "MARITAL_STATUS";
+readonly MONEY: "MONEY";
+readonly NAME: "NAME";
+readonly NAME_FAMILY: "NAME_FAMILY";
+readonly NAME_GIVEN: "NAME_GIVEN";
+readonly NAME_MEDICAL_PROFESSIONAL: "NAME_MEDICAL_PROFESSIONAL";
+readonly NUMERICAL_PII: "NUMERICAL_PII";
+readonly OCCUPATION: "OCCUPATION";
+readonly ORGANIZATION: "ORGANIZATION";
+readonly ORGANIZATION_MEDICAL_FACILITY: "ORGANIZATION_MEDICAL_FACILITY";
+readonly ORIGIN: "ORIGIN";
+readonly PASSPORT_NUMBER: "PASSPORT_NUMBER";
+readonly PASSWORD: "PASSWORD";
+readonly PHONE_NUMBER: "PHONE_NUMBER";
+readonly PHYSICAL_ATTRIBUTE: "PHYSICAL_ATTRIBUTE";
+readonly POLITICAL_AFFILIATION: "POLITICAL_AFFILIATION";
+readonly RELIGION: "RELIGION";
+readonly SEXUALITY: "SEXUALITY";
+readonly SSN: "SSN";
+readonly TIME: "TIME";
+readonly URL: "URL";
+readonly USERNAME: "USERNAME";
+readonly VEHICLE_ID: "VEHICLE_ID";
+readonly ZODIAC_SIGN: "ZODIAC_SIGN";
+readonly BLOOD_TYPE: "BLOOD_TYPE";
+readonly CONDITION: "CONDITION";
+readonly DOSE: "DOSE";
+readonly DRUG: "DRUG";
+readonly INJURY: "INJURY";
+readonly MEDICAL_PROCESS: "MEDICAL_PROCESS";
+readonly STATISTICS: "STATISTICS";
+readonly BANK_ACCOUNT: "BANK_ACCOUNT";
+readonly CREDIT_CARD: "CREDIT_CARD";
+readonly CREDIT_CARD_EXPIRATION: "CREDIT_CARD_EXPIRATION";
+readonly CVV: "CVV";
+readonly ROUTING_NUMBER: "ROUTING_NUMBER";
+readonly CORPORATE_ACTION: "CORPORATE_ACTION";
+readonly DAY: "DAY";
+readonly EFFECT: "EFFECT";
+readonly FINANCIAL_METRIC: "FINANCIAL_METRIC";
+readonly MEDICAL_CODE: "MEDICAL_CODE";
+readonly MONTH: "MONTH";
+readonly ORGANIZATION_ID: "ORGANIZATION_ID";
+readonly PRODUCT: "PRODUCT";
+readonly PROJECT: "PROJECT";
+readonly TREND: "TREND";
+readonly YEAR: "YEAR";
+};
+
+/**
+* Generated by orval v7.9.0 🍺
+* Do not edit manually.
+* Gladia Control API
+* OpenAPI spec version: 1.0
+*/
+/**
+* The type of processed text to return (marker or mask)
+*/
+type PiiRedactionConfigDTOProcessedTextType = (typeof PiiRedactionConfigDTOProcessedTextType)[keyof typeof PiiRedactionConfigDTOProcessedTextType];
+declare const PiiRedactionConfigDTOProcessedTextType: {
+readonly MARKER: "MARKER";
+readonly MASK: "MASK";
+};
+
+/**
+* Generated by orval v7.9.0 🍺
+* Do not edit manually.
+* Gladia Control API
+* OpenAPI spec version: 1.0
+*/
+
+interface PiiRedactionConfigDTO {
+/** The entity types to redact */
+entity_types?: PiiRedactionEntityTypeEnum;
+/** The type of processed text to return (marker or mask) */
+processed_text_type?: PiiRedactionConfigDTOProcessedTextType;
 }

 /**
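
0.8.6 adds the Gladia PII-redaction types shown in the hunk above. A hedged usage sketch follows: the consts are re-declared locally with only two entity types (the package ships the full set), and note that `entity_types` is declared as a single enum value, not an array, per the generated type above:

```ts
// Local trimmed re-declarations mirroring the generated Gladia PII types.
const PiiRedactionEntityTypeEnum = {
  EMAIL_ADDRESS: "EMAIL_ADDRESS",
  PHONE_NUMBER: "PHONE_NUMBER",
} as const;
type PiiRedactionEntityTypeEnum =
  (typeof PiiRedactionEntityTypeEnum)[keyof typeof PiiRedactionEntityTypeEnum];

const PiiRedactionConfigDTOProcessedTextType = { MARKER: "MARKER", MASK: "MASK" } as const;
type PiiRedactionConfigDTOProcessedTextType =
  (typeof PiiRedactionConfigDTOProcessedTextType)[keyof typeof PiiRedactionConfigDTOProcessedTextType];

interface PiiRedactionConfigDTO {
  // As declared in the diff: a single enum value rather than an array.
  entity_types?: PiiRedactionEntityTypeEnum;
  processed_text_type?: PiiRedactionConfigDTOProcessedTextType;
}

const redaction: PiiRedactionConfigDTO = {
  entity_types: PiiRedactionEntityTypeEnum.EMAIL_ADDRESS,
  processed_text_type: PiiRedactionConfigDTOProcessedTextType.MASK,
};
console.log(redaction);
```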
@@ -1245,35 +1307,10 @@ interface LanguageConfig {
 */

 interface PreRecordedRequestParamsResponse {
-/**
-* **[Deprecated]** Context to feed the transcription model with for possible better accuracy
-* @deprecated
-*/
-context_prompt?: string;
 /** **[Beta]** Can be either boolean to enable custom_vocabulary for this audio or an array with specific vocabulary list to feed the transcription model with */
 custom_vocabulary?: boolean;
 /** **[Beta]** Custom vocabulary configuration, if `custom_vocabulary` is enabled */
 custom_vocabulary_config?: CustomVocabularyConfigDTO;
-/**
-* **[Deprecated]** Use `language_config` instead. Detect the language from the given audio
-* @deprecated
-*/
-detect_language?: boolean;
-/**
-* **[Deprecated]** Use `language_config` instead.Detect multiple languages in the given audio
-* @deprecated
-*/
-enable_code_switching?: boolean;
-/**
-* **[Deprecated]** Use `language_config` instead. Specify the configuration for code switching
-* @deprecated
-*/
-code_switching_config?: CodeSwitchingConfigDTO;
-/**
-* **[Deprecated]** Use `language_config` instead. Set the spoken language for the given audio (ISO 639 standard)
-* @deprecated
-*/
-language?: TranscriptionLanguageCodeEnum;
 /**
 * **[Deprecated]** Use `callback`/`callback_config` instead. Callback URL we will do a `POST` request to with the result of the transcription
 * @deprecated
@@ -1299,32 +1336,24 @@ interface PreRecordedRequestParamsResponse {
 summarization?: boolean;
 /** **[Beta]** Summarization configuration, if `summarization` is enabled */
 summarization_config?: SummarizationConfigDTO;
-/** **[Alpha]** Enable moderation for this audio */
-moderation?: boolean;
 /** **[Alpha]** Enable named entity recognition for this audio */
 named_entity_recognition?: boolean;
-/** **[Alpha]** Enable chapterization for this audio */
-chapterization?: boolean;
-/** **[Alpha]** Enable names consistency for this audio */
-name_consistency?: boolean;
 /** **[Alpha]** Enable custom spelling for this audio */
 custom_spelling?: boolean;
 /** **[Alpha]** Custom spelling configuration, if `custom_spelling` is enabled */
 custom_spelling_config?: CustomSpellingConfigDTO;
-/** **[Alpha]** Enable structured data extraction for this audio */
-structured_data_extraction?: boolean;
-/** **[Alpha]** Structured data extraction configuration, if `structured_data_extraction` is enabled */
-structured_data_extraction_config?: StructuredDataExtractionConfigDTO;
 /** Enable sentiment analysis for this audio */
 sentiment_analysis?: boolean;
 /** **[Alpha]** Enable audio to llm processing for this audio */
 audio_to_llm?: boolean;
 /** **[Alpha]** Audio to llm configuration, if `audio_to_llm` is enabled */
 audio_to_llm_config?: AudioToLlmListConfigDTO;
+/** Enable PII redaction for this audio */
+pii_redaction?: boolean;
+/** PII redaction configuration, if `pii_redaction` is enabled */
+pii_redaction_config?: PiiRedactionConfigDTO;
 /** Enable sentences for this audio */
 sentences?: boolean;
-/** **[Alpha]** Allows to change the output display_mode for this audio. The output will be reordered, creating new utterances when speakers overlapped */
-display_mode?: boolean;
 /** **[Alpha]** Use enhanced punctuation for this audio */
 punctuation_enhanced?: boolean;
 /** Specify the language configuration */
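
The request-params hunks drop the deprecated `context_prompt`/`detect_language`/`language` family and several alpha flags (`moderation`, `chapterization`, `name_consistency`, `structured_data_extraction*`, `display_mode`) while adding the `pii_redaction` pair, so callers should stop passing the removed fields. A sketch of the new fields in use; `PreRecordedParamsSketch` is a hypothetical trimmed stand-in for the real `PreRecordedRequestParamsResponse`:

```ts
// Hypothetical trimmed stand-in for illustration; only diff-touched fields modeled.
interface PiiRedactionConfigSketch {
  processed_text_type?: "MARKER" | "MASK";
}
interface PreRecordedParamsSketch {
  pii_redaction?: boolean;
  pii_redaction_config?: PiiRedactionConfigSketch;
  sentences?: boolean;
  punctuation_enhanced?: boolean;
}

const params: PreRecordedParamsSketch = {
  pii_redaction: true,
  pii_redaction_config: { processed_text_type: "MARKER" },
  sentences: true,
};
console.log(JSON.stringify(params, null, 2));
```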
@@ -1671,6 +1700,19 @@ interface ModerationDTO {
 */
 type NamedEntityRecognitionDTOError = AddonErrorDTO | null;

+/**
+* Generated by orval v7.9.0 🍺
+* Do not edit manually.
+* Gladia Control API
+* OpenAPI spec version: 1.0
+*/
+interface NamedEntityRecognitionResult {
+entity_type: string;
+text: string;
+start: number;
+end: number;
+}
+
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
@@ -1690,8 +1732,11 @@ interface NamedEntityRecognitionDTO {
 * @nullable
 */
 error: NamedEntityRecognitionDTOError;
-/**
-
+/**
+* If `named_entity_recognition` has been enabled, the detected entities.
+* @nullable
+*/
+results: NamedEntityRecognitionResult[] | null;
 }

 /**
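
With `NamedEntityRecognitionDTO.results` now typed as `NamedEntityRecognitionResult[] | null` instead of an opaque value, consumers can iterate entities directly. A small self-contained sketch, re-declaring the interface locally rather than importing from the package:

```ts
// Local re-declaration of the interface added in this diff.
interface NamedEntityRecognitionResult {
  entity_type: string;
  text: string;
  start: number;
  end: number;
}

// Bucket detected entities by type, tolerating the `null` case.
function groupEntities(
  results: NamedEntityRecognitionResult[] | null,
): Map<string, string[]> {
  const byType = new Map<string, string[]>();
  for (const r of results ?? []) {
    const bucket = byType.get(r.entity_type) ?? [];
    bucket.push(r.text);
    byType.set(r.entity_type, bucket);
  }
  return byType;
}

console.log(groupEntities([{ entity_type: "PERSON", text: "Ada", start: 0, end: 3 }]));
```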
@@ -1726,7 +1771,7 @@ interface NamesConsistencyDTO {
 * @nullable
 */
 error: NamesConsistencyDTOError;
-/** If `name_consistency` has been enabled, Gladia will improve the consistency of the names across the transcription */
+/** Deprecated, If `name_consistency` has been enabled, Gladia will improve the consistency of the names across the transcription */
 results: string;
 }

@@ -3993,21 +4038,6 @@ Note: This parameter is only supported for the Universal-3 Pro model.
 */
 type TranscriptRemoveAudioTagsProperty = TranscriptRemoveAudioTags | null;

-/**
-* Generated by orval v7.9.0 🍺
-* Do not edit manually.
-* AssemblyAI API
-* AssemblyAI Speech-to-Text API - Batch transcription endpoints. Filtered from the official AssemblyAI docs spec.
-* OpenAPI spec version: 1.0.0
-*/
-/**
-* The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.
-
-Note: This parameter can only be used with the Universal-3 Pro model.
-
-*/
-type TranscriptTemperature = number | null;
-
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
@@ -4415,11 +4445,6 @@ interface Transcript {
 Note: This parameter is only supported for the Universal-3 Pro model.
 */
 remove_audio_tags?: TranscriptRemoveAudioTagsProperty;
-/** The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.
-
-Note: This parameter can only be used with the Universal-3 Pro model.
-*/
-temperature?: TranscriptTemperature;
 /** The textual transcript of your media file */
 text?: TranscriptText;
 /** True while a request is throttled and false when a request is no longer throttled */
@@ -4769,757 +4794,420 @@ declare const StreamingSupportedModels: {
|
|
|
4769
4794
|
/**
|
|
4770
4795
|
* Generated by orval v7.9.0 🍺
|
|
4771
4796
|
* Do not edit manually.
|
|
4772
|
-
* Deepgram API
|
|
4773
|
-
*
|
|
4774
|
-
|
|
4797
|
+
* Deepgram API
|
|
4798
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4775
4799
|
* OpenAPI spec version: 1.0.0
|
|
4776
4800
|
*/
|
|
4777
|
-
|
|
4778
|
-
|
|
4779
|
-
|
|
4780
|
-
|
|
4801
|
+
type V1ListenPostParametersCallbackMethod = (typeof V1ListenPostParametersCallbackMethod)[keyof typeof V1ListenPostParametersCallbackMethod];
|
|
4802
|
+
declare const V1ListenPostParametersCallbackMethod: {
|
|
4803
|
+
readonly POST: "POST";
|
|
4804
|
+
readonly PUT: "PUT";
|
|
4805
|
+
};
|
|
4781
4806
|
|
|
4782
4807
|
/**
|
|
4783
4808
|
* Generated by orval v7.9.0 🍺
|
|
4784
4809
|
* Do not edit manually.
|
|
4785
|
-
* Deepgram API
|
|
4786
|
-
*
|
|
4787
|
-
|
|
4810
|
+
* Deepgram API
|
|
4811
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4788
4812
|
* OpenAPI spec version: 1.0.0
|
|
4789
4813
|
*/
|
|
4814
|
+
type V1ListenPostParametersExtra = string | string[];
|
|
4815
|
+
|
|
4790
4816
|
/**
|
|
4791
|
-
*
|
|
4792
|
-
|
|
4793
|
-
|
|
4794
|
-
*
|
|
4795
|
-
|
|
4796
|
-
/**
|
|
4797
|
-
* SharedCallbackMethodParameter type definition
|
|
4798
|
-
*/
|
|
4799
|
-
/**
|
|
4800
|
-
* SharedCallbackMethodParameter type definition
|
|
4801
|
-
*/
|
|
4802
|
-
/**
|
|
4803
|
-
* SharedCallbackMethodParameter type definition
|
|
4804
|
-
*/
|
|
4805
|
-
/**
|
|
4806
|
-
* SharedCallbackMethodParameter type definition
|
|
4807
|
-
*/
|
|
4808
|
-
/**
|
|
4809
|
-
* SharedCallbackMethodParameter type definition
|
|
4810
|
-
*/
|
|
4811
|
-
/**
|
|
4812
|
-
* SharedCallbackMethodParameter type definition
|
|
4817
|
+
* Generated by orval v7.9.0 🍺
|
|
4818
|
+
* Do not edit manually.
|
|
4819
|
+
* Deepgram API
|
|
4820
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4821
|
+
* OpenAPI spec version: 1.0.0
|
|
4813
4822
|
*/
|
|
4823
|
+
type V1ListenPostParametersSummarize0 = (typeof V1ListenPostParametersSummarize0)[keyof typeof V1ListenPostParametersSummarize0];
|
|
4824
|
+
declare const V1ListenPostParametersSummarize0: {
|
|
4825
|
+
readonly v2: "v2";
|
|
4826
|
+
};
|
|
4827
|
+
|
|
4814
4828
|
/**
|
|
4815
|
-
*
|
|
4829
|
+
* Generated by orval v7.9.0 🍺
|
|
4830
|
+
* Do not edit manually.
|
|
4831
|
+
* Deepgram API
|
|
4832
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4833
|
+
* OpenAPI spec version: 1.0.0
|
|
4816
4834
|
*/
|
|
4835
|
+
|
|
4836
|
+
type V1ListenPostParametersSummarize = V1ListenPostParametersSummarize0 | boolean;
|
|
4837
|
+
|
|
4817
4838
|
/**
|
|
4818
|
-
*
|
|
4839
|
+
* Generated by orval v7.9.0 🍺
|
|
4840
|
+
* Do not edit manually.
|
|
4841
|
+
* Deepgram API
|
|
4842
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4843
|
+
* OpenAPI spec version: 1.0.0
|
|
4819
4844
|
*/
|
|
4845
|
+
type V1ListenPostParametersTag = string | string[];
|
|
4846
|
+
|
|
4820
4847
|
/**
|
|
4821
|
-
*
|
|
4848
|
+
* Generated by orval v7.9.0 🍺
|
|
4849
|
+
* Do not edit manually.
|
|
4850
|
+
* Deepgram API
|
|
4851
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4852
|
+
* OpenAPI spec version: 1.0.0
|
|
4822
4853
|
*/
|
|
4854
|
+
type V1ListenPostParametersCustomTopic = string | string[];
|
|
4855
|
+
|
|
4823
4856
|
/**
|
|
4824
|
-
*
|
|
4857
|
+
* Generated by orval v7.9.0 🍺
|
|
4858
|
+
* Do not edit manually.
|
|
4859
|
+
* Deepgram API
|
|
4860
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4861
|
+
* OpenAPI spec version: 1.0.0
|
|
4825
4862
|
*/
|
|
4826
|
-
type
|
|
4827
|
-
declare const
|
|
4828
|
-
readonly
|
|
4829
|
-
readonly
|
|
4863
|
+
type V1ListenPostParametersCustomTopicMode = (typeof V1ListenPostParametersCustomTopicMode)[keyof typeof V1ListenPostParametersCustomTopicMode];
|
|
4864
|
+
declare const V1ListenPostParametersCustomTopicMode: {
|
|
4865
|
+
readonly extended: "extended";
|
|
4866
|
+
readonly strict: "strict";
|
|
4830
4867
|
};
|
|
4831
4868
|
|
|
4832
4869
|
/**
|
|
4833
4870
|
* Generated by orval v7.9.0 🍺
|
|
4834
4871
|
* Do not edit manually.
|
|
4835
|
-
* Deepgram API
|
|
4836
|
-
*
|
|
4837
|
-
|
|
4872
|
+
* Deepgram API
|
|
4873
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4838
4874
|
* OpenAPI spec version: 1.0.0
|
|
4839
4875
|
*/
|
|
4840
|
-
type
|
|
4876
|
+
type V1ListenPostParametersCustomIntent = string | string[];
|
|
4841
4877
|
|
|
4842
4878
|
/**
|
|
4843
4879
|
* Generated by orval v7.9.0 🍺
|
|
4844
4880
|
* Do not edit manually.
|
|
4845
|
-
* Deepgram API
|
|
4846
|
-
*
|
|
4847
|
-
|
|
4881
|
+
* Deepgram API
|
|
4882
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4848
4883
|
* OpenAPI spec version: 1.0.0
|
|
4849
4884
|
*/
|
|
4885
|
+
type V1ListenPostParametersCustomIntentMode = (typeof V1ListenPostParametersCustomIntentMode)[keyof typeof V1ListenPostParametersCustomIntentMode];
|
|
4886
|
+
declare const V1ListenPostParametersCustomIntentMode: {
|
|
4887
|
+
readonly extended: "extended";
|
|
4888
|
+
readonly strict: "strict";
|
|
4889
|
+
};
|
|
4890
|
+
|
|
4850
4891
|
/**
|
|
4851
|
-
*
|
|
4892
|
+
* Generated by orval v7.9.0 🍺
|
|
4893
|
+
* Do not edit manually.
|
|
4894
|
+
* Deepgram API
|
|
4895
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4896
|
+
* OpenAPI spec version: 1.0.0
|
|
4852
4897
|
*/
|
|
4853
|
-
type
|
|
4898
|
+
type V1ListenPostParametersDetectLanguage = boolean | string[];
|
|
4854
4899
|
|
|
4855
4900
|
/**
|
|
4856
4901
|
* Generated by orval v7.9.0 🍺
|
|
4857
4902
|
* Do not edit manually.
|
|
4858
|
-
* Deepgram API
|
|
4859
|
-
*
|
|
4860
|
-
|
|
4903
|
+
* Deepgram API
|
|
4904
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4861
4905
|
* OpenAPI spec version: 1.0.0
|
|
4862
4906
|
*/
|
|
4863
|
-
type
|
|
4907
|
+
type V1ListenPostParametersEncoding = (typeof V1ListenPostParametersEncoding)[keyof typeof V1ListenPostParametersEncoding];
|
|
4908
|
+
declare const V1ListenPostParametersEncoding: {
|
|
4909
|
+
readonly linear16: "linear16";
|
|
4910
|
+
readonly flac: "flac";
|
|
4911
|
+
readonly mulaw: "mulaw";
|
|
4912
|
+
readonly "amr-nb": "amr-nb";
|
|
4913
|
+
readonly "amr-wb": "amr-wb";
|
|
4914
|
+
readonly opus: "opus";
|
|
4915
|
+
readonly speex: "speex";
|
|
4916
|
+
readonly g729: "g729";
|
|
4917
|
+
};
|
|
4864
4918
|
|
|
4865
4919
|
/**
|
|
4866
4920
|
* Generated by orval v7.9.0 🍺
|
|
4867
4921
|
* Do not edit manually.
|
|
4868
|
-
* Deepgram API
|
|
4869
|
-
*
|
|
4870
|
-
|
|
4922
|
+
* Deepgram API
|
|
4923
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4871
4924
|
* OpenAPI spec version: 1.0.0
|
|
4872
4925
|
*/
|
|
4873
|
-
type
|
|
4926
|
+
type V1ListenPostParametersKeywords = string | string[];
|
|
4874
4927
|
|
|
4875
4928
|
/**
|
|
4876
4929
|
* Generated by orval v7.9.0 🍺
|
|
4877
4930
|
* Do not edit manually.
|
|
4878
|
-
* Deepgram API
|
|
4879
|
-
*
|
|
4880
|
-
|
|
4931
|
+
* Deepgram API
|
|
4932
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4881
4933
|
* OpenAPI spec version: 1.0.0
|
|
4882
4934
|
*/
|
|
4883
4935
|
/**
|
|
4884
|
-
*
|
|
4936
|
+
* Our public models available to all accounts
|
|
4885
4937
|
*/
|
|
4886
|
-
type
|
|
4938
|
+
type V1ListenPostParametersModel0 = (typeof V1ListenPostParametersModel0)[keyof typeof V1ListenPostParametersModel0];
|
|
4939
|
+
declare const V1ListenPostParametersModel0: {
|
|
4940
|
+
readonly "nova-3": "nova-3";
|
|
4941
|
+
readonly "nova-3-general": "nova-3-general";
|
|
4942
|
+
readonly "nova-3-medical": "nova-3-medical";
|
|
4943
|
+
readonly "nova-2": "nova-2";
|
|
4944
|
+
readonly "nova-2-general": "nova-2-general";
|
|
4945
|
+
readonly "nova-2-meeting": "nova-2-meeting";
|
|
4946
|
+
readonly "nova-2-finance": "nova-2-finance";
|
|
4947
|
+
readonly "nova-2-conversationalai": "nova-2-conversationalai";
|
|
4948
|
+
readonly "nova-2-voicemail": "nova-2-voicemail";
|
|
4949
|
+
readonly "nova-2-video": "nova-2-video";
|
|
4950
|
+
readonly "nova-2-medical": "nova-2-medical";
|
|
4951
|
+
readonly "nova-2-drivethru": "nova-2-drivethru";
|
|
4952
|
+
readonly "nova-2-automotive": "nova-2-automotive";
|
|
4953
|
+
readonly nova: "nova";
|
|
4954
|
+
readonly "nova-general": "nova-general";
|
|
4955
|
+
readonly "nova-phonecall": "nova-phonecall";
|
|
4956
|
+
readonly "nova-medical": "nova-medical";
|
|
4957
|
+
readonly enhanced: "enhanced";
|
|
4958
|
+
readonly "enhanced-general": "enhanced-general";
|
|
4959
|
+
readonly "enhanced-meeting": "enhanced-meeting";
|
|
4960
|
+
readonly "enhanced-phonecall": "enhanced-phonecall";
|
|
4961
|
+
readonly "enhanced-finance": "enhanced-finance";
|
|
4962
|
+
readonly base: "base";
|
|
4963
|
+
readonly meeting: "meeting";
|
|
4964
|
+
readonly phonecall: "phonecall";
|
|
4965
|
+
readonly finance: "finance";
|
|
4966
|
+
readonly conversationalai: "conversationalai";
|
|
4967
|
+
readonly voicemail: "voicemail";
|
|
4968
|
+
readonly video: "video";
|
|
4969
|
+
};
|
|
4887
4970
|
|
|
4888
4971
|
/**
|
|
4889
4972
|
* Generated by orval v7.9.0 🍺
|
|
4890
4973
|
* Do not edit manually.
|
|
4891
|
-
* Deepgram API
|
|
4892
|
-
*
|
|
4893
|
-
|
|
4974
|
+
* Deepgram API
|
|
4975
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4894
4976
|
* OpenAPI spec version: 1.0.0
|
|
4895
4977
|
*/
|
|
4896
|
-
|
|
4978
|
+
|
|
4979
|
+
type V1ListenPostParametersModel = V1ListenPostParametersModel0 | string;
|
|
4897
4980
|
|
|
4898
4981
|
/**
|
|
4899
4982
|
* Generated by orval v7.9.0 🍺
|
|
4900
4983
|
* Do not edit manually.
|
|
4901
|
-
* Deepgram API
|
|
4902
|
-
*
|
|
4903
|
-
|
|
4984
|
+
* Deepgram API
|
|
4985
|
+
* Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
|
|
4904
4986
|
* OpenAPI spec version: 1.0.0
|
|
4905
4987
|
*/
|
|
4906
|
-
|
|
4907
|
-
|
|
4908
|
-
|
|
4909
|
-
|
|
4910
|
-
|
|
4911
|
-
|
|
4912
|
-
|
|
4913
|
-
* SharedCustomTopicModeParameter type definition
|
|
4914
|
-
*/
|
|
4915
|
-
/**
|
|
4916
|
-
* SharedCustomTopicModeParameter type definition
|
|
4917
|
-
*/
|
|
4918
|
-
/**
|
|
4919
|
-
* SharedCustomTopicModeParameter type definition
|
|
4920
|
-
*/
|
|
4921
|
-
/**
|
|
4922
|
-
* SharedCustomTopicModeParameter type definition
|
|
4923
|
-
*/
|
|
4924
|
-
/**
|
|
4925
|
-
* SharedCustomTopicModeParameter type definition
|
|
4926
|
-
*/
|
|
4927
|
-
/**
|
|
4928
|
-
* SharedCustomTopicModeParameter type definition
|
|
4929
|
-
*/
|
|
4930
|
-
/**
|
|
4931
|
-
* SharedCustomTopicModeParameter type definition
|
|
4932
|
-
*/
|
|
4933
|
-
/**
|
|
4934
|
-
* SharedCustomTopicModeParameter type definition
|
|
4935
|
-
*/
|
|
4936
|
-
/**
|
|
4937
|
-
* SharedCustomTopicModeParameter type definition
|
|
4938
|
-
*/
|
|
4939
|
-
/**
|
|
4940
|
-
* SharedCustomTopicModeParameter type definition
|
|
4941
|
-
*/
|
|
4942
|
-
-type SharedCustomTopicModeParameter = typeof SharedCustomTopicModeParameter[keyof typeof SharedCustomTopicModeParameter];
-declare const SharedCustomTopicModeParameter: {
-    readonly extended: "extended";
-    readonly strict: "strict";
-};
-
+type V1ListenPostParametersRedactSchemaOneOf1Items = (typeof V1ListenPostParametersRedactSchemaOneOf1Items)[keyof typeof V1ListenPostParametersRedactSchemaOneOf1Items];
+declare const V1ListenPostParametersRedactSchemaOneOf1Items: {
+    readonly pci: "pci";
+    readonly pii: "pii";
+    readonly numbers: "numbers";
+};
+
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-/**
- * Recognizes speaker intent throughout a transcript or text
- */
-type SharedIntentsParameter = boolean;
 
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-type SharedCustomIntentParameter = string | string[];
+type V1ListenPostParametersRedact1 = V1ListenPostParametersRedactSchemaOneOf1Items[];
 
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-/**
- * SharedCustomIntentModeParameter type definition
- */
-type SharedCustomIntentModeParameter = typeof SharedCustomIntentModeParameter[keyof typeof SharedCustomIntentModeParameter];
-declare const SharedCustomIntentModeParameter: {
-    readonly extended: "extended";
-    readonly strict: "strict";
-};
 
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Identifies and extracts key entities from content in submitted audio
- */
-type ListenV1DetectEntitiesParameter = boolean;
+type V1ListenPostParametersRedact = string | V1ListenPostParametersRedact1;
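Illustrative sketch (not part of the published diff): the renamed redaction alias accepts either a plain string or an array of the "pci" | "pii" | "numbers" literals declared above.

// Hypothetical usage of V1ListenPostParametersRedact; values are examples only.
const redactList: V1ListenPostParametersRedact = ["pci", "numbers"];
const redactString: V1ListenPostParametersRedact = "pii";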
 
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-type
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0
- */
-type ListenV1DiarizeParameter = boolean;
+type V1ListenPostParametersReplace = string | string[];
 
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-/**
- * Dictation mode for controlling formatting with dictated speech
- */
-type ListenV1DictationParameter = boolean;
+type V1ListenPostParametersSearch = string | string[];
 
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
 /**
- *
- */
-/**
- * ListenV1EncodingParameter type definition
- */
-/**
- * ListenV1EncodingParameter type definition
- */
-/**
- * ListenV1EncodingParameter type definition
- */
-/**
- * ListenV1EncodingParameter type definition
- */
-/**
- * ListenV1EncodingParameter type definition
+ * Use the latest version of a model
 */
-
-
-
-/**
- * ListenV1EncodingParameter type definition
- */
-/**
- * ListenV1EncodingParameter type definition
- */
-/**
- * ListenV1EncodingParameter type definition
- */
-/**
- * ListenV1EncodingParameter type definition
- */
-/**
- * ListenV1EncodingParameter type definition
- */
-type ListenV1EncodingParameter = typeof ListenV1EncodingParameter[keyof typeof ListenV1EncodingParameter];
-declare const ListenV1EncodingParameter: {
-    readonly linear16: "linear16";
-    readonly flac: "flac";
-    readonly mulaw: "mulaw";
-    readonly opus: "opus";
-    readonly speex: "speex";
-    readonly g729: "g729";
+type V1ListenPostParametersVersion0 = (typeof V1ListenPostParametersVersion0)[keyof typeof V1ListenPostParametersVersion0];
+declare const V1ListenPostParametersVersion0: {
+    readonly latest: "latest";
 };
 
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
- * Deepgram API
- *
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Filler Words can help transcribe interruptions in your audio, like "uh" and "um"
- */
-type ListenV1FillerWordsParameter = boolean;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Key term prompting can boost or suppress specialized terminology and brands. Only compatible with Nova-3
- */
-type ListenV1KeytermParameter = string[];
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-type ListenV1KeywordsParameter = string | string[];
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * The [BCP-47 language tag](https://tools.ietf.org/html/bcp47) that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available
- */
-type ListenV1LanguageParameter = string;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Spoken measurements will be converted to their corresponding abbreviations
- */
-type ListenV1MeasurementsParameter = boolean;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-type ListenV1ModelParameter = "nova-3" | "nova-3-general" | "nova-3-medical" | "nova-2" | "nova-2-general" | "nova-2-meeting" | "nova-2-finance" | "nova-2-conversationalai" | "nova-2-voicemail" | "nova-2-video" | "nova-2-medical" | "nova-2-drivethru" | "nova-2-automotive" | "nova" | "nova-general" | "nova-phonecall" | "nova-medical" | "enhanced" | "enhanced-general" | "enhanced-meeting" | "enhanced-phonecall" | "enhanced-finance" | "base" | "meeting" | "phonecall" | "finance" | "conversationalai" | "voicemail" | "video" | string;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Transcribe each audio channel independently
- */
-type ListenV1MultichannelParameter = boolean;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Numerals converts numbers from written format to numerical format
- */
-type ListenV1NumeralsParameter = boolean;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Splits audio into paragraphs to improve transcript readability
- */
-type ListenV1ParagraphsParameter = boolean;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-/**
- * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely
- */
-type ListenV1ProfanityFilterParameter = boolean;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
 
-
- */
-/**
- * Add punctuation and capitalization to the transcript
- */
-type ListenV1PunctuateParameter = boolean;
+type V1ListenPostParametersVersion = V1ListenPostParametersVersion0 | string;
 
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
 * OpenAPI spec version: 1.0.0
 */
-type ListenV1RedactParameterOneOfItem = (typeof ListenV1RedactParameterOneOfItem)[keyof typeof ListenV1RedactParameterOneOfItem];
-declare const ListenV1RedactParameterOneOfItem: {
-    readonly pci: "pci";
-    readonly pii: "pii";
-    readonly numbers: "numbers";
-};
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
 
-
- */
-
-type ListenV1RedactParameter = string | ListenV1RedactParameterOneOfItem[];
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-type ListenV1ReplaceParameter = string | string[];
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-type ListenV1SearchParameter = string | string[];
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Apply formatting to transcript output. When set to true, additional formatting will be applied to transcripts to improve readability
- */
-type ListenV1SmartFormatParameter = boolean;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Segments speech into meaningful semantic units
- */
-type ListenV1UtterancesParameter = boolean;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Seconds to wait before detecting a pause between words in submitted audio
- */
-type ListenV1UttSplitParameter = number;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-type ListenV1VersionParameter = "latest" | string;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip
- */
-type SharedMipOptOutParameter = boolean;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-
-type ListenV1MediaTranscribeParams = {
+type ListenTranscribeParams = {
     /**
      * URL to which we'll make the callback request
      */
-    callback?:
+    callback?: string;
     /**
      * HTTP method by which the callback request will be made
      */
-    callback_method?:
+    callback_method?: V1ListenPostParametersCallbackMethod;
     /**
      * Arbitrary key-value pairs that are attached to the API response for usage in downstream processing
      */
-    extra?:
+    extra?: V1ListenPostParametersExtra;
     /**
      * Recognizes the sentiment throughout a transcript or text
      */
-    sentiment?:
+    sentiment?: boolean;
     /**
      * Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only.
      */
-    summarize?:
+    summarize?: V1ListenPostParametersSummarize;
     /**
      * Label your requests for the purpose of identification during usage reporting
      */
-    tag?:
+    tag?: V1ListenPostParametersTag;
     /**
      * Detect topics throughout a transcript or text
      */
-    topics?:
+    topics?: boolean;
     /**
      * Custom topics you want the model to detect within your input audio or text if present Submit up to `100`.
      */
-    custom_topic?:
+    custom_topic?: V1ListenPostParametersCustomTopic;
     /**
      * Sets how the model will interpret strings submitted to the `custom_topic` param. When `strict`, the model will only return topics submitted using the `custom_topic` param. When `extended`, the model will return its own detected topics in addition to those submitted using the `custom_topic` param
      */
-    custom_topic_mode?:
+    custom_topic_mode?: V1ListenPostParametersCustomTopicMode;
     /**
      * Recognizes speaker intent throughout a transcript or text
      */
-    intents?:
+    intents?: boolean;
     /**
      * Custom intents you want the model to detect within your input audio if present
      */
-    custom_intent?:
+    custom_intent?: V1ListenPostParametersCustomIntent;
     /**
      * Sets how the model will interpret intents submitted to the `custom_intent` param. When `strict`, the model will only return intents submitted using the `custom_intent` param. When `extended`, the model will return its own detected intents in the `custom_intent` param.
      */
-    custom_intent_mode?:
+    custom_intent_mode?: V1ListenPostParametersCustomIntentMode;
     /**
      * Identifies and extracts key entities from content in submitted audio
      */
-    detect_entities?:
+    detect_entities?: boolean;
     /**
      * Identifies the dominant language spoken in submitted audio
      */
-    detect_language?:
+    detect_language?: V1ListenPostParametersDetectLanguage;
     /**
      * Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0
      */
-    diarize?:
+    diarize?: boolean;
     /**
      * Dictation mode for controlling formatting with dictated speech
      */
-    dictation?:
+    dictation?: boolean;
     /**
      * Specify the expected encoding of your submitted audio
      */
-    encoding?:
+    encoding?: V1ListenPostParametersEncoding;
     /**
      * Filler Words can help transcribe interruptions in your audio, like "uh" and "um"
      */
-    filler_words?:
+    filler_words?: boolean;
     /**
      * Key term prompting can boost or suppress specialized terminology and brands. Only compatible with Nova-3
      */
-    keyterm?:
+    keyterm?: string[];
     /**
      * Keywords can boost or suppress specialized terminology and brands
      */
-    keywords?:
+    keywords?: V1ListenPostParametersKeywords;
     /**
      * The [BCP-47 language tag](https://tools.ietf.org/html/bcp47) that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available
      */
-    language?:
+    language?: string;
     /**
      * Spoken measurements will be converted to their corresponding abbreviations
      */
-    measurements?:
+    measurements?: boolean;
     /**
      * AI model used to process submitted audio
      */
-    model?:
+    model?: V1ListenPostParametersModel;
     /**
      * Transcribe each audio channel independently
      */
-    multichannel?:
+    multichannel?: boolean;
     /**
      * Numerals converts numbers from written format to numerical format
      */
-    numerals?:
+    numerals?: boolean;
     /**
      * Splits audio into paragraphs to improve transcript readability
      */
-    paragraphs?:
+    paragraphs?: boolean;
     /**
      * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely
      */
-    profanity_filter?:
+    profanity_filter?: boolean;
     /**
      * Add punctuation and capitalization to the transcript
      */
-    punctuate?:
+    punctuate?: boolean;
     /**
      * Redaction removes sensitive information from your transcripts
      */
-    redact?:
+    redact?: V1ListenPostParametersRedact;
     /**
      * Search for terms or phrases in submitted audio and replaces them
      */
-    replace?:
+    replace?: V1ListenPostParametersReplace;
     /**
      * Search for terms or phrases in submitted audio
      */
-    search?:
+    search?: V1ListenPostParametersSearch;
     /**
      * Apply formatting to transcript output. When set to true, additional formatting will be applied to transcripts to improve readability
      */
-    smart_format?:
+    smart_format?: boolean;
     /**
      * Segments speech into meaningful semantic units
      */
-    utterances?:
+    utterances?: boolean;
     /**
      * Seconds to wait before detecting a pause between words in submitted audio
      */
-    utt_split?:
+    utt_split?: number;
     /**
      * Version of an AI model to use
      */
-    version?:
+    version?: V1ListenPostParametersVersion;
     /**
      * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip
      */
-    mip_opt_out?:
+    mip_opt_out?: boolean;
 };
 
 /**
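Illustrative sketch (not part of the published diff): a hypothetical request object built against the renamed ListenTranscribeParams shape. The values are examples, not defaults, and "nova-3" assumes the model ids from the removed ListenV1ModelParameter union remain valid.

// Example only; field names and types come from the declaration above.
const params: ListenTranscribeParams = {
    model: "nova-3",
    language: "en",
    smart_format: true,
    diarize: true,
    redact: ["pci", "pii"],
    utt_split: 0.8,
};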
@@ -5802,20 +5490,6 @@ type TranscriptParamsWebhookAuthHeaderName = string | null;
 */
 type TranscriptParamsWebhookAuthHeaderValue = string | null;
 
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI Speech-to-Text API - Batch transcription endpoints. Filtered from the official AssemblyAI docs spec.
- * OpenAPI spec version: 1.0.0
- */
-
-/**
- * This parameter has been replaced with the `speech_models` parameter, learn more about the `speech_models` parameter [here](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model).
-
- */
-type TranscriptParamsSpeechModel = SpeechModel | null;
-
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
@@ -5933,11 +5607,6 @@ interface TranscriptParams {
     Note: This parameter is only supported for the Universal-3 Pro model.
     */
     remove_audio_tags?: TranscriptParamsRemoveAudioTags;
-    /** Control the amount of randomness injected into the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.
-
-    Note: This parameter can only be used with the Universal-3 Pro model.
-    */
-    temperature?: number;
     /** The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests */
     webhook_auth_header_name?: TranscriptParamsWebhookAuthHeaderName;
     /** The header value to send back with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests for added security */
@@ -5947,9 +5616,6 @@ interface TranscriptParams {
     webhook_url?: string;
     /** This parameter does not currently have any functionality attached to it. */
     custom_topics?: boolean;
-    /** This parameter has been replaced with the `speech_models` parameter, learn more about the `speech_models` parameter [here](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model).
-    */
-    speech_model?: TranscriptParamsSpeechModel;
     /** This parameter does not currently have any functionality attached to it. */
     topics?: string[];
     /** The URL of the audio or video file to transcribe. */
@@ -5977,35 +5643,10 @@ type InitTranscriptionRequestCustomMetadata = {
 */
 
 interface InitTranscriptionRequest {
-    /**
-     * **[Deprecated]** Context to feed the transcription model with for possible better accuracy
-     * @deprecated
-     */
-    context_prompt?: string;
     /** **[Beta]** Can be either boolean to enable custom_vocabulary for this audio or an array with specific vocabulary list to feed the transcription model with */
     custom_vocabulary?: boolean;
     /** **[Beta]** Custom vocabulary configuration, if `custom_vocabulary` is enabled */
     custom_vocabulary_config?: CustomVocabularyConfigDTO;
-    /**
-     * **[Deprecated]** Use `language_config` instead. Detect the language from the given audio
-     * @deprecated
-     */
-    detect_language?: boolean;
-    /**
-     * **[Deprecated]** Use `language_config` instead.Detect multiple languages in the given audio
-     * @deprecated
-     */
-    enable_code_switching?: boolean;
-    /**
-     * **[Deprecated]** Use `language_config` instead. Specify the configuration for code switching
-     * @deprecated
-     */
-    code_switching_config?: CodeSwitchingConfigDTO;
-    /**
-     * **[Deprecated]** Use `language_config` instead. Set the spoken language for the given audio (ISO 639 standard)
-     * @deprecated
-     */
-    language?: TranscriptionLanguageCodeEnum;
     /**
     * **[Deprecated]** Use `callback`/`callback_config` instead. Callback URL we will do a `POST` request to with the result of the transcription
     * @deprecated
@@ -6031,34 +5672,26 @@ interface InitTranscriptionRequest {
     summarization?: boolean;
     /** **[Beta]** Summarization configuration, if `summarization` is enabled */
     summarization_config?: SummarizationConfigDTO;
-    /** **[Alpha]** Enable moderation for this audio */
-    moderation?: boolean;
     /** **[Alpha]** Enable named entity recognition for this audio */
     named_entity_recognition?: boolean;
-    /** **[Alpha]** Enable chapterization for this audio */
-    chapterization?: boolean;
-    /** **[Alpha]** Enable names consistency for this audio */
-    name_consistency?: boolean;
     /** **[Alpha]** Enable custom spelling for this audio */
     custom_spelling?: boolean;
     /** **[Alpha]** Custom spelling configuration, if `custom_spelling` is enabled */
     custom_spelling_config?: CustomSpellingConfigDTO;
-    /** **[Alpha]** Enable structured data extraction for this audio */
-    structured_data_extraction?: boolean;
-    /** **[Alpha]** Structured data extraction configuration, if `structured_data_extraction` is enabled */
-    structured_data_extraction_config?: StructuredDataExtractionConfigDTO;
     /** Enable sentiment analysis for this audio */
     sentiment_analysis?: boolean;
     /** **[Alpha]** Enable audio to llm processing for this audio */
     audio_to_llm?: boolean;
     /** **[Alpha]** Audio to llm configuration, if `audio_to_llm` is enabled */
     audio_to_llm_config?: AudioToLlmListConfigDTO;
+    /** Enable PII redaction for this audio */
+    pii_redaction?: boolean;
+    /** PII redaction configuration, if `pii_redaction` is enabled */
+    pii_redaction_config?: PiiRedactionConfigDTO;
     /** Custom metadata you can attach to this transcription */
     custom_metadata?: InitTranscriptionRequestCustomMetadata;
     /** Enable sentences for this audio */
     sentences?: boolean;
-    /** **[Alpha]** Allows to change the output display_mode for this audio. The output will be reordered, creating new utterances when speakers overlapped */
-    display_mode?: boolean;
     /** **[Alpha]** Use enhanced punctuation for this audio */
     punctuation_enhanced?: boolean;
     /** Specify the language configuration */
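Illustrative sketch (not part of the published diff): enabling the PII redaction fields added in this release. The member shape of PiiRedactionConfigDTO is not visible in this diff, so only the boolean toggle is exercised here.

// Hypothetical request using the new fields from InitTranscriptionRequest.
const request: InitTranscriptionRequest = {
    sentiment_analysis: true,
    pii_redaction: true,
};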
@@ -6188,7 +5821,7 @@ interface VadConfig {
 /**
 * Controls how the audio is cut into chunks. When set to `"auto"`, the server first normalizes loudness and then uses voice activity detection (VAD) to choose boundaries. `server_vad` object can be provided to tweak VAD detection parameters manually. If unset, the audio is transcribed as a single block. Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30 seconds.
 */
-type
+type CreateTranscriptionRequestChunkingStrategyAnyOf = "auto" | VadConfig;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6198,7 +5831,7 @@ type TranscriptionChunkingStrategyAnyOf = "auto" | VadConfig;
 * OpenAPI spec version: 2.3.0
 */
 
-type
+type CreateTranscriptionRequestChunkingStrategy = CreateTranscriptionRequestChunkingStrategyAnyOf | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6218,7 +5851,7 @@ interface CreateTranscriptionRequest {
     /** The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) format will improve accuracy and latency.
     */
     language?: string;
-    /** An optional text to guide the model's style or continue a previous audio segment. The [prompt](
+    /** An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. This field is not supported when using `gpt-4o-transcribe-diarize`.
     */
     prompt?: string;
     response_format?: AudioResponseFormat;
@@ -6237,7 +5870,7 @@ interface CreateTranscriptionRequest {
     */
     timestamp_granularities?: CreateTranscriptionRequestTimestampGranularitiesItem[];
     stream?: CreateTranscriptionRequestStream;
-    chunking_strategy?:
+    chunking_strategy?: CreateTranscriptionRequestChunkingStrategy;
     /**
     * Optional list of speaker names that correspond to the audio samples provided in `known_speaker_references[]`. Each entry should be a short identifier (for example `customer` or `agent`). Up to 4 speakers are supported.
 
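Illustrative sketch (not part of the published diff): the renamed chunking-strategy union accepts the "auto" literal, a VadConfig object, or null.

// "auto" lets the server pick chunk boundaries via voice activity detection.
const chunking: CreateTranscriptionRequestChunkingStrategy = "auto";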
@@ -6259,9 +5892,13 @@ interface CreateTranscriptionRequest {
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-
-
-
+/**
+ * The ID of the model to use for transcription.
+ */
+type BodySpeechToTextV1SpeechToTextPostModelId = (typeof BodySpeechToTextV1SpeechToTextPostModelId)[keyof typeof BodySpeechToTextV1SpeechToTextPostModelId];
+declare const BodySpeechToTextV1SpeechToTextPostModelId: {
+    readonly scribe_v1: "scribe_v1";
+    readonly scribe_v2: "scribe_v2";
 };
 
 /**
@@ -6271,7 +5908,10 @@ declare const DocxExportOptionsFormat: {
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-
+/**
+ * The file to transcribe (100ms minimum audio length). All major audio and video formats are supported. Exactly one of the file or cloud_storage_url parameters must be provided. The file size must be less than 3.0GB.
+ */
+type BodySpeechToTextV1SpeechToTextPostFile = Blob | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6280,7 +5920,10 @@ type DocxExportOptionsMaxSegmentChars = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-
+/**
+ * An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically.
+ */
+type BodySpeechToTextV1SpeechToTextPostLanguageCode = string | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6289,7 +5932,10 @@ type DocxExportOptionsMaxSegmentDurationS = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-
+/**
+ * The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 32. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports.
+ */
+type BodySpeechToTextV1SpeechToTextPostNumSpeakers = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6298,15 +5944,27 @@ type DocxExportOptionsSegmentOnSilenceLongerThanS = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
+/**
+ * The granularity of the timestamps in the transcription. 'word' provides word-level timestamps and 'character' provides character-level timestamps per word.
+ */
+type BodySpeechToTextV1SpeechToTextPostTimestampsGranularity = (typeof BodySpeechToTextV1SpeechToTextPostTimestampsGranularity)[keyof typeof BodySpeechToTextV1SpeechToTextPostTimestampsGranularity];
+declare const BodySpeechToTextV1SpeechToTextPostTimestampsGranularity: {
+    readonly none: "none";
+    readonly word: "word";
+    readonly character: "character";
+};
 
-
-
-
-
-
-
-
-
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * ElevenLabs Speech-to-Text API
+ * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
+ * OpenAPI spec version: 1.0
+ */
+/**
+ * Diarization threshold to apply during speaker diarization. A higher value means there will be a lower chance of one speaker being diarized as two different speakers but also a higher chance of two different speakers being diarized as one speaker (less total speakers predicted). A low value means there will be a higher chance of one speaker being diarized as two different speakers but also a lower chance of two different speakers being diarized as one speaker (more total speakers predicted). Can only be set when diarize=True and num_speakers=None. Defaults to None, in which case we will choose a threshold based on the model_id (0.22 usually).
+ */
+type BodySpeechToTextV1SpeechToTextPostDiarizationThreshold = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
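Illustrative sketch (not part of the published diff): sample values for the newly added ElevenLabs request-body aliases; the string literals come from the const objects declared above.

const modelId: BodySpeechToTextV1SpeechToTextPostModelId = "scribe_v2";
const granularity: BodySpeechToTextV1SpeechToTextPostTimestampsGranularity = "word";
// null defers to the model-dependent default threshold (0.22 per the doc comment above).
const threshold: BodySpeechToTextV1SpeechToTextPostDiarizationThreshold = null;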
@@ -6315,9 +5973,9 @@ interface DocxExportOptions {
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
-declare const
-    readonly
+type SegmentedJsonExportOptionsFormat = (typeof SegmentedJsonExportOptionsFormat)[keyof typeof SegmentedJsonExportOptionsFormat];
+declare const SegmentedJsonExportOptionsFormat: {
+    readonly segmented_json: "segmented_json";
 };
 
 /**
@@ -6327,7 +5985,7 @@ declare const HtmlExportOptionsFormat: {
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type SegmentedJsonExportOptionsSegmentOnSilenceLongerThanS = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6336,7 +5994,7 @@ type HtmlExportOptionsMaxSegmentChars = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type SegmentedJsonExportOptionsMaxSegmentDurationS = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6345,7 +6003,7 @@ type HtmlExportOptionsMaxSegmentDurationS = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type SegmentedJsonExportOptionsMaxSegmentChars = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6355,13 +6013,13 @@ type HtmlExportOptionsSegmentOnSilenceLongerThanS = number | null;
 * OpenAPI spec version: 1.0
 */
 
-interface
+interface SegmentedJsonExportOptions {
     include_speakers?: boolean;
     include_timestamps?: boolean;
-    format:
-    segment_on_silence_longer_than_s?:
-    max_segment_duration_s?:
-    max_segment_chars?:
+    format: SegmentedJsonExportOptionsFormat;
+    segment_on_silence_longer_than_s?: SegmentedJsonExportOptionsSegmentOnSilenceLongerThanS;
+    max_segment_duration_s?: SegmentedJsonExportOptionsMaxSegmentDurationS;
+    max_segment_chars?: SegmentedJsonExportOptionsMaxSegmentChars;
 }
 
 /**
@@ -6371,9 +6029,9 @@ interface HtmlExportOptions {
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
-declare const
-    readonly
+type DocxExportOptionsFormat = (typeof DocxExportOptionsFormat)[keyof typeof DocxExportOptionsFormat];
+declare const DocxExportOptionsFormat: {
+    readonly docx: "docx";
 };
 
 /**
@@ -6383,7 +6041,7 @@ declare const PdfExportOptionsFormat: {
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type DocxExportOptionsSegmentOnSilenceLongerThanS = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6392,7 +6050,7 @@ type PdfExportOptionsMaxSegmentChars = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type DocxExportOptionsMaxSegmentDurationS = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6401,7 +6059,7 @@ type PdfExportOptionsMaxSegmentDurationS = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type DocxExportOptionsMaxSegmentChars = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6411,13 +6069,13 @@ type PdfExportOptionsSegmentOnSilenceLongerThanS = number | null;
 * OpenAPI spec version: 1.0
 */
 
-interface
+interface DocxExportOptions {
     include_speakers?: boolean;
     include_timestamps?: boolean;
-    format:
-    segment_on_silence_longer_than_s?:
-    max_segment_duration_s?:
-    max_segment_chars?:
+    format: DocxExportOptionsFormat;
+    segment_on_silence_longer_than_s?: DocxExportOptionsSegmentOnSilenceLongerThanS;
+    max_segment_duration_s?: DocxExportOptionsMaxSegmentDurationS;
+    max_segment_chars?: DocxExportOptionsMaxSegmentChars;
 }
 
 /**
@@ -6427,9 +6085,9 @@ interface PdfExportOptions {
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
-declare const
-    readonly
+type PdfExportOptionsFormat = (typeof PdfExportOptionsFormat)[keyof typeof PdfExportOptionsFormat];
+declare const PdfExportOptionsFormat: {
+    readonly pdf: "pdf";
 };
 
 /**
@@ -6439,7 +6097,7 @@ declare const SegmentedJsonExportOptionsFormat: {
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type PdfExportOptionsSegmentOnSilenceLongerThanS = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6448,7 +6106,7 @@ type SegmentedJsonExportOptionsMaxSegmentChars = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type PdfExportOptionsMaxSegmentDurationS = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6457,7 +6115,7 @@ type SegmentedJsonExportOptionsMaxSegmentDurationS = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type PdfExportOptionsMaxSegmentChars = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6467,13 +6125,13 @@ type SegmentedJsonExportOptionsSegmentOnSilenceLongerThanS = number | null;
 * OpenAPI spec version: 1.0
 */
 
-interface
+interface PdfExportOptions {
     include_speakers?: boolean;
     include_timestamps?: boolean;
-    format:
-    segment_on_silence_longer_than_s?:
-    max_segment_duration_s?:
-    max_segment_chars?:
+    format: PdfExportOptionsFormat;
+    segment_on_silence_longer_than_s?: PdfExportOptionsSegmentOnSilenceLongerThanS;
+    max_segment_duration_s?: PdfExportOptionsMaxSegmentDurationS;
+    max_segment_chars?: PdfExportOptionsMaxSegmentChars;
 }
 
 /**
@@ -6483,10 +6141,7 @@ interface SegmentedJsonExportOptions {
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
-declare const SrtExportOptionsFormat: {
-    readonly srt: "srt";
-};
+type TxtExportOptionsMaxCharactersPerLine = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6495,7 +6150,10 @@ declare const SrtExportOptionsFormat: {
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type TxtExportOptionsFormat = (typeof TxtExportOptionsFormat)[keyof typeof TxtExportOptionsFormat];
+declare const TxtExportOptionsFormat: {
+    readonly txt: "txt";
+};
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6504,7 +6162,7 @@ type SrtExportOptionsMaxCharactersPerLine = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type TxtExportOptionsSegmentOnSilenceLongerThanS = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6513,7 +6171,7 @@ type SrtExportOptionsMaxSegmentChars = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type TxtExportOptionsMaxSegmentDurationS = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6522,7 +6180,7 @@ type SrtExportOptionsMaxSegmentDurationS = number | null;
 * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
 * OpenAPI spec version: 1.0
 */
-type
+type TxtExportOptionsMaxSegmentChars = number | null;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6532,14 +6190,14 @@ type SrtExportOptionsSegmentOnSilenceLongerThanS = number | null;
 * OpenAPI spec version: 1.0
 */
 
-interface
-    max_characters_per_line?:
+interface TxtExportOptions {
+    max_characters_per_line?: TxtExportOptionsMaxCharactersPerLine;
     include_speakers?: boolean;
     include_timestamps?: boolean;
-    format:
-    segment_on_silence_longer_than_s?:
-    max_segment_duration_s?:
-    max_segment_chars?:
+    format: TxtExportOptionsFormat;
+    segment_on_silence_longer_than_s?: TxtExportOptionsSegmentOnSilenceLongerThanS;
+    max_segment_duration_s?: TxtExportOptionsMaxSegmentDurationS;
+    max_segment_chars?: TxtExportOptionsMaxSegmentChars;
 }
 
 /**
@@ -6549,9 +6207,9 @@ interface SrtExportOptions {
|
|
|
6549
6207
|
* ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
|
|
6550
6208
|
* OpenAPI spec version: 1.0
|
|
6551
6209
|
*/
|
|
6552
|
-
type
|
|
6553
|
-
declare const
|
|
6554
|
-
readonly
|
|
6210
|
+
type HtmlExportOptionsFormat = (typeof HtmlExportOptionsFormat)[keyof typeof HtmlExportOptionsFormat];
|
|
6211
|
+
declare const HtmlExportOptionsFormat: {
|
|
6212
|
+
readonly html: "html";
|
|
6555
6213
|
};
|
|
6556
6214
|
|
|
6557
6215
|
/**
|
|
@@ -6561,7 +6219,7 @@ declare const TxtExportOptionsFormat: {
|
|
|
6561
6219
|
* ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
|
|
6562
6220
|
* OpenAPI spec version: 1.0
|
|
6563
6221
|
*/
|
|
6564
|
-
type
|
|
6222
|
+
type HtmlExportOptionsSegmentOnSilenceLongerThanS = number | null;
|
|
6565
6223
|
|
|
6566
6224
|
/**
|
|
6567
6225
|
* Generated by orval v7.9.0 🍺
|
|
@@ -6570,7 +6228,7 @@ type TxtExportOptionsMaxCharactersPerLine = number | null;
|
|
|
6570
6228
|
* ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
|
|
6571
6229
|
* OpenAPI spec version: 1.0
|
|
6572
6230
|
*/
|
|
6573
|
-
type
|
|
6231
|
+
type HtmlExportOptionsMaxSegmentDurationS = number | null;
|
|
6574
6232
|
|
|
6575
6233
|
/**
|
|
6576
6234
|
* Generated by orval v7.9.0 🍺
|
|
@@ -6579,7 +6237,7 @@ type TxtExportOptionsMaxSegmentChars = number | null;
|
|
|
6579
6237
|
* ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
|
|
6580
6238
|
* OpenAPI spec version: 1.0
|
|
6581
6239
|
*/
|
|
6582
|
-
type
|
|
6240
|
+
type HtmlExportOptionsMaxSegmentChars = number | null;
|
|
6583
6241
|
|
|
6584
6242
|
/**
|
|
6585
6243
|
* Generated by orval v7.9.0 🍺
|
|
@@ -6588,7 +6246,15 @@ type TxtExportOptionsMaxSegmentDurationS = number | null;
|
|
|
6588
6246
|
* ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
|
|
6589
6247
|
* OpenAPI spec version: 1.0
|
|
6590
6248
|
*/
|
|
6591
|
-
|
|
6249
|
+
|
|
6250
|
+
interface HtmlExportOptions {
|
|
6251
|
+
include_speakers?: boolean;
|
|
6252
|
+
include_timestamps?: boolean;
|
|
6253
|
+
format: HtmlExportOptionsFormat;
|
|
6254
|
+
segment_on_silence_longer_than_s?: HtmlExportOptionsSegmentOnSilenceLongerThanS;
|
|
6255
|
+
max_segment_duration_s?: HtmlExportOptionsMaxSegmentDurationS;
|
|
+  max_segment_chars?: HtmlExportOptionsMaxSegmentChars;
+}

 /**
  * Generated by orval v7.9.0 🍺
@@ -6597,16 +6263,7 @@ type TxtExportOptionsSegmentOnSilenceLongerThanS = number | null;
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
-
-interface TxtExportOptions {
-  max_characters_per_line?: TxtExportOptionsMaxCharactersPerLine;
-  include_speakers?: boolean;
-  include_timestamps?: boolean;
-  format: TxtExportOptionsFormat;
-  segment_on_silence_longer_than_s?: TxtExportOptionsSegmentOnSilenceLongerThanS;
-  max_segment_duration_s?: TxtExportOptionsMaxSegmentDurationS;
-  max_segment_chars?: TxtExportOptionsMaxSegmentChars;
-}
+type SrtExportOptionsMaxCharactersPerLine = number | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6615,8 +6272,10 @@ interface TxtExportOptions {
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
-
-
+type SrtExportOptionsFormat = (typeof SrtExportOptionsFormat)[keyof typeof SrtExportOptionsFormat];
+declare const SrtExportOptionsFormat: {
+  readonly srt: "srt";
+};

 /**
  * Generated by orval v7.9.0 🍺
@@ -6625,11 +6284,16 @@ type ExportOptions = SegmentedJsonExportOptions | DocxExportOptions | PdfExportO
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
+type SrtExportOptionsSegmentOnSilenceLongerThanS = number | null;

 /**
- *
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * ElevenLabs Speech-to-Text API
+ * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
+ * OpenAPI spec version: 1.0
  */
-type
+type SrtExportOptionsMaxSegmentDurationS = number | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6638,10 +6302,7 @@ type AdditionalFormats = ExportOptions[];
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
-
- * The HTTPS URL of the file to transcribe. Exactly one of the file or cloud_storage_url parameters must be provided. The file must be accessible via HTTPS and the file size must be less than 2GB. Any valid HTTPS URL is accepted, including URLs from cloud storage providers (AWS S3, Google Cloud Storage, Cloudflare R2, etc.), CDNs, or any other HTTPS source. URLs can be pre-signed or include authentication tokens in query parameters.
- */
-type BodySpeechToTextV1SpeechToTextPostCloudStorageUrl = string | null;
+type SrtExportOptionsMaxSegmentChars = number | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6650,10 +6311,16 @@ type BodySpeechToTextV1SpeechToTextPostCloudStorageUrl = string | null;
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
-
-
-
-
+
+interface SrtExportOptions {
+  max_characters_per_line?: SrtExportOptionsMaxCharactersPerLine;
+  include_speakers?: boolean;
+  include_timestamps?: boolean;
+  format: SrtExportOptionsFormat;
+  segment_on_silence_longer_than_s?: SrtExportOptionsSegmentOnSilenceLongerThanS;
+  max_segment_duration_s?: SrtExportOptionsMaxSegmentDurationS;
+  max_segment_chars?: SrtExportOptionsMaxSegmentChars;
+}

 /**
  * Generated by orval v7.9.0 🍺
@@ -6662,10 +6329,8 @@ type BodySpeechToTextV1SpeechToTextPostDiarizationThreshold = number | null;
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
-
-
- */
-type BodySpeechToTextV1SpeechToTextPostEntityDetection = string | string[] | null;
+
+type ExportOptions = SegmentedJsonExportOptions | DocxExportOptions | PdfExportOptions | TxtExportOptions | HtmlExportOptions | SrtExportOptions;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6674,10 +6339,11 @@ type BodySpeechToTextV1SpeechToTextPostEntityDetection = string | string[] | nul
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
+
 /**
- *
+ * @maxItems 10
  */
-type
+type AdditionalFormats = ExportOptions[];

 /**
  * Generated by orval v7.9.0 🍺
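Net effect of the hunks above: the relocated SRT aliases now feed a dedicated `SrtExportOptions` interface, which joins the `ExportOptions` union consumed by `AdditionalFormats` (capped at 10 entries via `@maxItems`). A minimal usage sketch, assuming the types are imported from the package; the literal values are illustrative, not defaults:

```ts
// Request an SRT sidecar alongside the transcript.
// "srt" is the only member of the SrtExportOptionsFormat const map.
const srt: SrtExportOptions = {
  format: "srt",
  max_characters_per_line: 42, // number | null
  include_speakers: true,
  include_timestamps: true,
  segment_on_silence_longer_than_s: 1.5,
  max_segment_duration_s: 6,
  max_segment_chars: 84,
};

// AdditionalFormats = ExportOptions[] (at most 10 entries).
const additionalFormats: AdditionalFormats = [srt];
```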
@@ -6703,9 +6369,10 @@ declare const BodySpeechToTextV1SpeechToTextPostFileFormat: {
  * OpenAPI spec version: 1.0
  */
 /**
- *
+ * The HTTPS URL of the file to transcribe. Exactly one of the file or cloud_storage_url parameters must be provided. The file must be accessible via HTTPS and the file size must be less than 2GB. Any valid HTTPS URL is accepted, including URLs from cloud storage providers (AWS S3, Google Cloud Storage, Cloudflare R2, etc.), CDNs, or any other HTTPS source. URLs can be pre-signed or include authentication tokens in query parameters.
+ * @deprecated
  */
-type
+type BodySpeechToTextV1SpeechToTextPostCloudStorageUrl = string | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6715,13 +6382,9 @@ type BodySpeechToTextV1SpeechToTextPostLanguageCode = string | null;
  * OpenAPI spec version: 1.0
  */
 /**
- * The
+ * The URL of an audio or video file to transcribe. Supports hosted video or audio files, YouTube video URLs, TikTok video URLs, and other video hosting services.
  */
-type
-declare const BodySpeechToTextV1SpeechToTextPostModelId: {
-  readonly scribe_v1: "scribe_v1";
-  readonly scribe_v2: "scribe_v2";
-};
+type BodySpeechToTextV1SpeechToTextPostSourceUrl = string | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6731,9 +6394,9 @@ declare const BodySpeechToTextV1SpeechToTextPostModelId: {
  * OpenAPI spec version: 1.0
  */
 /**
- *
+ * Optional specific webhook ID to send the transcription result to. Only valid when webhook is set to true. If not provided, transcription will be sent to all configured speech-to-text webhooks.
  */
-type
+type BodySpeechToTextV1SpeechToTextPostWebhookId = string | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6743,9 +6406,9 @@ type BodySpeechToTextV1SpeechToTextPostNumSpeakers = number | null;
  * OpenAPI spec version: 1.0
  */
 /**
- *
+ * Controls the randomness of the transcription output. Accepts values between 0.0 and 2.0, where higher values result in more diverse and less deterministic results. If omitted, we will use a temperature based on the model you selected which is usually 0.
  */
-type
+type BodySpeechToTextV1SpeechToTextPostTemperature = number | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6755,9 +6418,9 @@ type BodySpeechToTextV1SpeechToTextPostSeed = number | null;
  * OpenAPI spec version: 1.0
  */
 /**
- *
+ * If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 2147483647.
  */
-type
+type BodySpeechToTextV1SpeechToTextPostSeed = number | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6766,14 +6429,8 @@ type BodySpeechToTextV1SpeechToTextPostTemperature = number | null;
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
-
-
- */
-type BodySpeechToTextV1SpeechToTextPostTimestampsGranularity = (typeof BodySpeechToTextV1SpeechToTextPostTimestampsGranularity)[keyof typeof BodySpeechToTextV1SpeechToTextPostTimestampsGranularity];
-declare const BodySpeechToTextV1SpeechToTextPostTimestampsGranularity: {
-  readonly none: "none";
-  readonly word: "word";
-  readonly character: "character";
+type BodySpeechToTextV1SpeechToTextPostWebhookMetadataAnyOf = {
+  [key: string]: unknown;
 };

 /**
@@ -6783,10 +6440,11 @@ declare const BodySpeechToTextV1SpeechToTextPostTimestampsGranularity: {
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
+
 /**
- * Optional
+ * Optional metadata to be included in the webhook response. This should be a JSON string representing an object with a maximum depth of 2 levels and maximum size of 16KB. Useful for tracking internal IDs, job references, or other contextual information.
  */
-type
+type BodySpeechToTextV1SpeechToTextPostWebhookMetadata = string | BodySpeechToTextV1SpeechToTextPostWebhookMetadataAnyOf | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6795,9 +6453,10 @@ type BodySpeechToTextV1SpeechToTextPostWebhookId = string | null;
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
-
-
-
+/**
+ * Detect entities in the transcript. Can be 'all' to detect all entities, a single entity type or category string, or a list of entity types/categories. Categories include 'pii', 'phi', 'pci', 'other', 'offensive_language'. When enabled, detected entities will be returned in the 'entities' field with their text, type, and character positions. Usage of this parameter will incur an additional 30% surcharge on the base transcription cost.
+ */
+type BodySpeechToTextV1SpeechToTextPostEntityDetection = string | string[] | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6806,11 +6465,10 @@ type BodySpeechToTextV1SpeechToTextPostWebhookMetadataAnyOf = {
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
-
 /**
- *
+ * Redact entities from the transcript text. Accepts the same format as entity_detection: 'all', a category ('pii', 'phi'), or specific entity types. Must be a subset of entity_detection. When redaction is enabled, the entities field will not be returned. Usage of this parameter will incur an additional 30% surcharge on the base transcription cost.
  */
-type
+type BodySpeechToTextV1SpeechToTextPostEntityRedaction = string | string[] | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -6823,7 +6481,7 @@ type BodySpeechToTextV1SpeechToTextPostWebhookMetadata = string | BodySpeechToTe
 interface BodySpeechToTextV1SpeechToTextPost {
   /** The ID of the model to use for transcription. */
   model_id: BodySpeechToTextV1SpeechToTextPostModelId;
-  /** The file to transcribe. All major audio and video formats are supported. Exactly one of the file or cloud_storage_url parameters must be provided. The file size must be less than 3.0GB. */
+  /** The file to transcribe (100ms minimum audio length). All major audio and video formats are supported. Exactly one of the file or cloud_storage_url parameters must be provided. The file size must be less than 3.0GB. */
   file?: BodySpeechToTextV1SpeechToTextPostFile;
   /** An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically. */
   language_code?: BodySpeechToTextV1SpeechToTextPostLanguageCode;
@@ -6841,8 +6499,13 @@ interface BodySpeechToTextV1SpeechToTextPost {
   additional_formats?: AdditionalFormats;
   /** The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform. */
   file_format?: BodySpeechToTextV1SpeechToTextPostFileFormat;
-  /**
+  /**
+   * The HTTPS URL of the file to transcribe. Exactly one of the file or cloud_storage_url parameters must be provided. The file must be accessible via HTTPS and the file size must be less than 2GB. Any valid HTTPS URL is accepted, including URLs from cloud storage providers (AWS S3, Google Cloud Storage, Cloudflare R2, etc.), CDNs, or any other HTTPS source. URLs can be pre-signed or include authentication tokens in query parameters.
+   * @deprecated
+   */
   cloud_storage_url?: BodySpeechToTextV1SpeechToTextPostCloudStorageUrl;
+  /** The URL of an audio or video file to transcribe. Supports hosted video or audio files, YouTube video URLs, TikTok video URLs, and other video hosting services. */
+  source_url?: BodySpeechToTextV1SpeechToTextPostSourceUrl;
   /** Whether to send the transcription result to configured speech-to-text webhooks. If set the request will return early without the transcription, which will be delivered later via webhook. */
   webhook?: boolean;
   /** Optional specific webhook ID to send the transcription result to. Only valid when webhook is set to true. If not provided, transcription will be sent to all configured speech-to-text webhooks. */
@@ -6851,15 +6514,21 @@ interface BodySpeechToTextV1SpeechToTextPost {
   temperature?: BodySpeechToTextV1SpeechToTextPostTemperature;
   /** If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 2147483647. */
   seed?: BodySpeechToTextV1SpeechToTextPostSeed;
-  /** Whether the audio file contains multiple channels where each channel contains a single speaker. When enabled, each channel will be transcribed independently and the results will be combined. Each word in the response will include a 'channel_index' field indicating which channel it was spoken on. A maximum of 5 channels is supported. */
+  /** Whether the audio file contains multiple channels where each channel contains a single speaker. When enabled, each channel will be transcribed independently and the results will be combined. Each word in the response will include a 'channel_index' field indicating which channel it was spoken on. A maximum of 5 channels is supported. Each channel is billed independently at the full audio duration, so cost scales linearly with the number of channels. */
   use_multi_channel?: boolean;
   /** Optional metadata to be included in the webhook response. This should be a JSON string representing an object with a maximum depth of 2 levels and maximum size of 16KB. Useful for tracking internal IDs, job references, or other contextual information. */
   webhook_metadata?: BodySpeechToTextV1SpeechToTextPostWebhookMetadata;
-  /** Detect entities in the transcript. Can be 'all' to detect all entities, a single entity type or category string, or a list of entity types/categories. Categories include 'pii', 'phi', 'pci', 'other', 'offensive_language'. When enabled, detected entities will be returned in the 'entities' field with their text, type, and character positions. Usage of this parameter will incur additional
+  /** Detect entities in the transcript. Can be 'all' to detect all entities, a single entity type or category string, or a list of entity types/categories. Categories include 'pii', 'phi', 'pci', 'other', 'offensive_language'. When enabled, detected entities will be returned in the 'entities' field with their text, type, and character positions. Usage of this parameter will incur an additional 30% surcharge on the base transcription cost. */
   entity_detection?: BodySpeechToTextV1SpeechToTextPostEntityDetection;
   /** If true, the transcription will not have any filler words, false starts and non-speech sounds. Only supported with scribe_v2 model. */
   no_verbatim?: boolean;
-  /**
+  /** Whether to detect speaker roles (agent vs customer). Requires diarize=true. Cannot be used with use_multi_channel=true. When enabled, speaker_id values will be 'agent' and 'customer' instead of 'speaker_0', 'speaker_1', etc. Usage incurs an additional 10% surcharge on base transcription cost. */
+  detect_speaker_roles?: boolean;
+  /** Redact entities from the transcript text. Accepts the same format as entity_detection: 'all', a category ('pii', 'phi'), or specific entity types. Must be a subset of entity_detection. When redaction is enabled, the entities field will not be returned. Usage of this parameter will incur an additional 30% surcharge on the base transcription cost. */
+  entity_redaction?: BodySpeechToTextV1SpeechToTextPostEntityRedaction;
+  /** How to format redacted entities. 'redacted' replaces with {REDACTED}, 'entity_type' replaces with {ENTITY_TYPE}, 'enumerated_entity_type' replaces with {ENTITY_TYPE_N} where N enumerates each occurrence. Only used when entity_redaction is set. */
+  entity_redaction_mode?: string;
+  /** A list of keyterms to bias the transcription towards. The keyterms are words or phrases you want the model to recognise more accurately. The number of keyterms cannot exceed 1000. The length of each keyterm must be less than 50 characters. Keyterms can contain at most 5 words (after normalisation). For example ["hello", "world", "technical term"]. Usage of this parameter will incur an additional 20% surcharge on the base transcription cost. When more than 100 keyterms are provided, a minimum billable duration of 20 seconds applies per request. */
   keyterms?: string[];
 }

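With the hunks above applied, the request body distinguishes three input routes (`file`, the now-`@deprecated` `cloud_storage_url`, and the new `source_url`) and layers redaction on top of detection. A hedged sketch of a webhook-driven request using the new fields; the model ID and values are illustrative, and `entity_redaction` must stay a subset of `entity_detection`:

```ts
const request: BodySpeechToTextV1SpeechToTextPost = {
  model_id: "scribe_v2", // assumed still valid; the removed ModelId const listed scribe_v1/scribe_v2
  source_url: "https://example.com/call-recording.mp4", // supersedes cloud_storage_url
  webhook: true,
  webhook_metadata: JSON.stringify({ jobId: "job_123" }), // string form, max depth 2, max 16KB
  entity_detection: ["pii", "pci"], // +30% surcharge
  entity_redaction: ["pii"], // subset of entity_detection; +30% surcharge
  entity_redaction_mode: "entity_type", // {ENTITY_TYPE} placeholders
  detect_speaker_roles: true, // requires diarization enabled elsewhere in the request; +10% surcharge
  keyterms: ["Voice Router", "Deepgram"], // +20% surcharge
};
```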
@@ -7127,7 +6796,7 @@ declare const StreamingSupportedRegions: {
  * The Realtime model used for this session.

  */
-type RealtimeSessionCreateRequestGAModel = string | "gpt-realtime" | "gpt-realtime-2025-08-28" | "gpt-4o-realtime-preview" | "gpt-4o-realtime-preview-2024-10-01" | "gpt-4o-realtime-preview-2024-12-17" | "gpt-4o-realtime-preview-2025-06-03" | "gpt-4o-mini-realtime-preview" | "gpt-4o-mini-realtime-preview-2024-12-17" | "gpt-realtime-mini" | "gpt-realtime-mini-2025-10-06" | "gpt-realtime-mini-2025-12-15" | "gpt-audio-mini" | "gpt-audio-mini-2025-10-06" | "gpt-audio-mini-2025-12-15";
+type RealtimeSessionCreateRequestGAModel = string | "gpt-realtime" | "gpt-realtime-1.5" | "gpt-realtime-2025-08-28" | "gpt-4o-realtime-preview" | "gpt-4o-realtime-preview-2024-10-01" | "gpt-4o-realtime-preview-2024-12-17" | "gpt-4o-realtime-preview-2025-06-03" | "gpt-4o-mini-realtime-preview" | "gpt-4o-mini-realtime-preview-2024-12-17" | "gpt-realtime-mini" | "gpt-realtime-mini-2025-10-06" | "gpt-realtime-mini-2025-12-15" | "gpt-audio-1.5" | "gpt-audio-mini" | "gpt-audio-mini-2025-10-06" | "gpt-audio-mini-2025-12-15";

 /**
  * Generated by orval v7.9.0 🍺
@@ -7276,13 +6945,13 @@ interface DeepgramStreamingOptions {
    * { encoding: DeepgramEncoding.linear16 }
    * ```
    */
-  encoding?: (typeof
+  encoding?: (typeof V1ListenPostParametersEncoding)[keyof typeof V1ListenPostParametersEncoding];
   /** Sample rate in Hz */
   sampleRate?: number;
   /** Number of audio channels */
   channels?: number;
   /** Language code (BCP-47 format, e.g., 'en', 'en-US', 'es') */
-  language?:
+  language?: string;
   /**
    * Model to use for transcription
    *
@@ -7299,7 +6968,7 @@ interface DeepgramStreamingOptions {
    */
   model?: DeepgramModelCode;
   /** Model version (e.g., 'latest') */
-  version?:
+  version?: V1ListenPostParametersVersion;
   /** Enable language detection */
   languageDetection?: boolean;
   /** Enable speaker diarization */
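After the rename, `DeepgramStreamingOptions.encoding` and `version` point at the `V1ListenPost*` parameter types rather than the removed `ListenV1*` aliases, and `language` loosens to a plain BCP-47 string. A sketch under those assumptions ("linear16" comes from the JSDoc example above; "nova-2" is an assumed member of `DeepgramModelCode`):

```ts
const options: DeepgramStreamingOptions = {
  encoding: "linear16", // member of the V1ListenPostParametersEncoding const map
  sampleRate: 16_000,
  channels: 1,
  language: "en-US", // now a plain string
  model: "nova-2" as DeepgramModelCode, // assumed code; the real list lives in ./constants
  version: "latest", // V1ListenPostParametersVersion, per the JSDoc example
};
```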
@@ -7830,221 +7499,70 @@ declare const TranscriptionControllerListV2KindItem: {
 /**
  * Generated by orval v7.9.0 🍺
  * Do not edit manually.
- * Gladia Control API
- * OpenAPI spec version: 1.0
- */
-
-type TranscriptionControllerListV2Params = {
-  /**
-   * The starting point for pagination. A value of 0 starts from the first item.
-   */
-  offset?: number;
-  /**
-   * The maximum number of items to return. Useful for pagination and controlling data payload size.
-   */
-  limit?: number;
-  /**
-   * Filter items relevant to a specific date in ISO format (YYYY-MM-DD).
-   */
-  date?: string;
-  /**
-   * Include items that occurred before the specified date in ISO format.
-   */
-  before_date?: string;
-  /**
-   * Filter for items after the specified date. Use with `before_date` for a range. Date in ISO format.
-   */
-  after_date?: string;
-  /**
-   * Filter the list based on item status. Accepts multiple values from the predefined list.
-   */
-  status?: TranscriptionControllerListV2StatusItem[];
-  custom_metadata?: {
-    [key: string]: unknown;
-  };
-  /**
-   * Filter the list based on the item type. Supports multiple values from the predefined list.
-   */
-  kind?: TranscriptionControllerListV2KindItem[];
-};
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Start date of the requested date range. Formats accepted are YYYY-MM-DD, YYYY-MM-DDTHH:MM:SS, or YYYY-MM-DDTHH:MM:SS+HH:MM
- */
-type ManageV1StartDateTimeParameter = string;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * End date of the requested date range. Formats accepted are YYYY-MM-DD, YYYY-MM-DDTHH:MM:SS, or YYYY-MM-DDTHH:MM:SS+HH:MM
- */
-type ManageV1EndDateTimeParameter = string;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Number of results to return per page. Default 10. Range [1,1000]
- */
-type ManageV1LimitParameter = number;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Navigate and return the results to retrieve specific portions of information of the response
- */
-type ManageV1PageParameter = number;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Filter for requests where a specific accessor was used
- */
-type ManageV1FilterAccessorParameter = string;
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
-/**
- * Filter for a specific request id
+ * Gladia Control API
+ * OpenAPI spec version: 1.0
  */
-
+
+type TranscriptionControllerListV2Params = {
+  /**
+   * The starting point for pagination. A value of 0 starts from the first item.
+   */
+  offset?: number;
+  /**
+   * The maximum number of items to return. Useful for pagination and controlling data payload size.
+   */
+  limit?: number;
+  /**
+   * Filter items relevant to a specific date in ISO format (YYYY-MM-DD).
+   */
+  date?: string;
+  /**
+   * Include items that occurred before the specified date in ISO format.
+   */
+  before_date?: string;
+  /**
+   * Filter for items after the specified date. Use with `before_date` for a range. Date in ISO format.
+   */
+  after_date?: string;
+  /**
+   * Filter the list based on item status. Accepts multiple values from the predefined list.
+   */
+  status?: TranscriptionControllerListV2StatusItem[];
+  custom_metadata?: {
+    [key: string]: unknown;
+  };
+  /**
+   * Filter the list based on the item type. Supports multiple values from the predefined list.
+   */
+  kind?: TranscriptionControllerListV2KindItem[];
+};

 /**
  * Generated by orval v7.9.0 🍺
  * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
  * OpenAPI spec version: 1.0.0
  */
 /**
  * Deployment type for the requests
  */
-
-
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-/**
- * ManageV1FilterDeploymentParameter type definition
- */
-type ManageV1FilterDeploymentParameter = typeof ManageV1FilterDeploymentParameter[keyof typeof ManageV1FilterDeploymentParameter];
-declare const ManageV1FilterDeploymentParameter: {
+type V1ProjectsProjectIdRequestsGetParametersDeployment = (typeof V1ProjectsProjectIdRequestsGetParametersDeployment)[keyof typeof V1ProjectsProjectIdRequestsGetParametersDeployment];
+declare const V1ProjectsProjectIdRequestsGetParametersDeployment: {
   readonly hosted: "hosted";
   readonly beta: "beta";
+  readonly "self-hosted": "self-hosted";
 };

 /**
  * Generated by orval v7.9.0 🍺
  * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
  * OpenAPI spec version: 1.0.0
  */
-
-
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-/**
- * ManageV1FilterEndpointParameter type definition
- */
-type ManageV1FilterEndpointParameter = typeof ManageV1FilterEndpointParameter[keyof typeof ManageV1FilterEndpointParameter];
-declare const ManageV1FilterEndpointParameter: {
+type V1ProjectsProjectIdRequestsGetParametersEndpoint = (typeof V1ProjectsProjectIdRequestsGetParametersEndpoint)[keyof typeof V1ProjectsProjectIdRequestsGetParametersEndpoint];
+declare const V1ProjectsProjectIdRequestsGetParametersEndpoint: {
   readonly listen: "listen";
   readonly read: "read";
   readonly speak: "speak";
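The Gladia list-params type itself is unchanged by the move; only the doc header above it is rewritten. For orientation, a sketch of a date-bounded listing filter (dates are illustrative; `status` and `kind` take values from the `TranscriptionControllerListV2StatusItem` and `...KindItem` const maps referenced above):

```ts
const gladiaListParams: TranscriptionControllerListV2Params = {
  offset: 0,
  limit: 50,
  after_date: "2025-01-01", // ISO date; pairs with before_date for a range
  before_date: "2025-01-31",
  custom_metadata: { team: "support" }, // free-form key/value filter
};
```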
@@ -8054,52 +7572,15 @@ declare const ManageV1FilterEndpointParameter: {
 /**
  * Generated by orval v7.9.0 🍺
  * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
  * OpenAPI spec version: 1.0.0
  */
 /**
  * Method type for the request
  */
-
-
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-/**
- * ManageV1FilterMethodParameter type definition
- */
-type ManageV1FilterMethodParameter = typeof ManageV1FilterMethodParameter[keyof typeof ManageV1FilterMethodParameter];
-declare const ManageV1FilterMethodParameter: {
+type V1ProjectsProjectIdRequestsGetParametersMethod = (typeof V1ProjectsProjectIdRequestsGetParametersMethod)[keyof typeof V1ProjectsProjectIdRequestsGetParametersMethod];
+declare const V1ProjectsProjectIdRequestsGetParametersMethod: {
   readonly sync: "sync";
   readonly async: "async";
   readonly streaming: "streaming";
@@ -8108,49 +7589,12 @@ declare const ManageV1FilterMethodParameter: {
 /**
  * Generated by orval v7.9.0 🍺
  * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
  * OpenAPI spec version: 1.0.0
  */
-
-
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-/**
- * ManageV1FilterStatusParameter type definition
- */
-type ManageV1FilterStatusParameter = typeof ManageV1FilterStatusParameter[keyof typeof ManageV1FilterStatusParameter];
-declare const ManageV1FilterStatusParameter: {
+type V1ProjectsProjectIdRequestsGetParametersStatus = (typeof V1ProjectsProjectIdRequestsGetParametersStatus)[keyof typeof V1ProjectsProjectIdRequestsGetParametersStatus];
+declare const V1ProjectsProjectIdRequestsGetParametersStatus: {
   readonly succeeded: "succeeded";
   readonly failed: "failed";
 };
@@ -8158,53 +7602,52 @@ declare const ManageV1FilterStatusParameter: {
 /**
  * Generated by orval v7.9.0 🍺
  * Do not edit manually.
- * Deepgram API
- *
-
+ * Deepgram API
+ * Deepgram API - Transcription, TTS, text analysis, and request history endpoints. Filtered from the official Deepgram API spec.
  * OpenAPI spec version: 1.0.0
  */

-type
+type ListProjectRequestsParams = {
   /**
    * Start date of the requested date range. Formats accepted are YYYY-MM-DD, YYYY-MM-DDTHH:MM:SS, or YYYY-MM-DDTHH:MM:SS+HH:MM
    */
-  start?:
+  start?: string;
   /**
    * End date of the requested date range. Formats accepted are YYYY-MM-DD, YYYY-MM-DDTHH:MM:SS, or YYYY-MM-DDTHH:MM:SS+HH:MM
    */
-  end?:
+  end?: string;
   /**
    * Number of results to return per page. Default 10. Range [1,1000]
    */
-  limit?:
+  limit?: number;
   /**
    * Navigate and return the results to retrieve specific portions of information of the response
    */
-  page?:
+  page?: number;
   /**
    * Filter for requests where a specific accessor was used
    */
-  accessor?:
+  accessor?: string;
   /**
    * Filter for a specific request id
    */
-  request_id?:
+  request_id?: string;
   /**
    * Filter for requests where a specific deployment was used
    */
-  deployment?:
+  deployment?: V1ProjectsProjectIdRequestsGetParametersDeployment;
   /**
    * Filter for requests where a specific endpoint was used
    */
-  endpoint?:
+  endpoint?: V1ProjectsProjectIdRequestsGetParametersEndpoint;
   /**
    * Filter for requests where a specific method was used
    */
-  method?:
+  method?: V1ProjectsProjectIdRequestsGetParametersMethod;
   /**
    * Filter for requests that succeeded (status code < 300) or failed (status code >=400)
    */
-  status?:
+  status?: V1ProjectsProjectIdRequestsGetParametersStatus;
 };

 /**
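`ListProjectRequestsParams` collapses the scattered `ManageV1*Parameter` aliases into inline primitives plus the three `V1ProjectsProjectIdRequestsGet*` const maps (note the new `"self-hosted"` deployment value). A sketch querying Deepgram request history; values are illustrative:

```ts
const historyParams: ListProjectRequestsParams = {
  start: "2025-01-01",
  end: "2025-01-31T23:59:59",
  limit: 100, // range [1,1000], default 10
  page: 0,
  deployment: V1ProjectsProjectIdRequestsGetParametersDeployment["self-hosted"],
  endpoint: V1ProjectsProjectIdRequestsGetParametersEndpoint.listen,
  method: V1ProjectsProjectIdRequestsGetParametersMethod.streaming,
  status: V1ProjectsProjectIdRequestsGetParametersStatus.succeeded,
};
```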
@@ -8418,7 +7861,7 @@ interface ListTranscriptsOptions {
   /** Gladia-specific list options */
   gladia?: Partial<TranscriptionControllerListV2Params>;
   /** Deepgram-specific list options (request history) */
-  deepgram?: Partial<
+  deepgram?: Partial<ListProjectRequestsParams>;
 }
 /**
  * Common transcription options across all providers
@@ -8459,7 +7902,7 @@ interface TranscribeOptions {
    * Code switching configuration (Gladia-specific)
    * @see GladiaCodeSwitchingConfig
    */
-  codeSwitchingConfig?:
+  codeSwitchingConfig?: LanguageConfig;
   /** Enable speaker diarization */
   diarization?: boolean;
   /** Expected number of speakers (for diarization) */
@@ -8488,7 +7931,7 @@ interface TranscribeOptions {
    * Deepgram-specific options (passed directly to API)
    * @see https://developers.deepgram.com/reference/listen-file
    */
-  deepgram?: Partial<
+  deepgram?: Partial<ListenTranscribeParams>;
   /**
    * AssemblyAI-specific options (passed directly to API)
    * @see https://www.assemblyai.com/docs/api-reference/transcripts/submit
|
|
|
10596
10039
|
* ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
|
|
10597
10040
|
* OpenAPI spec version: 1.0
|
|
10598
10041
|
*/
|
|
10599
|
-
|
|
10600
|
-
type SpeechToTextChunkResponseModelAdditionalFormatsAnyOfItem = AdditionalFormatResponseModel | null;
|
|
10601
|
-
|
|
10602
|
-
/**
|
|
10603
|
-
* Generated by orval v7.9.0 🍺
|
|
10604
|
-
* Do not edit manually.
|
|
10605
|
-
* ElevenLabs Speech-to-Text API
|
|
10606
|
-
* ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
|
|
10607
|
-
* OpenAPI spec version: 1.0
|
|
10608
|
-
*/
|
|
10609
|
-
|
|
10610
10042
|
/**
|
|
10611
|
-
*
|
|
10043
|
+
* The start time of the word or sound in seconds.
|
|
10612
10044
|
*/
|
|
10613
|
-
type
|
|
10045
|
+
type SpeechToTextWordResponseModelStart = number | null;
|
|
10614
10046
|
|
|
10615
10047
|
/**
|
|
10616
10048
|
* Generated by orval v7.9.0 🍺
|
|
@@ -10620,9 +10052,9 @@ type SpeechToTextChunkResponseModelAdditionalFormats = SpeechToTextChunkResponse
|
|
|
10620
10052
|
* OpenAPI spec version: 1.0
|
|
10621
10053
|
*/
|
|
10622
10054
|
/**
|
|
10623
|
-
* The
|
|
10055
|
+
* The end time of the word or sound in seconds.
|
|
10624
10056
|
*/
|
|
10625
|
-
type
|
|
10057
|
+
type SpeechToTextWordResponseModelEnd = number | null;
|
|
10626
10058
|
|
|
10627
10059
|
/**
|
|
10628
10060
|
* Generated by orval v7.9.0 🍺
|
|
@@ -10631,11 +10063,15 @@ type SpeechToTextChunkResponseModelChannelIndex = number | null;
|
|
|
10631
10063
|
* ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
|
|
10632
10064
|
* OpenAPI spec version: 1.0
|
|
10633
10065
|
*/
|
|
10634
|
-
|
|
10635
10066
|
/**
|
|
10636
|
-
*
|
|
10067
|
+
* The type of the word or sound. 'audio_event' is used for non-word sounds like laughter or footsteps.
|
|
10637
10068
|
*/
|
|
10638
|
-
type
|
|
10069
|
+
type SpeechToTextWordResponseModelType = (typeof SpeechToTextWordResponseModelType)[keyof typeof SpeechToTextWordResponseModelType];
|
|
10070
|
+
declare const SpeechToTextWordResponseModelType: {
|
|
10071
|
+
readonly word: "word";
|
|
10072
|
+
readonly spacing: "spacing";
|
|
10073
|
+
readonly audio_event: "audio_event";
|
|
10074
|
+
};
|
|
10639
10075
|
|
|
10640
10076
|
/**
|
|
10641
10077
|
* Generated by orval v7.9.0 🍺
|
|
@@ -10645,9 +10081,9 @@ type SpeechToTextChunkResponseModelEntities = DetectedEntity[] | null;
|
|
|
10645
10081
|
* OpenAPI spec version: 1.0
|
|
10646
10082
|
*/
|
|
10647
10083
|
/**
|
|
10648
|
-
*
|
|
10084
|
+
* Unique identifier for the speaker of this word.
|
|
10649
10085
|
*/
|
|
10650
|
-
type
|
|
10086
|
+
type SpeechToTextWordResponseModelSpeakerId = string | null;
|
|
10651
10087
|
|
|
10652
10088
|
/**
|
|
10653
10089
|
* Generated by orval v7.9.0 🍺
|
|
@@ -10657,9 +10093,9 @@ type SpeechToTextChunkResponseModelTranscriptionId = string | null;
|
|
|
10657
10093
|
* OpenAPI spec version: 1.0
|
|
10658
10094
|
*/
|
|
10659
10095
|
/**
|
|
10660
|
-
* The
|
|
10096
|
+
* The start time of the character in seconds.
|
|
10661
10097
|
*/
|
|
10662
|
-
type
|
|
10098
|
+
type SpeechToTextCharacterResponseModelStart = number | null;
|
|
10663
10099
|
|
|
10664
10100
|
/**
|
|
10665
10101
|
* Generated by orval v7.9.0 🍺
|
|
@@ -10669,9 +10105,9 @@ type SpeechToTextCharacterResponseModelEnd = number | null;
|
|
|
10669
10105
|
* OpenAPI spec version: 1.0
|
|
10670
10106
|
*/
|
|
10671
10107
|
/**
|
|
10672
|
-
* The
|
|
10108
|
+
* The end time of the character in seconds.
|
|
10673
10109
|
*/
|
|
10674
|
-
type
|
|
10110
|
+
type SpeechToTextCharacterResponseModelEnd = number | null;
|
|
10675
10111
|
|
|
10676
10112
|
/**
|
|
10677
10113
|
* Generated by orval v7.9.0 🍺
|
|
@@ -10710,10 +10146,26 @@ type SpeechToTextWordResponseModelCharacters = SpeechToTextCharacterResponseMode
|
|
|
10710
10146
|
* ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
|
|
10711
10147
|
* OpenAPI spec version: 1.0
|
|
10712
10148
|
*/
|
|
10149
|
+
|
|
10713
10150
|
/**
|
|
10714
|
-
*
|
|
10151
|
+
* Word-level detail of the transcription with timing information.
|
|
10715
10152
|
*/
|
|
10716
|
-
|
|
10153
|
+
interface SpeechToTextWordResponseModel {
|
|
10154
|
+
/** The word or sound that was transcribed. */
|
|
10155
|
+
text: string;
|
|
10156
|
+
/** The start time of the word or sound in seconds. */
|
|
10157
|
+
start?: SpeechToTextWordResponseModelStart;
|
|
10158
|
+
/** The end time of the word or sound in seconds. */
|
|
10159
|
+
end?: SpeechToTextWordResponseModelEnd;
|
|
10160
|
+
/** The type of the word or sound. 'audio_event' is used for non-word sounds like laughter or footsteps. */
|
|
10161
|
+
type: SpeechToTextWordResponseModelType;
|
|
10162
|
+
/** Unique identifier for the speaker of this word. */
|
|
10163
|
+
speaker_id?: SpeechToTextWordResponseModelSpeakerId;
|
|
10164
|
+
/** The log of the probability with which this word was predicted. Logprobs are in range [-infinity, 0], higher logprobs indicate a higher confidence the model has in its predictions. */
|
|
10165
|
+
logprob: number;
|
|
10166
|
+
/** The characters that make up the word and their timing information. */
|
|
10167
|
+
characters?: SpeechToTextWordResponseModelCharacters;
|
|
10168
|
+
}
|
|
10717
10169
|
|
|
10718
10170
|
/**
|
|
10719
10171
|
* Generated by orval v7.9.0 🍺
|
|
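The word model is now materialized as a single interface (its members were previously scattered through the file). Because `start` and `end` are nullable and `type` distinguishes real words from `spacing` and `audio_event` tokens, consumers generally have to filter; a minimal sketch (the logprob threshold is an arbitrary example):

```ts
// Rebuild a plain transcript from word entries, keeping only
// confidently predicted real words.
function plainText(words: SpeechToTextWordResponseModel[]): string {
  return words
    .filter((w) => w.type === SpeechToTextWordResponseModelType.word)
    .filter((w) => w.logprob > -5) // logprob is in [-infinity, 0]; higher = more confident
    .map((w) => w.text)
    .join(" ");
}
```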
@@ -10723,9 +10175,9 @@ type SpeechToTextWordResponseModelEnd = number | null;
  * OpenAPI spec version: 1.0
  */
 /**
- *
+ * The channel index this transcript belongs to (for multichannel audio).
  */
-type
+type SpeechToTextChunkResponseModelChannelIndex = number | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -10734,10 +10186,21 @@ type SpeechToTextWordResponseModelSpeakerId = string | null;
  * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
  * OpenAPI spec version: 1.0
  */
+
+type SpeechToTextChunkResponseModelAdditionalFormatsAnyOfItem = AdditionalFormatResponseModel | null;
+
 /**
- *
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * ElevenLabs Speech-to-Text API
+ * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
+ * OpenAPI spec version: 1.0
  */
-
+
+/**
+ * Requested additional formats of the transcript.
+ */
+type SpeechToTextChunkResponseModelAdditionalFormats = SpeechToTextChunkResponseModelAdditionalFormatsAnyOfItem[] | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -10747,14 +10210,9 @@ type SpeechToTextWordResponseModelStart = number | null;
  * OpenAPI spec version: 1.0
  */
 /**
- * The
+ * The transcription ID of the response.
  */
-type
-declare const SpeechToTextWordResponseModelType: {
-  readonly word: "word";
-  readonly spacing: "spacing";
-  readonly audio_event: "audio_event";
-};
+type SpeechToTextChunkResponseModelTranscriptionId = string | null;

 /**
  * Generated by orval v7.9.0 🍺
@@ -10765,24 +10223,21 @@ declare const SpeechToTextWordResponseModelType: {
  */

 /**
- *
+ * List of detected entities with their text, type, and character positions in the transcript.
  */
-
-
-
-
-
-
-
-
-
-
-
-
-
-  /** The characters that make up the word and their timing information. */
-  characters?: SpeechToTextWordResponseModelCharacters;
-}
+type SpeechToTextChunkResponseModelEntities = DetectedEntity[] | null;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * ElevenLabs Speech-to-Text API
+ * ElevenLabs Speech-to-Text API - Batch and realtime transcription endpoints. Filtered from the official ElevenLabs API spec.
+ * OpenAPI spec version: 1.0
+ */
+/**
+ * The duration of the audio that was transcribed in seconds.
+ */
+type SpeechToTextChunkResponseModelAudioDurationSecs = number | null;

 /**
  * Generated by orval v7.9.0 🍺
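The `SpeechToTextChunkResponseModelAudioDurationSecs` alias added above feeds the `audio_duration_secs` field that the next hunk attaches to `SpeechToTextChunkResponseModel`. Since the field is both optional and nullable, a consumer guard looks like:

```ts
function billedSeconds(chunk: SpeechToTextChunkResponseModel): number {
  // audio_duration_secs?: number | null; treat missing/null as zero here.
  return chunk.audio_duration_secs ?? 0;
}
```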
@@ -10812,6 +10267,8 @@ interface SpeechToTextChunkResponseModel {
   transcription_id?: SpeechToTextChunkResponseModelTranscriptionId;
   /** List of detected entities with their text, type, and character positions in the transcript. */
   entities?: SpeechToTextChunkResponseModelEntities;
+  /** The duration of the audio that was transcribed in seconds. */
+  audio_duration_secs?: SpeechToTextChunkResponseModelAudioDurationSecs;
 }

-export { type CustomSpellingConfigDTO as $, type AssemblyAIStreamingOptions as A, type AudioToLlmListDTOError as B, type CallbackConfig as C, type DeepgramStreamingOptions as D, type AudioToLlmResultDTO as E, type FileResponse as F, type GladiaStreamingOptions as G, type CallbackConfigDto as H, CallbackMethodEnum as I, type CallbackTranscriptionErrorPayload as J, type CallbackTranscriptionErrorPayloadCustomMetadata as K, type ListTranscriptsOptions as L, type MessagesConfig as M, type NamedEntityRecognitionDTO as N, CallbackTranscriptionErrorPayloadEvent as O, type PreProcessingConfig as P, type CallbackTranscriptionSuccessPayload as Q, type RealtimeProcessingConfig as R, type StreamingOptions as S, type TranscribeOptions as T, type UnifiedTranscriptResponse as U, type CallbackTranscriptionSuccessPayloadCustomMetadata as V, type WordDTO as W, CallbackTranscriptionSuccessPayloadEvent as X, type ChapterizationDTOError as Y, type ChapterizationDTOResults as Z, type CodeSwitchingConfigDTO as _, type StreamingCallbacks as a, type CustomFormattingRequestBodyCustomFormatting as a$, type CustomSpellingConfigDTOSpellingDictionary as a0, type CustomVocabularyConfigDTO as a1, type CustomVocabularyConfigDTOVocabularyItem as a2, type CustomVocabularyEntryDTO as a3, type DiarizationConfigDTO as a4, type DiarizationDTO as a5, type DiarizationDTOError as a6, type DisplayModeDTO as a7, type DisplayModeDTOError as a8, type ErrorDTO as a9, type SubtitlesConfigDTO as aA, SubtitlesFormatEnum as aB, SubtitlesStyleEnum as aC, type SummarizationConfigDTO as aD, type SummarizationDTOError as aE, SummaryTypesEnum as aF, TranscriptionControllerListV2KindItem as aG, type TranscriptionControllerListV2Params as aH, TranscriptionControllerListV2StatusItem as aI, type TranscriptionResultDTO as aJ, type TranslationConfigDTO as aK, type TranslationDTOError as aL, TranslationModelEnum as aM, type TranslationResultDTO as aN, type TranslationResultDTOError as aO, TranscriptStatus as aP, type TranscriptWord as aQ, AudioIntelligenceModelStatus as aR, type AutoHighlightResult as aS, type AutoHighlightsResult as aT, type Chapter$1 as aU, type ContentSafetyLabel as aV, type ContentSafetyLabelResult as aW, type ContentSafetyLabelsResult as aX, type ContentSafetyLabelsResultSeverityScoreSummary as aY, type ContentSafetyLabelsResultSummary as aZ, type CustomFormattingRequestBody as a_, type InitTranscriptionRequest as aa, type InitTranscriptionRequestCustomMetadata as ab, type ModerationDTO as ac, type ModerationDTOError as ad, type NamedEntityRecognitionDTOError as ae, type NamesConsistencyDTO as af, type NamesConsistencyDTOError as ag, type PreRecordedRequestParamsResponse as ah, type PreRecordedResponseCustomMetadata as ai, type PreRecordedResponseFile as aj, PreRecordedResponseKind as ak, type PreRecordedResponsePostSessionMetadata as al, type PreRecordedResponseRequestParams as am, type PreRecordedResponseResult as an, PreRecordedResponseStatus as ao, type SentencesDTO as ap, type SentencesDTOError as aq, type SentimentAnalysisDTOError as ar, type SpeakerReidentificationDTO as as, type SpeakerReidentificationDTOError as at, type StreamingRequest as au, type StreamingRequestCustomMetadata as av, type StructuredDataExtractionConfigDTO as aw, type StructuredDataExtractionDTO as ax, type StructuredDataExtractionDTOError as ay, type SubtitleDTO as az, type StreamingSession as b, type TranscriptOptionalParamsSpeechUnderstandingRequest as b$, type CustomFormattingResponse as b0, type CustomFormattingResponseCustomFormatting as b1, type CustomFormattingResponseCustomFormattingMapping as b2, type Entity as b3, EntityType as b4, type ListTranscriptsParams as b5, PiiPolicy as b6, RedactPiiAudioQuality as b7, Sentiment as b8, type SentimentAnalysisResult$1 as b9, type TranscriptConfidence as bA, type TranscriptContentSafety as bB, type TranscriptContentSafetyLabels as bC, type TranscriptCustomSpelling as bD, type TranscriptCustomSpellingProperty as bE, type TranscriptCustomTopics as bF, type TranscriptDisfluencies as bG, type TranscriptDomain as bH, type TranscriptEntities as bI, type TranscriptEntityDetection as bJ, type TranscriptFilterProfanity as bK, type TranscriptFormatText as bL, type TranscriptIabCategories as bM, type TranscriptIabCategoriesResult as bN, TranscriptLanguageCode as bO, type TranscriptLanguageCodes as bP, type TranscriptLanguageConfidence as bQ, type TranscriptLanguageConfidenceThreshold as bR, type TranscriptLanguageDetection as bS, type TranscriptLanguageDetectionOptions as bT, type TranscriptMultichannel as bU, type TranscriptOptionalParamsLanguageDetectionOptions as bV, type TranscriptOptionalParamsRedactPiiAudioOptions as bW, TranscriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod as bX, TranscriptOptionalParamsRemoveAudioTags as bY, type TranscriptOptionalParamsSpeakerOptions as bZ, type TranscriptOptionalParamsSpeechUnderstanding as b_, type SentimentAnalysisResultChannel as ba, type SentimentAnalysisResultSpeaker as bb, type SeverityScoreSummary as bc, type SpeakerIdentificationRequestBody as bd, type SpeakerIdentificationRequestBodySpeakerIdentification as be, SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType as bf, type SpeakerIdentificationRequestBodySpeakerIdentificationSpeakersItems as bg, type SpeakerIdentificationResponse as bh, type SpeakerIdentificationResponseSpeakerIdentification as bi, type SpeakerIdentificationResponseSpeakerIdentificationMapping as bj, type SpeechModel as bk, SubstitutionPolicy as bl, SummaryModel as bm, SummaryType as bn, type Timestamp as bo, type TopicDetectionModelResult as bp, type TopicDetectionModelResultSummary as bq, type TopicDetectionResult$1 as br, type TopicDetectionResultLabelsItems as bs, type Transcript as bt, type TranscriptAudioDuration as bu, type TranscriptAudioEndAt as bv, type TranscriptAudioStartFrom as bw, type TranscriptAutoChapters as bx, type TranscriptAutoHighlightsResult as by, type TranscriptChapters as bz, type StreamEvent as c, type TranscriptionCustomProperties as c$, type TranscriptParams as c0, type TranscriptParamsDomain as c1, type TranscriptParamsLanguageCode as c2, type TranscriptParamsLanguageCodes as c3, type TranscriptParamsRedactPiiSub as c4, type TranscriptParamsRemoveAudioTags as c5, type TranscriptParamsSpeakersExpected as c6, type TranscriptParamsSpeechModel as c7, type TranscriptParamsSpeechThreshold as c8, type TranscriptParamsWebhookAuthHeaderName as c9, type TranscriptTranslatedTexts as cA, type TranscriptUtterance as cB, type TranscriptUtteranceChannel as cC, type TranscriptUtteranceTranslatedTexts as cD, type TranscriptUtterances as cE, type TranscriptWebhookAuthHeaderName as cF, type TranscriptWebhookStatusCode as cG, type TranscriptWebhookUrl as cH, type TranscriptWordChannel as cI, type TranscriptWordSpeaker as cJ, type TranscriptWords as cK, type TranslationRequestBody as cL, type TranslationRequestBodyTranslation as cM, type TranslationResponse as cN, type TranslationResponseTranslation as cO, type StreamingUpdateConfiguration as cP, type Transcription as cQ, type EntityError as cR, Status as cS, type EntityReference as cT, type DiarizationProperties as cU, type DiarizationSpeakersProperties as cV, LanguageIdentificationMode as cW, type LanguageIdentificationProperties as cX, type LanguageIdentificationPropertiesSpeechModelMapping as cY, ProfanityFilterMode as cZ, PunctuationMode as c_, type TranscriptParamsWebhookAuthHeaderValue as ca, type TranscriptPunctuate as cb, type TranscriptRedactPiiAudio as cc, type TranscriptRedactPiiAudioOptions as cd, TranscriptRedactPiiAudioOptionsOverrideAudioRedactionMethod as ce, type TranscriptRedactPiiAudioQuality as cf, type TranscriptRedactPiiPolicies as cg, TranscriptRemoveAudioTags as ch, type TranscriptRemoveAudioTagsProperty as ci, type TranscriptSentimentAnalysis as cj, type TranscriptSentimentAnalysisResults as ck, type TranscriptSpeakerLabels as cl, type TranscriptSpeakersExpected as cm, type TranscriptSpeechModel as cn, type TranscriptSpeechModels as co, type TranscriptSpeechThreshold as cp, type TranscriptSpeechUnderstanding as cq, type TranscriptSpeechUnderstandingRequest as cr, type TranscriptSpeechUnderstandingResponse as cs, type TranscriptSpeedBoost as ct, type TranscriptSummary as cu, type TranscriptSummaryModel as cv, type TranscriptSummaryType as cw, type TranscriptTemperature as cx, type TranscriptText as cy, type TranscriptThrottled as cz, StreamingSupportedEncodingEnum as d, type ListenV1LanguageParameter as d$, type TranscriptionLinks as d0, type TranscriptionProperties as d1, type TranscriptTextUsageTokens as d2, type TranscriptionSegment as d3, type RealtimeSessionCreateRequestGAModel as d4, RealtimeTranscriptionSessionCreateRequestTurnDetectionType as d5, RealtimeTranscriptionSessionCreateRequestInputAudioFormat as d6, AudioResponseFormat as d7, type CreateTranscription200One as d8, type CreateTranscriptionRequest as d9, type ManageV1LimitParameter as dA, type ManageV1PageParameter as dB, ManageV1FilterEndpointParameter as dC, ManageV1FilterMethodParameter as dD, type SharedTopics as dE, type SharedIntents as dF, type SharedSentiments as dG, type SharedCallbackParameter as dH, SharedCallbackMethodParameter as dI, type SharedSentimentParameter as dJ, type SharedSummarizeParameter as dK, type SharedTagParameter as dL, type SharedTopicsParameter as dM, type SharedCustomTopicParameter as dN, SharedCustomTopicModeParameter as dO, type SharedIntentsParameter as dP, type SharedCustomIntentParameter as dQ, SharedCustomIntentModeParameter as dR, type SharedMipOptOutParameter as dS, type ListenV1DetectEntitiesParameter as dT, type ListenV1DetectLanguageParameter as dU, type ListenV1DiarizeParameter as dV, type ListenV1DictationParameter as dW, ListenV1EncodingParameter as dX, type ListenV1FillerWordsParameter as dY, type ListenV1KeytermParameter as dZ, type ListenV1KeywordsParameter as d_, type CreateTranscriptionRequestModel as da, type CreateTranscriptionRequestStream as db, CreateTranscriptionRequestTimestampGranularitiesItem as dc, type CreateTranscriptionResponseDiarizedJson as dd, CreateTranscriptionResponseDiarizedJsonTask as de, type CreateTranscriptionResponseDiarizedJsonUsage as df, type CreateTranscriptionResponseJson as dg, type CreateTranscriptionResponseJsonLogprobsItem as dh, type CreateTranscriptionResponseJsonUsage as di, type CreateTranscriptionResponseVerboseJson as dj, type TranscriptTextUsageDuration as dk, TranscriptTextUsageDurationType as dl, type TranscriptTextUsageTokensInputTokenDetails as dm, TranscriptTextUsageTokensType as dn, type TranscriptionChunkingStrategy as dp, type TranscriptionChunkingStrategyAnyOf as dq, type TranscriptionDiarizedSegment as dr, TranscriptionDiarizedSegmentType as ds, TranscriptionInclude as dt, type TranscriptionWord as du, type VadConfig as dv, VadConfigType as dw, type ListenV1Response as dx, type ManageV1FilterAccessorParameter as dy, ManageV1FilterDeploymentParameter as dz, StreamingSupportedBitDepthEnum as e, type AudioEventSummaryItem as e$, type ListenV1MeasurementsParameter as e0, type ListenV1MediaTranscribeParams as e1, type ListenV1ModelParameter as e2, type ListenV1MultichannelParameter as e3, type ListenV1NumeralsParameter as e4, type ListenV1ParagraphsParameter as e5, type ListenV1ProfanityFilterParameter as e6, type ListenV1PunctuateParameter as e7, type ListenV1RedactParameter as e8, ListenV1RedactParameterOneOfItem as e9, type ListenV1UttSplitParameter as eA, type ListenV1UtterancesParameter as eB, type ListenV1VersionParameter as eC, type ManageV1EndDateTimeParameter as eD, type ManageV1FilterRequestIdParameter as eE, ManageV1FilterStatusParameter as eF, type ManageV1ProjectsRequestsListParams as eG, type ManageV1StartDateTimeParameter as eH, type SharedExtraParameter as eI, type SharedIntentsResults as eJ, type SharedIntentsResultsIntents as eK, type SharedIntentsResultsIntentsSegmentsItem as eL, type SharedIntentsResultsIntentsSegmentsItemIntentsItem as eM, type SharedSentimentsAverage as eN, type SharedSentimentsSegmentsItem as eO, type SharedTopicsResults as eP, type SharedTopicsResultsTopics as eQ, type SharedTopicsResultsTopicsSegmentsItem as eR, type SharedTopicsResultsTopicsSegmentsItemTopicsItem as eS, JobType as eT, type AlignmentConfig as eU, type TranscriptionConfig as eV, type TrackingData as eW, type OutputConfig as eX, OperatingPoint as eY, type AudioEventItem as eZ, type AudioEventSummary as e_, type ListenV1ReplaceParameter as ea, type ListenV1ResponseMetadata as eb, type ListenV1ResponseMetadataIntentsInfo as ec, type ListenV1ResponseMetadataModelInfo as ed, type ListenV1ResponseMetadataSentimentInfo as ee, type ListenV1ResponseMetadataSummaryInfo as ef, type ListenV1ResponseMetadataTopicsInfo as eg, type ListenV1ResponseResults as eh, type ListenV1ResponseResultsChannels as ei, type ListenV1ResponseResultsChannelsItem as ej, type ListenV1ResponseResultsChannelsItemAlternativesItem as ek, type ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem as el, type ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs as em, type ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItem as en, type ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem as eo, type ListenV1ResponseResultsChannelsItemAlternativesItemSummariesItem as ep, type ListenV1ResponseResultsChannelsItemAlternativesItemTopicsItem as eq, type ListenV1ResponseResultsChannelsItemAlternativesItemWordsItem as er, type ListenV1ResponseResultsChannelsItemSearchItem as es, type ListenV1ResponseResultsChannelsItemSearchItemHitsItem as et, type ListenV1ResponseResultsSummary as eu, type ListenV1ResponseResultsUtterances as ev, type ListenV1ResponseResultsUtterancesItem as ew, type ListenV1ResponseResultsUtterancesItemWordsItem as ex, type ListenV1SearchParameter as ey, type ListenV1SmartFormatParameter as ez, StreamingSupportedSampleRateEnum as f, type BodySpeechToTextV1SpeechToTextPostEntityDetection as f$, type AutoChaptersResult as f0, type AutoChaptersResultError as f1, AutoChaptersResultErrorType as f2, type Chapter as f3, type JobInfo as f4, type LanguageIdentificationResult as f5, type LanguageIdentificationResultAlternative as f6, LanguageIdentificationResultError as f7, type LanguageIdentificationResultItem as f8, type LanguagePackInfo as f9, type SummarizationResult as fA, type TopicDetectionError as fB, TopicDetectionErrorType as fC, type TopicDetectionResult as fD, type TopicDetectionSegment as fE, type TopicDetectionSegmentTopic as fF, type TopicDetectionSummary as fG, type TopicDetectionSummaryOverall as fH, type TrackingDataDetails as fI, type TranscriptionConfigAdditionalVocabItem as fJ, TranscriptionConfigDiarization as fK, TranscriptionConfigMaxDelayMode as fL, type TranscriptionConfigPunctuationOverrides as fM, type TranscriptionConfigSpeakerDiarizationConfig as fN, type TranscriptionConfigTranscriptFilteringConfig as fO, type TranscriptionConfigTranscriptFilteringConfigReplacementsItem as fP, type TranslationError as fQ, TranslationErrorType as fR, type TranslationSentence as fS, type WrittenFormRecognitionResult as fT, WrittenFormRecognitionResultType as fU, type SpeechToTextChunkResponseModel as fV, type AdditionalFormatResponseModel as fW, type AdditionalFormats as fX, type BodySpeechToTextV1SpeechToTextPost as fY, type BodySpeechToTextV1SpeechToTextPostCloudStorageUrl as fZ, type BodySpeechToTextV1SpeechToTextPostDiarizationThreshold as f_, LanguagePackInfoWritingDirection as fa, type OutputConfigSrtOverrides as fb, type RecognitionAlternative as fc, type RecognitionDisplay as fd, RecognitionDisplayDirection as fe, type RecognitionMetadata as ff, type RecognitionResult as fg, RecognitionResultAttachesTo as fh, RecognitionResultType as fi, type RetrieveTranscriptResponse as fj, type RetrieveTranscriptResponseAudioEventSummary as fk, type RetrieveTranscriptResponseAudioEventSummaryChannels as fl, type RetrieveTranscriptResponseTranslations as fm, type SentimentAnalysisError as fn, SentimentAnalysisErrorType as fo, type SentimentAnalysisResult as fp, type SentimentAnalysisResultSentimentAnalysis as fq, type SentimentChannelSummary as fr, type SentimentSegment as fs, type SentimentSpeakerSummary as ft, type SentimentSummary as fu, type SentimentSummaryDetail as fv, type SpokenFormRecognitionResult as fw, SpokenFormRecognitionResultType as fx, type SummarizationError as fy, SummarizationErrorType as fz, StreamingSupportedModels as g, type GladiaExtendedData as g$, type BodySpeechToTextV1SpeechToTextPostFile as g0, BodySpeechToTextV1SpeechToTextPostFileFormat as g1, type BodySpeechToTextV1SpeechToTextPostLanguageCode as g2, BodySpeechToTextV1SpeechToTextPostModelId as g3, type BodySpeechToTextV1SpeechToTextPostNumSpeakers as g4, type BodySpeechToTextV1SpeechToTextPostSeed as g5, type BodySpeechToTextV1SpeechToTextPostTemperature as g6, BodySpeechToTextV1SpeechToTextPostTimestampsGranularity as g7, type BodySpeechToTextV1SpeechToTextPostWebhookId as g8, type BodySpeechToTextV1SpeechToTextPostWebhookMetadata as g9, type SpeechToTextChunkResponseModelAdditionalFormats as gA, type SpeechToTextChunkResponseModelAdditionalFormatsAnyOfItem as gB, type SpeechToTextChunkResponseModelChannelIndex as gC, type SpeechToTextChunkResponseModelEntities as gD, type SpeechToTextChunkResponseModelTranscriptionId as gE, type SpeechToTextWordResponseModel as gF, type SpeechToTextWordResponseModelCharacters as gG, type SpeechToTextWordResponseModelEnd as gH, type SpeechToTextWordResponseModelSpeakerId as gI, type SpeechToTextWordResponseModelStart as gJ, SpeechToTextWordResponseModelType as gK, type SrtExportOptions as gL,
SrtExportOptionsFormat as gM, type SrtExportOptionsMaxCharactersPerLine as gN, type SrtExportOptionsMaxSegmentChars as gO, type SrtExportOptionsMaxSegmentDurationS as gP, type SrtExportOptionsSegmentOnSilenceLongerThanS as gQ, type TxtExportOptions as gR, TxtExportOptionsFormat as gS, type TxtExportOptionsMaxCharactersPerLine as gT, type TxtExportOptionsMaxSegmentChars as gU, type TxtExportOptionsMaxSegmentDurationS as gV, type TxtExportOptionsSegmentOnSilenceLongerThanS as gW, type SpeechmaticsOperatingPoint as gX, type TranscriptionModel as gY, type TranscriptionLanguage as gZ, type AssemblyAIExtendedData as g_, type BodySpeechToTextV1SpeechToTextPostWebhookMetadataAnyOf as ga, type DetectedEntity as gb, type DocxExportOptions as gc, DocxExportOptionsFormat as gd, type DocxExportOptionsMaxSegmentChars as ge, type DocxExportOptionsMaxSegmentDurationS as gf, type DocxExportOptionsSegmentOnSilenceLongerThanS as gg, type ExportOptions as gh, type HtmlExportOptions as gi, HtmlExportOptionsFormat as gj, type HtmlExportOptionsMaxSegmentChars as gk, type HtmlExportOptionsMaxSegmentDurationS as gl, type HtmlExportOptionsSegmentOnSilenceLongerThanS as gm, type PdfExportOptions as gn, PdfExportOptionsFormat as go, type PdfExportOptionsMaxSegmentChars as gp, type PdfExportOptionsMaxSegmentDurationS as gq, type PdfExportOptionsSegmentOnSilenceLongerThanS as gr, type SegmentedJsonExportOptions as gs, SegmentedJsonExportOptionsFormat as gt, type SegmentedJsonExportOptionsMaxSegmentChars as gu, type SegmentedJsonExportOptionsMaxSegmentDurationS as gv, type SegmentedJsonExportOptionsSegmentOnSilenceLongerThanS as gw, type SpeechToTextCharacterResponseModel as gx, type SpeechToTextCharacterResponseModelEnd as gy, type SpeechToTextCharacterResponseModelStart as gz, type LanguageConfig as h, type DeepgramExtendedData as h0, type ElevenLabsExtendedData as h1, type ProviderExtendedDataMap as h2, type StreamingProvider as h3, type BatchOnlyProvider as h4, type SessionStatus as h5, type Speaker as h6, type Word as h7, type Utterance as h8, type TranscriptionStatus as h9, type StreamingEventMessage as hA, type StreamingWord as hB, type StreamingForceEndpoint as hC, type TranscriptMetadata as ha, type TranscriptData as hb, type ListTranscriptsResponse as hc, type ProviderRawResponseMap as hd, type StreamEventType as he, type SpeechEvent as hf, type TranslationEvent as hg, type SentimentEvent as hh, type EntityEvent as hi, type SummarizationEvent as hj, type ChapterizationEvent as hk, type AudioAckEvent as hl, type LifecycleEvent as hm, type AudioChunk as hn, type RawWebSocketMessage as ho, type AssemblyAIUpdateConfiguration as hp, type OpenAIStreamingOptions as hq, type SonioxStreamingOptions as hr, type ElevenLabsStreamingOptions as hs, type ProviderStreamingOptions as ht, type StreamingOptionsForProvider as hu, type TranscribeStreamParams as hv, type BeginEvent as hw, type TurnEvent as hx, type TerminationEvent as hy, type ErrorEvent as hz, type PostProcessingConfig as i, type TranscriptionMetadataDTO as j, type TranscriptionDTO as k, type TranslationDTO as l, type SummarizationDTO as m, type SentimentAnalysisDTO as n, type ChapterizationDTO as o, type PreRecordedResponse as p, type UtteranceDTO as q, TranscriptionLanguageCodeEnum as r, TranslationLanguageCodeEnum as s, StreamingSupportedRegions as t, type AddonErrorDTO as u, type AudioToLlmDTO as v, type AudioToLlmDTOError as w, type AudioToLlmDTOResults as x, type AudioToLlmListConfigDTO as y, type AudioToLlmListDTO as z };
+
export { type CustomSpellingConfigDTO as $, type AssemblyAIStreamingOptions as A, type AudioToLlmListDTO as B, type CallbackConfig as C, type DeepgramStreamingOptions as D, type AudioToLlmListDTOError as E, type FileResponse as F, type GladiaStreamingOptions as G, type AudioToLlmResultDTO as H, type CallbackConfigDto as I, CallbackMethodEnum as J, type CallbackTranscriptionErrorPayload as K, type ListTranscriptsOptions as L, type MessagesConfig as M, type NamedEntityRecognitionDTO as N, type CallbackTranscriptionErrorPayloadCustomMetadata as O, type PreProcessingConfig as P, CallbackTranscriptionErrorPayloadEvent as Q, type RealtimeProcessingConfig as R, type StreamingOptions as S, type TranscribeOptions as T, type UnifiedTranscriptResponse as U, type CallbackTranscriptionSuccessPayload as V, type WordDTO as W, type CallbackTranscriptionSuccessPayloadCustomMetadata as X, CallbackTranscriptionSuccessPayloadEvent as Y, type ChapterizationDTOError as Z, type ChapterizationDTOResults as _, type StreamingCallbacks as a, type ContentSafetyLabelsResultSummary as a$, type CustomSpellingConfigDTOSpellingDictionary as a0, type CustomVocabularyConfigDTO as a1, type CustomVocabularyConfigDTOVocabularyItem as a2, type CustomVocabularyEntryDTO as a3, type DiarizationConfigDTO as a4, type DiarizationDTO as a5, type DiarizationDTOError as a6, type DisplayModeDTO as a7, type DisplayModeDTOError as a8, type ErrorDTO as a9, type StructuredDataExtractionDTOError as aA, type SubtitleDTO as aB, type SubtitlesConfigDTO as aC, SubtitlesFormatEnum as aD, SubtitlesStyleEnum as aE, type SummarizationConfigDTO as aF, type SummarizationDTOError as aG, SummaryTypesEnum as aH, TranscriptionControllerListV2KindItem as aI, type TranscriptionControllerListV2Params as aJ, TranscriptionControllerListV2StatusItem as aK, type TranscriptionResultDTO as aL, type TranslationConfigDTO as aM, type TranslationDTOError as aN, TranslationModelEnum as aO, type TranslationResultDTO as aP, type TranslationResultDTOError as aQ, TranscriptStatus as aR, type TranscriptWord as aS, AudioIntelligenceModelStatus as aT, type AutoHighlightResult as aU, type AutoHighlightsResult as aV, type Chapter$1 as aW, type ContentSafetyLabel as aX, type ContentSafetyLabelResult as aY, type ContentSafetyLabelsResult as aZ, type ContentSafetyLabelsResultSeverityScoreSummary as a_, type InitTranscriptionRequest as aa, type InitTranscriptionRequestCustomMetadata as ab, type ModerationDTO as ac, type ModerationDTOError as ad, type NamedEntityRecognitionDTOError as ae, type NamesConsistencyDTO as af, type NamesConsistencyDTOError as ag, type PiiRedactionConfigDTO as ah, PiiRedactionConfigDTOProcessedTextType as ai, PiiRedactionEntityTypeEnum as aj, type PreRecordedRequestParamsResponse as ak, type PreRecordedResponseCustomMetadata as al, type PreRecordedResponseFile as am, PreRecordedResponseKind as an, type PreRecordedResponsePostSessionMetadata as ao, type PreRecordedResponseRequestParams as ap, type PreRecordedResponseResult as aq, PreRecordedResponseStatus as ar, type SentencesDTO as as, type SentencesDTOError as at, type SentimentAnalysisDTOError as au, type SpeakerReidentificationDTO as av, type SpeakerReidentificationDTOError as aw, type StreamingRequest as ax, type StreamingRequestCustomMetadata as ay, type StructuredDataExtractionDTO as az, type StreamingSession as b, type TranscriptOptionalParamsSpeakerOptions as b$, type CustomFormattingRequestBody as b0, type CustomFormattingRequestBodyCustomFormatting as b1, type CustomFormattingResponse as b2, type 
CustomFormattingResponseCustomFormatting as b3, type CustomFormattingResponseCustomFormattingMapping as b4, type Entity as b5, EntityType as b6, type ListTranscriptsParams as b7, PiiPolicy as b8, RedactPiiAudioQuality as b9, type TranscriptAutoHighlightsResult as bA, type TranscriptChapters as bB, type TranscriptConfidence as bC, type TranscriptContentSafety as bD, type TranscriptContentSafetyLabels as bE, type TranscriptCustomSpelling as bF, type TranscriptCustomSpellingProperty as bG, type TranscriptCustomTopics as bH, type TranscriptDisfluencies as bI, type TranscriptDomain as bJ, type TranscriptEntities as bK, type TranscriptEntityDetection as bL, type TranscriptFilterProfanity as bM, type TranscriptFormatText as bN, type TranscriptIabCategories as bO, type TranscriptIabCategoriesResult as bP, TranscriptLanguageCode as bQ, type TranscriptLanguageCodes as bR, type TranscriptLanguageConfidence as bS, type TranscriptLanguageConfidenceThreshold as bT, type TranscriptLanguageDetection as bU, type TranscriptLanguageDetectionOptions as bV, type TranscriptMultichannel as bW, type TranscriptOptionalParamsLanguageDetectionOptions as bX, type TranscriptOptionalParamsRedactPiiAudioOptions as bY, TranscriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod as bZ, TranscriptOptionalParamsRemoveAudioTags as b_, Sentiment as ba, type SentimentAnalysisResult$1 as bb, type SentimentAnalysisResultChannel as bc, type SentimentAnalysisResultSpeaker as bd, type SeverityScoreSummary as be, type SpeakerIdentificationRequestBody as bf, type SpeakerIdentificationRequestBodySpeakerIdentification as bg, SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType as bh, type SpeakerIdentificationRequestBodySpeakerIdentificationSpeakersItems as bi, type SpeakerIdentificationResponse as bj, type SpeakerIdentificationResponseSpeakerIdentification as bk, type SpeakerIdentificationResponseSpeakerIdentificationMapping as bl, type SpeechModel as bm, SubstitutionPolicy as bn, SummaryModel as bo, SummaryType as bp, type Timestamp as bq, type TopicDetectionModelResult as br, type TopicDetectionModelResultSummary as bs, type TopicDetectionResult$1 as bt, type TopicDetectionResultLabelsItems as bu, type Transcript as bv, type TranscriptAudioDuration as bw, type TranscriptAudioEndAt as bx, type TranscriptAudioStartFrom as by, type TranscriptAutoChapters as bz, type StreamEvent as c, type TranscriptionCustomProperties as c$, type TranscriptOptionalParamsSpeechUnderstanding as c0, type TranscriptOptionalParamsSpeechUnderstandingRequest as c1, type TranscriptParams as c2, type TranscriptParamsDomain as c3, type TranscriptParamsLanguageCode as c4, type TranscriptParamsLanguageCodes as c5, type TranscriptParamsRedactPiiSub as c6, type TranscriptParamsRemoveAudioTags as c7, type TranscriptParamsSpeakersExpected as c8, type TranscriptParamsSpeechThreshold as c9, type TranscriptTranslatedTexts as cA, type TranscriptUtterance as cB, type TranscriptUtteranceChannel as cC, type TranscriptUtteranceTranslatedTexts as cD, type TranscriptUtterances as cE, type TranscriptWebhookAuthHeaderName as cF, type TranscriptWebhookStatusCode as cG, type TranscriptWebhookUrl as cH, type TranscriptWordChannel as cI, type TranscriptWordSpeaker as cJ, type TranscriptWords as cK, type TranslationRequestBody as cL, type TranslationRequestBodyTranslation as cM, type TranslationResponse as cN, type TranslationResponseTranslation as cO, type StreamingUpdateConfiguration as cP, type Transcription as cQ, type EntityError as cR, Status as cS, 
type EntityReference as cT, type DiarizationProperties as cU, type DiarizationSpeakersProperties as cV, LanguageIdentificationMode as cW, type LanguageIdentificationProperties as cX, type LanguageIdentificationPropertiesSpeechModelMapping as cY, ProfanityFilterMode as cZ, PunctuationMode as c_, type TranscriptParamsWebhookAuthHeaderName as ca, type TranscriptParamsWebhookAuthHeaderValue as cb, type TranscriptPunctuate as cc, type TranscriptRedactPiiAudio as cd, type TranscriptRedactPiiAudioOptions as ce, TranscriptRedactPiiAudioOptionsOverrideAudioRedactionMethod as cf, type TranscriptRedactPiiAudioQuality as cg, type TranscriptRedactPiiPolicies as ch, TranscriptRemoveAudioTags as ci, type TranscriptRemoveAudioTagsProperty as cj, type TranscriptSentimentAnalysis as ck, type TranscriptSentimentAnalysisResults as cl, type TranscriptSpeakerLabels as cm, type TranscriptSpeakersExpected as cn, type TranscriptSpeechModel as co, type TranscriptSpeechModels as cp, type TranscriptSpeechThreshold as cq, type TranscriptSpeechUnderstanding as cr, type TranscriptSpeechUnderstandingRequest as cs, type TranscriptSpeechUnderstandingResponse as ct, type TranscriptSpeedBoost as cu, type TranscriptSummary as cv, type TranscriptSummaryModel as cw, type TranscriptSummaryType as cx, type TranscriptText as cy, type TranscriptThrottled as cz, StreamingSupportedEncodingEnum as d, type SharedIntentsResultsIntents as d$, type TranscriptionLinks as d0, type TranscriptionProperties as d1, type TranscriptTextUsageTokens as d2, type TranscriptionSegment as d3, type RealtimeSessionCreateRequestGAModel as d4, RealtimeTranscriptionSessionCreateRequestTurnDetectionType as d5, RealtimeTranscriptionSessionCreateRequestInputAudioFormat as d6, AudioResponseFormat as d7, type CreateTranscription200One as d8, type CreateTranscriptionRequest as d9, type SharedSentiments as dA, type ListProjectRequestsParams as dB, type ListenTranscribeParams as dC, type ListenV1ResponseMetadata as dD, type ListenV1ResponseMetadataIntentsInfo as dE, type ListenV1ResponseMetadataModelInfo as dF, type ListenV1ResponseMetadataSentimentInfo as dG, type ListenV1ResponseMetadataSummaryInfo as dH, type ListenV1ResponseMetadataTopicsInfo as dI, type ListenV1ResponseResults as dJ, type ListenV1ResponseResultsChannels as dK, type ListenV1ResponseResultsChannelsItems as dL, type ListenV1ResponseResultsChannelsItemsAlternativesItems as dM, type ListenV1ResponseResultsChannelsItemsAlternativesItemsEntitiesItems as dN, type ListenV1ResponseResultsChannelsItemsAlternativesItemsParagraphs as dO, type ListenV1ResponseResultsChannelsItemsAlternativesItemsParagraphsParagraphsItems as dP, type ListenV1ResponseResultsChannelsItemsAlternativesItemsParagraphsParagraphsItemsSentencesItems as dQ, type ListenV1ResponseResultsChannelsItemsAlternativesItemsSummariesItems as dR, type ListenV1ResponseResultsChannelsItemsAlternativesItemsTopicsItems as dS, type ListenV1ResponseResultsChannelsItemsAlternativesItemsWordsItems as dT, type ListenV1ResponseResultsChannelsItemsSearchItems as dU, type ListenV1ResponseResultsChannelsItemsSearchItemsHitsItems as dV, type ListenV1ResponseResultsSummary as dW, type ListenV1ResponseResultsUtterances as dX, type ListenV1ResponseResultsUtterancesItems as dY, type ListenV1ResponseResultsUtterancesItemsWordsItems as dZ, type SharedIntentsResults as d_, type CreateTranscriptionRequestChunkingStrategy as da, type CreateTranscriptionRequestChunkingStrategyAnyOf as db, type CreateTranscriptionRequestModel as dc, type 
CreateTranscriptionRequestStream as dd, CreateTranscriptionRequestTimestampGranularitiesItem as de, type CreateTranscriptionResponseDiarizedJson as df, CreateTranscriptionResponseDiarizedJsonTask as dg, type CreateTranscriptionResponseDiarizedJsonUsage as dh, type CreateTranscriptionResponseJson as di, type CreateTranscriptionResponseJsonLogprobsItem as dj, type CreateTranscriptionResponseJsonUsage as dk, type CreateTranscriptionResponseVerboseJson as dl, type TranscriptTextUsageDuration as dm, TranscriptTextUsageDurationType as dn, type TranscriptTextUsageTokensInputTokenDetails as dp, TranscriptTextUsageTokensType as dq, type TranscriptionDiarizedSegment as dr, TranscriptionDiarizedSegmentType as ds, TranscriptionInclude as dt, type TranscriptionWord as du, type VadConfig as dv, VadConfigType as dw, type ListenV1Response as dx, type SharedTopics as dy, type SharedIntents as dz, StreamingSupportedBitDepthEnum as e, type RetrieveTranscriptResponseAudioEventSummaryChannels as e$, type SharedIntentsResultsIntentsSegmentsItems as e0, type SharedIntentsResultsIntentsSegmentsItemsIntentsItems as e1, type SharedSentimentsAverage as e2, type SharedSentimentsSegmentsItems as e3, type SharedTopicsResults as e4, type SharedTopicsResultsTopics as e5, type SharedTopicsResultsTopicsSegmentsItems as e6, type SharedTopicsResultsTopicsSegmentsItemsTopicsItems as e7, V1ListenPostParametersCallbackMethod as e8, type V1ListenPostParametersCustomIntent as e9, type TrackingData as eA, type OutputConfig as eB, OperatingPoint as eC, type AudioEventItem as eD, type AudioEventSummary as eE, type AudioEventSummaryItem as eF, type AutoChaptersResult as eG, type AutoChaptersResultError as eH, AutoChaptersResultErrorType as eI, type Chapter as eJ, type JobInfo as eK, type LanguageIdentificationResult as eL, type LanguageIdentificationResultAlternative as eM, LanguageIdentificationResultError as eN, type LanguageIdentificationResultItem as eO, type LanguagePackInfo as eP, LanguagePackInfoWritingDirection as eQ, type OutputConfigSrtOverrides as eR, type RecognitionAlternative as eS, type RecognitionDisplay as eT, RecognitionDisplayDirection as eU, type RecognitionMetadata as eV, type RecognitionResult as eW, RecognitionResultAttachesTo as eX, RecognitionResultType as eY, type RetrieveTranscriptResponse as eZ, type RetrieveTranscriptResponseAudioEventSummary as e_, V1ListenPostParametersCustomIntentMode as ea, type V1ListenPostParametersCustomTopic as eb, V1ListenPostParametersCustomTopicMode as ec, type V1ListenPostParametersDetectLanguage as ed, V1ListenPostParametersEncoding as ee, type V1ListenPostParametersExtra as ef, type V1ListenPostParametersKeywords as eg, type V1ListenPostParametersModel as eh, V1ListenPostParametersModel0 as ei, type V1ListenPostParametersRedact as ej, type V1ListenPostParametersRedact1 as ek, V1ListenPostParametersRedactSchemaOneOf1Items as el, type V1ListenPostParametersReplace as em, type V1ListenPostParametersSearch as en, type V1ListenPostParametersSummarize as eo, V1ListenPostParametersSummarize0 as ep, type V1ListenPostParametersTag as eq, type V1ListenPostParametersVersion as er, V1ListenPostParametersVersion0 as es, V1ProjectsProjectIdRequestsGetParametersDeployment as et, V1ProjectsProjectIdRequestsGetParametersEndpoint as eu, V1ProjectsProjectIdRequestsGetParametersMethod as ev, V1ProjectsProjectIdRequestsGetParametersStatus as ew, JobType as ex, type AlignmentConfig as ey, type TranscriptionConfig as ez, StreamingSupportedSampleRateEnum as f, HtmlExportOptionsFormat as f$, type 
RetrieveTranscriptResponseTranslations as f0, type SentimentAnalysisError as f1, SentimentAnalysisErrorType as f2, type SentimentAnalysisResult as f3, type SentimentAnalysisResultSentimentAnalysis as f4, type SentimentChannelSummary as f5, type SentimentSegment as f6, type SentimentSpeakerSummary as f7, type SentimentSummary as f8, type SentimentSummaryDetail as f9, type AdditionalFormatResponseModel as fA, type AdditionalFormats as fB, type BodySpeechToTextV1SpeechToTextPost as fC, type BodySpeechToTextV1SpeechToTextPostCloudStorageUrl as fD, type BodySpeechToTextV1SpeechToTextPostDiarizationThreshold as fE, type BodySpeechToTextV1SpeechToTextPostEntityDetection as fF, type BodySpeechToTextV1SpeechToTextPostEntityRedaction as fG, type BodySpeechToTextV1SpeechToTextPostFile as fH, BodySpeechToTextV1SpeechToTextPostFileFormat as fI, type BodySpeechToTextV1SpeechToTextPostLanguageCode as fJ, BodySpeechToTextV1SpeechToTextPostModelId as fK, type BodySpeechToTextV1SpeechToTextPostNumSpeakers as fL, type BodySpeechToTextV1SpeechToTextPostSeed as fM, type BodySpeechToTextV1SpeechToTextPostSourceUrl as fN, type BodySpeechToTextV1SpeechToTextPostTemperature as fO, BodySpeechToTextV1SpeechToTextPostTimestampsGranularity as fP, type BodySpeechToTextV1SpeechToTextPostWebhookId as fQ, type BodySpeechToTextV1SpeechToTextPostWebhookMetadata as fR, type BodySpeechToTextV1SpeechToTextPostWebhookMetadataAnyOf as fS, type DetectedEntity as fT, type DocxExportOptions as fU, DocxExportOptionsFormat as fV, type DocxExportOptionsMaxSegmentChars as fW, type DocxExportOptionsMaxSegmentDurationS as fX, type DocxExportOptionsSegmentOnSilenceLongerThanS as fY, type ExportOptions as fZ, type HtmlExportOptions as f_, type SpokenFormRecognitionResult as fa, SpokenFormRecognitionResultType as fb, type SummarizationError as fc, SummarizationErrorType as fd, type SummarizationResult as fe, type TopicDetectionError as ff, TopicDetectionErrorType as fg, type TopicDetectionResult as fh, type TopicDetectionSegment as fi, type TopicDetectionSegmentTopic as fj, type TopicDetectionSummary as fk, type TopicDetectionSummaryOverall as fl, type TrackingDataDetails as fm, type TranscriptionConfigAdditionalVocabItem as fn, TranscriptionConfigDiarization as fo, TranscriptionConfigMaxDelayMode as fp, type TranscriptionConfigPunctuationOverrides as fq, type TranscriptionConfigSpeakerDiarizationConfig as fr, type TranscriptionConfigTranscriptFilteringConfig as fs, type TranscriptionConfigTranscriptFilteringConfigReplacementsItem as ft, type TranslationError as fu, TranslationErrorType as fv, type TranslationSentence as fw, type WrittenFormRecognitionResult as fx, WrittenFormRecognitionResultType as fy, type SpeechToTextChunkResponseModel as fz, StreamingSupportedModels as g, type EntityEvent as g$, type HtmlExportOptionsMaxSegmentChars as g0, type HtmlExportOptionsMaxSegmentDurationS as g1, type HtmlExportOptionsSegmentOnSilenceLongerThanS as g2, type PdfExportOptions as g3, PdfExportOptionsFormat as g4, type PdfExportOptionsMaxSegmentChars as g5, type PdfExportOptionsMaxSegmentDurationS as g6, type PdfExportOptionsSegmentOnSilenceLongerThanS as g7, type SegmentedJsonExportOptions as g8, SegmentedJsonExportOptionsFormat as g9, type TxtExportOptionsMaxCharactersPerLine as gA, type TxtExportOptionsMaxSegmentChars as gB, type TxtExportOptionsMaxSegmentDurationS as gC, type TxtExportOptionsSegmentOnSilenceLongerThanS as gD, type SpeechmaticsOperatingPoint as gE, type TranscriptionModel as gF, type TranscriptionLanguage as gG, type 
AssemblyAIExtendedData as gH, type GladiaExtendedData as gI, type DeepgramExtendedData as gJ, type ElevenLabsExtendedData as gK, type ProviderExtendedDataMap as gL, type StreamingProvider as gM, type BatchOnlyProvider as gN, type SessionStatus as gO, type Speaker as gP, type Word as gQ, type Utterance as gR, type TranscriptionStatus as gS, type TranscriptMetadata as gT, type TranscriptData as gU, type ListTranscriptsResponse as gV, type ProviderRawResponseMap as gW, type StreamEventType as gX, type SpeechEvent as gY, type TranslationEvent as gZ, type SentimentEvent as g_, type SegmentedJsonExportOptionsMaxSegmentChars as ga, type SegmentedJsonExportOptionsMaxSegmentDurationS as gb, type SegmentedJsonExportOptionsSegmentOnSilenceLongerThanS as gc, type SpeechToTextCharacterResponseModel as gd, type SpeechToTextCharacterResponseModelEnd as ge, type SpeechToTextCharacterResponseModelStart as gf, type SpeechToTextChunkResponseModelAdditionalFormats as gg, type SpeechToTextChunkResponseModelAdditionalFormatsAnyOfItem as gh, type SpeechToTextChunkResponseModelAudioDurationSecs as gi, type SpeechToTextChunkResponseModelChannelIndex as gj, type SpeechToTextChunkResponseModelEntities as gk, type SpeechToTextChunkResponseModelTranscriptionId as gl, type SpeechToTextWordResponseModel as gm, type SpeechToTextWordResponseModelCharacters as gn, type SpeechToTextWordResponseModelEnd as go, type SpeechToTextWordResponseModelSpeakerId as gp, type SpeechToTextWordResponseModelStart as gq, SpeechToTextWordResponseModelType as gr, type SrtExportOptions as gs, SrtExportOptionsFormat as gt, type SrtExportOptionsMaxCharactersPerLine as gu, type SrtExportOptionsMaxSegmentChars as gv, type SrtExportOptionsMaxSegmentDurationS as gw, type SrtExportOptionsSegmentOnSilenceLongerThanS as gx, type TxtExportOptions as gy, TxtExportOptionsFormat as gz, type LanguageConfig as h, type SummarizationEvent as h0, type ChapterizationEvent as h1, type AudioAckEvent as h2, type LifecycleEvent as h3, type AudioChunk as h4, type RawWebSocketMessage as h5, type AssemblyAIUpdateConfiguration as h6, type OpenAIStreamingOptions as h7, type SonioxStreamingOptions as h8, type ElevenLabsStreamingOptions as h9, type ProviderStreamingOptions as ha, type StreamingOptionsForProvider as hb, type TranscribeStreamParams as hc, type BeginEvent as hd, type TurnEvent as he, type TerminationEvent as hf, type ErrorEvent as hg, type StreamingEventMessage as hh, type StreamingWord as hi, type StreamingForceEndpoint as hj, type PostProcessingConfig as i, type TranscriptionMetadataDTO as j, type TranscriptionDTO as k, type TranslationDTO as l, type SummarizationDTO as m, type SentimentAnalysisDTO as n, type ChapterizationDTO as o, type PreRecordedResponse as p, type UtteranceDTO as q, TranscriptionLanguageCodeEnum as r, TranslationLanguageCodeEnum as s, type NamedEntityRecognitionResult as t, StreamingSupportedRegions as u, type AddonErrorDTO as v, type AudioToLlmDTO as w, type AudioToLlmDTOError as x, type AudioToLlmDTOResults as y, type AudioToLlmListConfigDTO as z };
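
Note on the hunk above: the single-letter bindings (a, b0, cR, ...) are positional aliases produced when the declaration bundler flattens this internal chunk's exports; the package's public entry points re-export them under their readable names. Because the letters are assigned in order, adding or removing any export (the added list, for instance, introduces V1ListenPostParameters* entries and a TranscriptOptionalParamsSpeechUnderstandingRequest type) reshuffles nearly every alias, which is why the diff replaces the entire line even though most named exports are unchanged. A minimal TypeScript sketch of the pattern follows; the chunk file name and the transcribe signature are illustrative assumptions, not taken from this diff:

// chunk-XYZ.d.ts (internal, hypothetical name): positional aliases, as in the hunk above.
// export { type TranscribeOptions as T, type UnifiedTranscriptResponse as U };

// index.d.ts (public entry): restores the readable names for consumers.
// export { type T as TranscribeOptions, type U as UnifiedTranscriptResponse } from "./chunk-XYZ";

// Consumer code only ever sees the public names, never the mangled aliases:
import type { TranscribeOptions, UnifiedTranscriptResponse } from "voice-router-dev";

// Illustrative signature only; the real API shape is not shown in this diff.
declare function transcribe(options: TranscribeOptions): Promise<UnifiedTranscriptResponse>;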